diff options
Diffstat (limited to 'src')
150 files changed, 5602 insertions, 3459 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f69d00a2b..6c99dd5e2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt | |||
| @@ -1,18 +1,79 @@ | |||
| 1 | # Enable modules to include each other's files | 1 | # Enable modules to include each other's files |
| 2 | include_directories(.) | 2 | include_directories(.) |
| 3 | 3 | ||
| 4 | # CMake seems to only define _DEBUG on Windows | ||
| 5 | set_property(DIRECTORY APPEND PROPERTY | ||
| 6 | COMPILE_DEFINITIONS $<$<CONFIG:Debug>:_DEBUG> $<$<NOT:$<CONFIG:Debug>>:NDEBUG>) | ||
| 7 | |||
| 8 | # Set compilation flags | ||
| 9 | if (MSVC) | ||
| 10 | set(CMAKE_CONFIGURATION_TYPES Debug Release CACHE STRING "" FORCE) | ||
| 11 | |||
| 12 | # Silence "deprecation" warnings | ||
| 13 | add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE -D_SCL_SECURE_NO_WARNINGS) | ||
| 14 | |||
| 15 | # Avoid windows.h junk | ||
| 16 | add_definitions(-DNOMINMAX) | ||
| 17 | |||
| 18 | # Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors. | ||
| 19 | add_definitions(-DWIN32_LEAN_AND_MEAN) | ||
| 20 | |||
| 21 | # /W3 - Level 3 warnings | ||
| 22 | # /MP - Multi-threaded compilation | ||
| 23 | # /Zi - Output debugging information | ||
| 24 | # /Zo - enhanced debug info for optimized builds | ||
| 25 | # /permissive- - enables stricter C++ standards conformance checks | ||
| 26 | # /EHsc - C++-only exception handling semantics | ||
| 27 | # /Zc:throwingNew - let codegen assume `operator new` will never return null | ||
| 28 | # /Zc:inline - let codegen omit inline functions in object files | ||
| 29 | add_compile_options(/W3 /MP /Zi /Zo /permissive- /EHsc /std:c++latest /Zc:throwingNew,inline) | ||
| 30 | |||
| 31 | # /GS- - No stack buffer overflow checks | ||
| 32 | add_compile_options("$<$<CONFIG:Release>:/GS->") | ||
| 33 | |||
| 34 | set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE) | ||
| 35 | set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE) | ||
| 36 | else() | ||
| 37 | add_compile_options("-Wno-attributes") | ||
| 38 | |||
| 39 | if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) | ||
| 40 | add_compile_options("-stdlib=libc++") | ||
| 41 | endif() | ||
| 42 | |||
| 43 | # Set file offset size to 64 bits. | ||
| 44 | # | ||
| 45 | # On modern Unixes, this is typically already the case. The lone exception is | ||
| 46 | # glibc, which may default to 32 bits. glibc allows this to be configured | ||
| 47 | # by setting _FILE_OFFSET_BITS. | ||
| 48 | if(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR MINGW) | ||
| 49 | add_definitions(-D_FILE_OFFSET_BITS=64) | ||
| 50 | endif() | ||
| 51 | |||
| 52 | if (MINGW) | ||
| 53 | add_definitions(-DMINGW_HAS_SECURE_API) | ||
| 54 | |||
| 55 | if (MINGW_STATIC_BUILD) | ||
| 56 | add_definitions(-DQT_STATICPLUGIN) | ||
| 57 | add_compile_options("-static") | ||
| 58 | endif() | ||
| 59 | endif() | ||
| 60 | endif() | ||
| 61 | |||
| 4 | add_subdirectory(common) | 62 | add_subdirectory(common) |
| 5 | add_subdirectory(core) | 63 | add_subdirectory(core) |
| 6 | add_subdirectory(audio_core) | 64 | add_subdirectory(audio_core) |
| 7 | add_subdirectory(video_core) | 65 | add_subdirectory(video_core) |
| 8 | add_subdirectory(input_common) | 66 | add_subdirectory(input_common) |
| 9 | add_subdirectory(tests) | 67 | add_subdirectory(tests) |
| 68 | |||
| 10 | if (ENABLE_SDL2) | 69 | if (ENABLE_SDL2) |
| 11 | add_subdirectory(yuzu_cmd) | 70 | add_subdirectory(yuzu_cmd) |
| 12 | endif() | 71 | endif() |
| 72 | |||
| 13 | if (ENABLE_QT) | 73 | if (ENABLE_QT) |
| 14 | add_subdirectory(yuzu) | 74 | add_subdirectory(yuzu) |
| 15 | endif() | 75 | endif() |
| 76 | |||
| 16 | if (ENABLE_WEB_SERVICE) | 77 | if (ENABLE_WEB_SERVICE) |
| 17 | add_subdirectory(web_service) | 78 | add_subdirectory(web_service) |
| 18 | endif() | 79 | endif() |
diff --git a/src/audio_core/cubeb_sink.cpp b/src/audio_core/cubeb_sink.cpp index 1da0b9f2a..7047ed9cf 100644 --- a/src/audio_core/cubeb_sink.cpp +++ b/src/audio_core/cubeb_sink.cpp | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include "common/ring_buffer.h" | 12 | #include "common/ring_buffer.h" |
| 13 | #include "core/settings.h" | 13 | #include "core/settings.h" |
| 14 | 14 | ||
| 15 | #ifdef _MSC_VER | 15 | #ifdef _WIN32 |
| 16 | #include <objbase.h> | 16 | #include <objbase.h> |
| 17 | #endif | 17 | #endif |
| 18 | 18 | ||
| @@ -113,7 +113,7 @@ private: | |||
| 113 | 113 | ||
| 114 | CubebSink::CubebSink(std::string_view target_device_name) { | 114 | CubebSink::CubebSink(std::string_view target_device_name) { |
| 115 | // Cubeb requires COM to be initialized on the thread calling cubeb_init on Windows | 115 | // Cubeb requires COM to be initialized on the thread calling cubeb_init on Windows |
| 116 | #ifdef _MSC_VER | 116 | #ifdef _WIN32 |
| 117 | com_init_result = CoInitializeEx(nullptr, COINIT_MULTITHREADED); | 117 | com_init_result = CoInitializeEx(nullptr, COINIT_MULTITHREADED); |
| 118 | #endif | 118 | #endif |
| 119 | 119 | ||
| @@ -152,7 +152,7 @@ CubebSink::~CubebSink() { | |||
| 152 | 152 | ||
| 153 | cubeb_destroy(ctx); | 153 | cubeb_destroy(ctx); |
| 154 | 154 | ||
| 155 | #ifdef _MSC_VER | 155 | #ifdef _WIN32 |
| 156 | if (SUCCEEDED(com_init_result)) { | 156 | if (SUCCEEDED(com_init_result)) { |
| 157 | CoUninitialize(); | 157 | CoUninitialize(); |
| 158 | } | 158 | } |
diff --git a/src/audio_core/cubeb_sink.h b/src/audio_core/cubeb_sink.h index 511df7bb1..7ce850f47 100644 --- a/src/audio_core/cubeb_sink.h +++ b/src/audio_core/cubeb_sink.h | |||
| @@ -26,7 +26,7 @@ private: | |||
| 26 | cubeb_devid output_device{}; | 26 | cubeb_devid output_device{}; |
| 27 | std::vector<SinkStreamPtr> sink_streams; | 27 | std::vector<SinkStreamPtr> sink_streams; |
| 28 | 28 | ||
| 29 | #ifdef _MSC_VER | 29 | #ifdef _WIN32 |
| 30 | u32 com_init_result = 0; | 30 | u32 com_init_result = 0; |
| 31 | #endif | 31 | #endif |
| 32 | }; | 32 | }; |
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index bdd885273..43ae8a9e7 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt | |||
| @@ -47,6 +47,7 @@ add_custom_command(OUTPUT scm_rev.cpp | |||
| 47 | "${VIDEO_CORE}/shader/decode/integer_set.cpp" | 47 | "${VIDEO_CORE}/shader/decode/integer_set.cpp" |
| 48 | "${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp" | 48 | "${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp" |
| 49 | "${VIDEO_CORE}/shader/decode/memory.cpp" | 49 | "${VIDEO_CORE}/shader/decode/memory.cpp" |
| 50 | "${VIDEO_CORE}/shader/decode/texture.cpp" | ||
| 50 | "${VIDEO_CORE}/shader/decode/other.cpp" | 51 | "${VIDEO_CORE}/shader/decode/other.cpp" |
| 51 | "${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp" | 52 | "${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp" |
| 52 | "${VIDEO_CORE}/shader/decode/predicate_set_register.cpp" | 53 | "${VIDEO_CORE}/shader/decode/predicate_set_register.cpp" |
| @@ -91,10 +92,14 @@ add_library(common STATIC | |||
| 91 | logging/text_formatter.cpp | 92 | logging/text_formatter.cpp |
| 92 | logging/text_formatter.h | 93 | logging/text_formatter.h |
| 93 | math_util.h | 94 | math_util.h |
| 95 | memory_hook.cpp | ||
| 96 | memory_hook.h | ||
| 94 | microprofile.cpp | 97 | microprofile.cpp |
| 95 | microprofile.h | 98 | microprofile.h |
| 96 | microprofileui.h | 99 | microprofileui.h |
| 97 | misc.cpp | 100 | misc.cpp |
| 101 | page_table.cpp | ||
| 102 | page_table.h | ||
| 98 | param_package.cpp | 103 | param_package.cpp |
| 99 | param_package.h | 104 | param_package.h |
| 100 | quaternion.h | 105 | quaternion.h |
| @@ -113,6 +118,8 @@ add_library(common STATIC | |||
| 113 | threadsafe_queue.h | 118 | threadsafe_queue.h |
| 114 | timer.cpp | 119 | timer.cpp |
| 115 | timer.h | 120 | timer.h |
| 121 | uint128.cpp | ||
| 122 | uint128.h | ||
| 116 | vector_math.h | 123 | vector_math.h |
| 117 | web_result.h | 124 | web_result.h |
| 118 | ) | 125 | ) |
diff --git a/src/common/bit_field.h b/src/common/bit_field.h index 7433c39ba..902e668e3 100644 --- a/src/common/bit_field.h +++ b/src/common/bit_field.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <limits> | 34 | #include <limits> |
| 35 | #include <type_traits> | 35 | #include <type_traits> |
| 36 | #include "common/common_funcs.h" | 36 | #include "common/common_funcs.h" |
| 37 | #include "common/swap.h" | ||
| 37 | 38 | ||
| 38 | /* | 39 | /* |
| 39 | * Abstract bitfield class | 40 | * Abstract bitfield class |
| @@ -108,7 +109,7 @@ | |||
| 108 | * symptoms. | 109 | * symptoms. |
| 109 | */ | 110 | */ |
| 110 | #pragma pack(1) | 111 | #pragma pack(1) |
| 111 | template <std::size_t Position, std::size_t Bits, typename T> | 112 | template <std::size_t Position, std::size_t Bits, typename T, typename EndianTag = LETag> |
| 112 | struct BitField { | 113 | struct BitField { |
| 113 | private: | 114 | private: |
| 114 | // UnderlyingType is T for non-enum types and the underlying type of T if | 115 | // UnderlyingType is T for non-enum types and the underlying type of T if |
| @@ -121,6 +122,8 @@ private: | |||
| 121 | // We store the value as the unsigned type to avoid undefined behaviour on value shifting | 122 | // We store the value as the unsigned type to avoid undefined behaviour on value shifting |
| 122 | using StorageType = std::make_unsigned_t<UnderlyingType>; | 123 | using StorageType = std::make_unsigned_t<UnderlyingType>; |
| 123 | 124 | ||
| 125 | using StorageTypeWithEndian = typename AddEndian<StorageType, EndianTag>::type; | ||
| 126 | |||
| 124 | public: | 127 | public: |
| 125 | /// Constants to allow limited introspection of fields if needed | 128 | /// Constants to allow limited introspection of fields if needed |
| 126 | static constexpr std::size_t position = Position; | 129 | static constexpr std::size_t position = Position; |
| @@ -170,7 +173,7 @@ public: | |||
| 170 | } | 173 | } |
| 171 | 174 | ||
| 172 | constexpr FORCE_INLINE void Assign(const T& value) { | 175 | constexpr FORCE_INLINE void Assign(const T& value) { |
| 173 | storage = (storage & ~mask) | FormatValue(value); | 176 | storage = (static_cast<StorageType>(storage) & ~mask) | FormatValue(value); |
| 174 | } | 177 | } |
| 175 | 178 | ||
| 176 | constexpr T Value() const { | 179 | constexpr T Value() const { |
| @@ -182,7 +185,7 @@ public: | |||
| 182 | } | 185 | } |
| 183 | 186 | ||
| 184 | private: | 187 | private: |
| 185 | StorageType storage; | 188 | StorageTypeWithEndian storage; |
| 186 | 189 | ||
| 187 | static_assert(bits + position <= 8 * sizeof(T), "Bitfield out of range"); | 190 | static_assert(bits + position <= 8 * sizeof(T), "Bitfield out of range"); |
| 188 | 191 | ||
| @@ -193,3 +196,6 @@ private: | |||
| 193 | static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable in a BitField"); | 196 | static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable in a BitField"); |
| 194 | }; | 197 | }; |
| 195 | #pragma pack() | 198 | #pragma pack() |
| 199 | |||
| 200 | template <std::size_t Position, std::size_t Bits, typename T> | ||
| 201 | using BitFieldBE = BitField<Position, Bits, T, BETag>; | ||
diff --git a/src/common/common_types.h b/src/common/common_types.h index 6b1766dca..4cec89fbd 100644 --- a/src/common/common_types.h +++ b/src/common/common_types.h | |||
| @@ -40,10 +40,9 @@ using s64 = std::int64_t; ///< 64-bit signed int | |||
| 40 | using f32 = float; ///< 32-bit floating point | 40 | using f32 = float; ///< 32-bit floating point |
| 41 | using f64 = double; ///< 64-bit floating point | 41 | using f64 = double; ///< 64-bit floating point |
| 42 | 42 | ||
| 43 | // TODO: It would be nice to eventually replace these with strong types that prevent accidental | 43 | using VAddr = u64; ///< Represents a pointer in the userspace virtual address space. |
| 44 | // conversion between each other. | 44 | using PAddr = u64; ///< Represents a pointer in the ARM11 physical address space. |
| 45 | using VAddr = u64; ///< Represents a pointer in the userspace virtual address space. | 45 | using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space. |
| 46 | using PAddr = u64; ///< Represents a pointer in the ARM11 physical address space. | ||
| 47 | 46 | ||
| 48 | using u128 = std::array<std::uint64_t, 2>; | 47 | using u128 = std::array<std::uint64_t, 2>; |
| 49 | static_assert(sizeof(u128) == 16, "u128 must be 128 bits wide"); | 48 | static_assert(sizeof(u128) == 16, "u128 must be 128 bits wide"); |
diff --git a/src/core/memory_hook.cpp b/src/common/memory_hook.cpp index c61c6c1fb..3986986d6 100644 --- a/src/core/memory_hook.cpp +++ b/src/common/memory_hook.cpp | |||
| @@ -2,10 +2,10 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "core/memory_hook.h" | 5 | #include "common/memory_hook.h" |
| 6 | 6 | ||
| 7 | namespace Memory { | 7 | namespace Common { |
| 8 | 8 | ||
| 9 | MemoryHook::~MemoryHook() = default; | 9 | MemoryHook::~MemoryHook() = default; |
| 10 | 10 | ||
| 11 | } // namespace Memory | 11 | } // namespace Common |
diff --git a/src/core/memory_hook.h b/src/common/memory_hook.h index 940777107..adaa4c2c5 100644 --- a/src/core/memory_hook.h +++ b/src/common/memory_hook.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | 11 | ||
| 12 | namespace Memory { | 12 | namespace Common { |
| 13 | 13 | ||
| 14 | /** | 14 | /** |
| 15 | * Memory hooks have two purposes: | 15 | * Memory hooks have two purposes: |
| @@ -44,4 +44,4 @@ public: | |||
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | using MemoryHookPointer = std::shared_ptr<MemoryHook>; | 46 | using MemoryHookPointer = std::shared_ptr<MemoryHook>; |
| 47 | } // namespace Memory | 47 | } // namespace Common |
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp new file mode 100644 index 000000000..69b7abc54 --- /dev/null +++ b/src/common/page_table.cpp | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "common/page_table.h" | ||
| 6 | |||
| 7 | namespace Common { | ||
| 8 | |||
| 9 | PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {} | ||
| 10 | |||
| 11 | PageTable::~PageTable() = default; | ||
| 12 | |||
| 13 | void PageTable::Resize(std::size_t address_space_width_in_bits) { | ||
| 14 | const std::size_t num_page_table_entries = 1ULL | ||
| 15 | << (address_space_width_in_bits - page_size_in_bits); | ||
| 16 | |||
| 17 | pointers.resize(num_page_table_entries); | ||
| 18 | attributes.resize(num_page_table_entries); | ||
| 19 | backing_addr.resize(num_page_table_entries); | ||
| 20 | |||
| 21 | // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the | ||
| 22 | // vector size is subsequently decreased (via resize), the vector might not automatically | ||
| 23 | // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for | ||
| 24 | // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use. | ||
| 25 | |||
| 26 | pointers.shrink_to_fit(); | ||
| 27 | attributes.shrink_to_fit(); | ||
| 28 | backing_addr.shrink_to_fit(); | ||
| 29 | } | ||
| 30 | |||
| 31 | } // namespace Common | ||
diff --git a/src/common/page_table.h b/src/common/page_table.h new file mode 100644 index 000000000..8b8ff0bb8 --- /dev/null +++ b/src/common/page_table.h | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <vector> | ||
| 8 | #include <boost/icl/interval_map.hpp> | ||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "common/memory_hook.h" | ||
| 11 | |||
| 12 | namespace Common { | ||
| 13 | |||
| 14 | enum class PageType : u8 { | ||
| 15 | /// Page is unmapped and should cause an access error. | ||
| 16 | Unmapped, | ||
| 17 | /// Page is mapped to regular memory. This is the only type you can get pointers to. | ||
| 18 | Memory, | ||
| 19 | /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and | ||
| 20 | /// invalidation | ||
| 21 | RasterizerCachedMemory, | ||
| 22 | /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions. | ||
| 23 | Special, | ||
| 24 | /// Page is allocated for use. | ||
| 25 | Allocated, | ||
| 26 | }; | ||
| 27 | |||
| 28 | struct SpecialRegion { | ||
| 29 | enum class Type { | ||
| 30 | DebugHook, | ||
| 31 | IODevice, | ||
| 32 | } type; | ||
| 33 | |||
| 34 | MemoryHookPointer handler; | ||
| 35 | |||
| 36 | bool operator<(const SpecialRegion& other) const { | ||
| 37 | return std::tie(type, handler) < std::tie(other.type, other.handler); | ||
| 38 | } | ||
| 39 | |||
| 40 | bool operator==(const SpecialRegion& other) const { | ||
| 41 | return std::tie(type, handler) == std::tie(other.type, other.handler); | ||
| 42 | } | ||
| 43 | }; | ||
| 44 | |||
| 45 | /** | ||
| 46 | * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely | ||
| 47 | * mimics the way a real CPU page table works. | ||
| 48 | */ | ||
| 49 | struct PageTable { | ||
| 50 | explicit PageTable(std::size_t page_size_in_bits); | ||
| 51 | ~PageTable(); | ||
| 52 | |||
| 53 | /** | ||
| 54 | * Resizes the page table to be able to accomodate enough pages within | ||
| 55 | * a given address space. | ||
| 56 | * | ||
| 57 | * @param address_space_width_in_bits The address size width in bits. | ||
| 58 | */ | ||
| 59 | void Resize(std::size_t address_space_width_in_bits); | ||
| 60 | |||
| 61 | /** | ||
| 62 | * Vector of memory pointers backing each page. An entry can only be non-null if the | ||
| 63 | * corresponding entry in the `attributes` vector is of type `Memory`. | ||
| 64 | */ | ||
| 65 | std::vector<u8*> pointers; | ||
| 66 | |||
| 67 | /** | ||
| 68 | * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is | ||
| 69 | * of type `Special`. | ||
| 70 | */ | ||
| 71 | boost::icl::interval_map<u64, std::set<SpecialRegion>> special_regions; | ||
| 72 | |||
| 73 | /** | ||
| 74 | * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then | ||
| 75 | * the corresponding entry in `pointers` MUST be set to null. | ||
| 76 | */ | ||
| 77 | std::vector<PageType> attributes; | ||
| 78 | |||
| 79 | std::vector<u64> backing_addr; | ||
| 80 | |||
| 81 | const std::size_t page_size_in_bits{}; | ||
| 82 | }; | ||
| 83 | |||
| 84 | } // namespace Common | ||
diff --git a/src/common/swap.h b/src/common/swap.h index 0e219747f..b3eab1324 100644 --- a/src/common/swap.h +++ b/src/common/swap.h | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | 17 | ||
| 18 | #pragma once | 18 | #pragma once |
| 19 | 19 | ||
| 20 | #include <type_traits> | ||
| 21 | |||
| 20 | #if defined(_MSC_VER) | 22 | #if defined(_MSC_VER) |
| 21 | #include <cstdlib> | 23 | #include <cstdlib> |
| 22 | #elif defined(__linux__) | 24 | #elif defined(__linux__) |
| @@ -170,7 +172,7 @@ struct swap_struct_t { | |||
| 170 | using swapped_t = swap_struct_t; | 172 | using swapped_t = swap_struct_t; |
| 171 | 173 | ||
| 172 | protected: | 174 | protected: |
| 173 | T value = T(); | 175 | T value; |
| 174 | 176 | ||
| 175 | static T swap(T v) { | 177 | static T swap(T v) { |
| 176 | return F::swap(v); | 178 | return F::swap(v); |
| @@ -605,52 +607,154 @@ struct swap_double_t { | |||
| 605 | } | 607 | } |
| 606 | }; | 608 | }; |
| 607 | 609 | ||
| 608 | #if COMMON_LITTLE_ENDIAN | 610 | template <typename T> |
| 609 | using u16_le = u16; | 611 | struct swap_enum_t { |
| 610 | using u32_le = u32; | 612 | static_assert(std::is_enum_v<T>); |
| 611 | using u64_le = u64; | 613 | using base = std::underlying_type_t<T>; |
| 614 | |||
| 615 | public: | ||
| 616 | swap_enum_t() = default; | ||
| 617 | swap_enum_t(const T& v) : value(swap(v)) {} | ||
| 618 | |||
| 619 | swap_enum_t& operator=(const T& v) { | ||
| 620 | value = swap(v); | ||
| 621 | return *this; | ||
| 622 | } | ||
| 623 | |||
| 624 | operator T() const { | ||
| 625 | return swap(value); | ||
| 626 | } | ||
| 627 | |||
| 628 | explicit operator base() const { | ||
| 629 | return static_cast<base>(swap(value)); | ||
| 630 | } | ||
| 612 | 631 | ||
| 613 | using s16_le = s16; | 632 | protected: |
| 614 | using s32_le = s32; | 633 | T value{}; |
| 615 | using s64_le = s64; | 634 | // clang-format off |
| 635 | using swap_t = std::conditional_t< | ||
| 636 | std::is_same_v<base, u16>, swap_16_t<u16>, std::conditional_t< | ||
| 637 | std::is_same_v<base, s16>, swap_16_t<s16>, std::conditional_t< | ||
| 638 | std::is_same_v<base, u32>, swap_32_t<u32>, std::conditional_t< | ||
| 639 | std::is_same_v<base, s32>, swap_32_t<s32>, std::conditional_t< | ||
| 640 | std::is_same_v<base, u64>, swap_64_t<u64>, std::conditional_t< | ||
| 641 | std::is_same_v<base, s64>, swap_64_t<s64>, void>>>>>>; | ||
| 642 | // clang-format on | ||
| 643 | static T swap(T x) { | ||
| 644 | return static_cast<T>(swap_t::swap(static_cast<base>(x))); | ||
| 645 | } | ||
| 646 | }; | ||
| 616 | 647 | ||
| 617 | using float_le = float; | 648 | struct SwapTag {}; // Use the different endianness from the system |
| 618 | using double_le = double; | 649 | struct KeepTag {}; // Use the same endianness as the system |
| 619 | 650 | ||
| 620 | using u64_be = swap_struct_t<u64, swap_64_t<u64>>; | 651 | template <typename T, typename Tag> |
| 621 | using s64_be = swap_struct_t<s64, swap_64_t<s64>>; | 652 | struct AddEndian; |
| 622 | 653 | ||
| 623 | using u32_be = swap_struct_t<u32, swap_32_t<u32>>; | 654 | // KeepTag specializations |
| 624 | using s32_be = swap_struct_t<s32, swap_32_t<s32>>; | ||
| 625 | 655 | ||
| 626 | using u16_be = swap_struct_t<u16, swap_16_t<u16>>; | 656 | template <typename T> |
| 627 | using s16_be = swap_struct_t<s16, swap_16_t<s16>>; | 657 | struct AddEndian<T, KeepTag> { |
| 658 | using type = T; | ||
| 659 | }; | ||
| 628 | 660 | ||
| 629 | using float_be = swap_struct_t<float, swap_float_t<float>>; | 661 | // SwapTag specializations |
| 630 | using double_be = swap_struct_t<double, swap_double_t<double>>; | 662 | |
| 631 | #else | 663 | template <> |
| 664 | struct AddEndian<u8, SwapTag> { | ||
| 665 | using type = u8; | ||
| 666 | }; | ||
| 667 | |||
| 668 | template <> | ||
| 669 | struct AddEndian<u16, SwapTag> { | ||
| 670 | using type = swap_struct_t<u16, swap_16_t<u16>>; | ||
| 671 | }; | ||
| 672 | |||
| 673 | template <> | ||
| 674 | struct AddEndian<u32, SwapTag> { | ||
| 675 | using type = swap_struct_t<u32, swap_32_t<u32>>; | ||
| 676 | }; | ||
| 632 | 677 | ||
| 633 | using u64_le = swap_struct_t<u64, swap_64_t<u64>>; | 678 | template <> |
| 634 | using s64_le = swap_struct_t<s64, swap_64_t<s64>>; | 679 | struct AddEndian<u64, SwapTag> { |
| 680 | using type = swap_struct_t<u64, swap_64_t<u64>>; | ||
| 681 | }; | ||
| 682 | |||
| 683 | template <> | ||
| 684 | struct AddEndian<s8, SwapTag> { | ||
| 685 | using type = s8; | ||
| 686 | }; | ||
| 635 | 687 | ||
| 636 | using u32_le = swap_struct_t<u32, swap_32_t<u32>>; | 688 | template <> |
| 637 | using s32_le = swap_struct_t<s32, swap_32_t<s32>>; | 689 | struct AddEndian<s16, SwapTag> { |
| 690 | using type = swap_struct_t<s16, swap_16_t<s16>>; | ||
| 691 | }; | ||
| 638 | 692 | ||
| 639 | using u16_le = swap_struct_t<u16, swap_16_t<u16>>; | 693 | template <> |
| 640 | using s16_le = swap_struct_t<s16, swap_16_t<s16>>; | 694 | struct AddEndian<s32, SwapTag> { |
| 695 | using type = swap_struct_t<s32, swap_32_t<s32>>; | ||
| 696 | }; | ||
| 697 | |||
| 698 | template <> | ||
| 699 | struct AddEndian<s64, SwapTag> { | ||
| 700 | using type = swap_struct_t<s64, swap_64_t<s64>>; | ||
| 701 | }; | ||
| 702 | |||
| 703 | template <> | ||
| 704 | struct AddEndian<float, SwapTag> { | ||
| 705 | using type = swap_struct_t<float, swap_float_t<float>>; | ||
| 706 | }; | ||
| 707 | |||
| 708 | template <> | ||
| 709 | struct AddEndian<double, SwapTag> { | ||
| 710 | using type = swap_struct_t<double, swap_double_t<double>>; | ||
| 711 | }; | ||
| 712 | |||
| 713 | template <typename T> | ||
| 714 | struct AddEndian<T, SwapTag> { | ||
| 715 | static_assert(std::is_enum_v<T>); | ||
| 716 | using type = swap_enum_t<T>; | ||
| 717 | }; | ||
| 641 | 718 | ||
| 642 | using float_le = swap_struct_t<float, swap_float_t<float>>; | 719 | // Alias LETag/BETag as KeepTag/SwapTag depending on the system |
| 643 | using double_le = swap_struct_t<double, swap_double_t<double>>; | 720 | #if COMMON_LITTLE_ENDIAN |
| 644 | 721 | ||
| 645 | using u16_be = u16; | 722 | using LETag = KeepTag; |
| 646 | using u32_be = u32; | 723 | using BETag = SwapTag; |
| 647 | using u64_be = u64; | ||
| 648 | 724 | ||
| 649 | using s16_be = s16; | 725 | #else |
| 650 | using s32_be = s32; | ||
| 651 | using s64_be = s64; | ||
| 652 | 726 | ||
| 653 | using float_be = float; | 727 | using BETag = KeepTag; |
| 654 | using double_be = double; | 728 | using LETag = SwapTag; |
| 655 | 729 | ||
| 656 | #endif | 730 | #endif |
| 731 | |||
| 732 | // Aliases for LE types | ||
| 733 | using u16_le = AddEndian<u16, LETag>::type; | ||
| 734 | using u32_le = AddEndian<u32, LETag>::type; | ||
| 735 | using u64_le = AddEndian<u64, LETag>::type; | ||
| 736 | |||
| 737 | using s16_le = AddEndian<s16, LETag>::type; | ||
| 738 | using s32_le = AddEndian<s32, LETag>::type; | ||
| 739 | using s64_le = AddEndian<s64, LETag>::type; | ||
| 740 | |||
| 741 | template <typename T> | ||
| 742 | using enum_le = std::enable_if_t<std::is_enum_v<T>, typename AddEndian<T, LETag>::type>; | ||
| 743 | |||
| 744 | using float_le = AddEndian<float, LETag>::type; | ||
| 745 | using double_le = AddEndian<double, LETag>::type; | ||
| 746 | |||
| 747 | // Aliases for BE types | ||
| 748 | using u16_be = AddEndian<u16, BETag>::type; | ||
| 749 | using u32_be = AddEndian<u32, BETag>::type; | ||
| 750 | using u64_be = AddEndian<u64, BETag>::type; | ||
| 751 | |||
| 752 | using s16_be = AddEndian<s16, BETag>::type; | ||
| 753 | using s32_be = AddEndian<s32, BETag>::type; | ||
| 754 | using s64_be = AddEndian<s64, BETag>::type; | ||
| 755 | |||
| 756 | template <typename T> | ||
| 757 | using enum_be = std::enable_if_t<std::is_enum_v<T>, typename AddEndian<T, BETag>::type>; | ||
| 758 | |||
| 759 | using float_be = AddEndian<float, BETag>::type; | ||
| 760 | using double_be = AddEndian<double, BETag>::type; | ||
diff --git a/src/common/thread_queue_list.h b/src/common/thread_queue_list.h index e7594db68..791f99a8c 100644 --- a/src/common/thread_queue_list.h +++ b/src/common/thread_queue_list.h | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <deque> | 8 | #include <deque> |
| 9 | #include <boost/range/algorithm_ext/erase.hpp> | ||
| 10 | 9 | ||
| 11 | namespace Common { | 10 | namespace Common { |
| 12 | 11 | ||
| @@ -111,8 +110,9 @@ struct ThreadQueueList { | |||
| 111 | } | 110 | } |
| 112 | 111 | ||
| 113 | void remove(Priority priority, const T& thread_id) { | 112 | void remove(Priority priority, const T& thread_id) { |
| 114 | Queue* cur = &queues[priority]; | 113 | Queue* const cur = &queues[priority]; |
| 115 | boost::remove_erase(cur->data, thread_id); | 114 | const auto iter = std::remove(cur->data.begin(), cur->data.end(), thread_id); |
| 115 | cur->data.erase(iter, cur->data.end()); | ||
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | void rotate(Priority priority) { | 118 | void rotate(Priority priority) { |
diff --git a/src/common/uint128.cpp b/src/common/uint128.cpp new file mode 100644 index 000000000..32bf56730 --- /dev/null +++ b/src/common/uint128.cpp | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #ifdef _MSC_VER | ||
| 6 | #include <intrin.h> | ||
| 7 | |||
| 8 | #pragma intrinsic(_umul128) | ||
| 9 | #endif | ||
| 10 | #include <cstring> | ||
| 11 | #include "common/uint128.h" | ||
| 12 | |||
| 13 | namespace Common { | ||
| 14 | |||
| 15 | u128 Multiply64Into128(u64 a, u64 b) { | ||
| 16 | u128 result; | ||
| 17 | #ifdef _MSC_VER | ||
| 18 | result[0] = _umul128(a, b, &result[1]); | ||
| 19 | #else | ||
| 20 | unsigned __int128 tmp = a; | ||
| 21 | tmp *= b; | ||
| 22 | std::memcpy(&result, &tmp, sizeof(u128)); | ||
| 23 | #endif | ||
| 24 | return result; | ||
| 25 | } | ||
| 26 | |||
| 27 | std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor) { | ||
| 28 | u64 remainder = dividend[0] % divisor; | ||
| 29 | u64 accum = dividend[0] / divisor; | ||
| 30 | if (dividend[1] == 0) | ||
| 31 | return {accum, remainder}; | ||
| 32 | // We ignore dividend[1] / divisor as that overflows | ||
| 33 | const u64 first_segment = (dividend[1] % divisor) << 32; | ||
| 34 | accum += (first_segment / divisor) << 32; | ||
| 35 | const u64 second_segment = (first_segment % divisor) << 32; | ||
| 36 | accum += (second_segment / divisor); | ||
| 37 | remainder += second_segment % divisor; | ||
| 38 | if (remainder >= divisor) { | ||
| 39 | accum++; | ||
| 40 | remainder -= divisor; | ||
| 41 | } | ||
| 42 | return {accum, remainder}; | ||
| 43 | } | ||
| 44 | |||
| 45 | } // namespace Common | ||
diff --git a/src/common/uint128.h b/src/common/uint128.h new file mode 100644 index 000000000..a3be2a2cb --- /dev/null +++ b/src/common/uint128.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <utility> | ||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Common { | ||
| 11 | |||
| 12 | // This function multiplies 2 u64 values and produces a u128 value; | ||
| 13 | u128 Multiply64Into128(u64 a, u64 b); | ||
| 14 | |||
| 15 | // This function divides a u128 by a u32 value and produces two u64 values: | ||
| 16 | // the result of division and the remainder | ||
| 17 | std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor); | ||
| 18 | |||
| 19 | } // namespace Common | ||
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 3f855dcb7..f156bca40 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -31,6 +31,8 @@ add_library(core STATIC | |||
| 31 | file_sys/bis_factory.h | 31 | file_sys/bis_factory.h |
| 32 | file_sys/card_image.cpp | 32 | file_sys/card_image.cpp |
| 33 | file_sys/card_image.h | 33 | file_sys/card_image.h |
| 34 | file_sys/cheat_engine.cpp | ||
| 35 | file_sys/cheat_engine.h | ||
| 34 | file_sys/content_archive.cpp | 36 | file_sys/content_archive.cpp |
| 35 | file_sys/content_archive.h | 37 | file_sys/content_archive.h |
| 36 | file_sys/control_metadata.cpp | 38 | file_sys/control_metadata.cpp |
| @@ -109,6 +111,8 @@ add_library(core STATIC | |||
| 109 | hle/kernel/client_port.h | 111 | hle/kernel/client_port.h |
| 110 | hle/kernel/client_session.cpp | 112 | hle/kernel/client_session.cpp |
| 111 | hle/kernel/client_session.h | 113 | hle/kernel/client_session.h |
| 114 | hle/kernel/code_set.cpp | ||
| 115 | hle/kernel/code_set.h | ||
| 112 | hle/kernel/errors.h | 116 | hle/kernel/errors.h |
| 113 | hle/kernel/handle_table.cpp | 117 | hle/kernel/handle_table.cpp |
| 114 | hle/kernel/handle_table.h | 118 | hle/kernel/handle_table.h |
| @@ -421,8 +425,6 @@ add_library(core STATIC | |||
| 421 | loader/deconstructed_rom_directory.h | 425 | loader/deconstructed_rom_directory.h |
| 422 | loader/elf.cpp | 426 | loader/elf.cpp |
| 423 | loader/elf.h | 427 | loader/elf.h |
| 424 | loader/linker.cpp | ||
| 425 | loader/linker.h | ||
| 426 | loader/loader.cpp | 428 | loader/loader.cpp |
| 427 | loader/loader.h | 429 | loader/loader.h |
| 428 | loader/nax.cpp | 430 | loader/nax.cpp |
| @@ -439,8 +441,6 @@ add_library(core STATIC | |||
| 439 | loader/xci.h | 441 | loader/xci.h |
| 440 | memory.cpp | 442 | memory.cpp |
| 441 | memory.h | 443 | memory.h |
| 442 | memory_hook.cpp | ||
| 443 | memory_hook.h | ||
| 444 | memory_setup.h | 444 | memory_setup.h |
| 445 | perf_stats.cpp | 445 | perf_stats.cpp |
| 446 | perf_stats.h | 446 | perf_stats.h |
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp index 9b7ca4030..4fdc12f11 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "core/core.h" | 12 | #include "core/core.h" |
| 13 | #include "core/core_cpu.h" | 13 | #include "core/core_cpu.h" |
| 14 | #include "core/core_timing.h" | 14 | #include "core/core_timing.h" |
| 15 | #include "core/core_timing_util.h" | ||
| 15 | #include "core/gdbstub/gdbstub.h" | 16 | #include "core/gdbstub/gdbstub.h" |
| 16 | #include "core/hle/kernel/process.h" | 17 | #include "core/hle/kernel/process.h" |
| 17 | #include "core/hle/kernel/svc.h" | 18 | #include "core/hle/kernel/svc.h" |
| @@ -119,7 +120,7 @@ public: | |||
| 119 | return std::max(parent.core_timing.GetDowncount(), 0); | 120 | return std::max(parent.core_timing.GetDowncount(), 0); |
| 120 | } | 121 | } |
| 121 | u64 GetCNTPCT() override { | 122 | u64 GetCNTPCT() override { |
| 122 | return parent.core_timing.GetTicks(); | 123 | return Timing::CpuCyclesToClockCycles(parent.core_timing.GetTicks()); |
| 123 | } | 124 | } |
| 124 | 125 | ||
| 125 | ARM_Dynarmic& parent; | 126 | ARM_Dynarmic& parent; |
| @@ -151,7 +152,7 @@ std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit() const { | |||
| 151 | config.tpidr_el0 = &cb->tpidr_el0; | 152 | config.tpidr_el0 = &cb->tpidr_el0; |
| 152 | config.dczid_el0 = 4; | 153 | config.dczid_el0 = 4; |
| 153 | config.ctr_el0 = 0x8444c004; | 154 | config.ctr_el0 = 0x8444c004; |
| 154 | config.cntfrq_el0 = 19200000; // Value from fusee. | 155 | config.cntfrq_el0 = Timing::CNTFREQ; |
| 155 | 156 | ||
| 156 | // Unpredictable instructions | 157 | // Unpredictable instructions |
| 157 | config.define_unpredictable_behaviour = true; | 158 | config.define_unpredictable_behaviour = true; |
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h index 6cc458296..aada1e862 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.h +++ b/src/core/arm/dynarmic/arm_dynarmic.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include "core/arm/exclusive_monitor.h" | 12 | #include "core/arm/exclusive_monitor.h" |
| 13 | #include "core/arm/unicorn/arm_unicorn.h" | 13 | #include "core/arm/unicorn/arm_unicorn.h" |
| 14 | 14 | ||
| 15 | namespace Memory { | 15 | namespace Common { |
| 16 | struct PageTable; | 16 | struct PageTable; |
| 17 | } | 17 | } |
| 18 | 18 | ||
| @@ -70,7 +70,7 @@ private: | |||
| 70 | Timing::CoreTiming& core_timing; | 70 | Timing::CoreTiming& core_timing; |
| 71 | DynarmicExclusiveMonitor& exclusive_monitor; | 71 | DynarmicExclusiveMonitor& exclusive_monitor; |
| 72 | 72 | ||
| 73 | Memory::PageTable* current_page_table = nullptr; | 73 | Common::PageTable* current_page_table = nullptr; |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | class DynarmicExclusiveMonitor final : public ExclusiveMonitor { | 76 | class DynarmicExclusiveMonitor final : public ExclusiveMonitor { |
diff --git a/src/core/core.cpp b/src/core/core.cpp index eba2177d1..4fe77c25b 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "core/perf_stats.h" | 32 | #include "core/perf_stats.h" |
| 33 | #include "core/settings.h" | 33 | #include "core/settings.h" |
| 34 | #include "core/telemetry_session.h" | 34 | #include "core/telemetry_session.h" |
| 35 | #include "file_sys/cheat_engine.h" | ||
| 35 | #include "frontend/applets/profile_select.h" | 36 | #include "frontend/applets/profile_select.h" |
| 36 | #include "frontend/applets/software_keyboard.h" | 37 | #include "frontend/applets/software_keyboard.h" |
| 37 | #include "frontend/applets/web_browser.h" | 38 | #include "frontend/applets/web_browser.h" |
| @@ -116,7 +117,7 @@ struct System::Impl { | |||
| 116 | if (web_browser == nullptr) | 117 | if (web_browser == nullptr) |
| 117 | web_browser = std::make_unique<Core::Frontend::DefaultWebBrowserApplet>(); | 118 | web_browser = std::make_unique<Core::Frontend::DefaultWebBrowserApplet>(); |
| 118 | 119 | ||
| 119 | auto main_process = Kernel::Process::Create(kernel, "main"); | 120 | auto main_process = Kernel::Process::Create(system, "main"); |
| 120 | kernel.MakeCurrentProcess(main_process.get()); | 121 | kernel.MakeCurrentProcess(main_process.get()); |
| 121 | 122 | ||
| 122 | telemetry_session = std::make_unique<Core::TelemetrySession>(); | 123 | telemetry_session = std::make_unique<Core::TelemetrySession>(); |
| @@ -205,6 +206,7 @@ struct System::Impl { | |||
| 205 | GDBStub::Shutdown(); | 206 | GDBStub::Shutdown(); |
| 206 | Service::Shutdown(); | 207 | Service::Shutdown(); |
| 207 | service_manager.reset(); | 208 | service_manager.reset(); |
| 209 | cheat_engine.reset(); | ||
| 208 | telemetry_session.reset(); | 210 | telemetry_session.reset(); |
| 209 | gpu_core.reset(); | 211 | gpu_core.reset(); |
| 210 | 212 | ||
| @@ -255,6 +257,8 @@ struct System::Impl { | |||
| 255 | CpuCoreManager cpu_core_manager; | 257 | CpuCoreManager cpu_core_manager; |
| 256 | bool is_powered_on = false; | 258 | bool is_powered_on = false; |
| 257 | 259 | ||
| 260 | std::unique_ptr<FileSys::CheatEngine> cheat_engine; | ||
| 261 | |||
| 258 | /// Frontend applets | 262 | /// Frontend applets |
| 259 | std::unique_ptr<Core::Frontend::ProfileSelectApplet> profile_selector; | 263 | std::unique_ptr<Core::Frontend::ProfileSelectApplet> profile_selector; |
| 260 | std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet> software_keyboard; | 264 | std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet> software_keyboard; |
| @@ -453,6 +457,13 @@ Tegra::DebugContext* System::GetGPUDebugContext() const { | |||
| 453 | return impl->debug_context.get(); | 457 | return impl->debug_context.get(); |
| 454 | } | 458 | } |
| 455 | 459 | ||
| 460 | void System::RegisterCheatList(const std::vector<FileSys::CheatList>& list, | ||
| 461 | const std::string& build_id, VAddr code_region_start, | ||
| 462 | VAddr code_region_end) { | ||
| 463 | impl->cheat_engine = std::make_unique<FileSys::CheatEngine>(*this, list, build_id, | ||
| 464 | code_region_start, code_region_end); | ||
| 465 | } | ||
| 466 | |||
| 456 | void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) { | 467 | void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) { |
| 457 | impl->virtual_filesystem = std::move(vfs); | 468 | impl->virtual_filesystem = std::move(vfs); |
| 458 | } | 469 | } |
diff --git a/src/core/core.h b/src/core/core.h index ba76a41d8..4d83b93cc 100644 --- a/src/core/core.h +++ b/src/core/core.h | |||
| @@ -20,6 +20,7 @@ class WebBrowserApplet; | |||
| 20 | } // namespace Core::Frontend | 20 | } // namespace Core::Frontend |
| 21 | 21 | ||
| 22 | namespace FileSys { | 22 | namespace FileSys { |
| 23 | class CheatList; | ||
| 23 | class VfsFilesystem; | 24 | class VfsFilesystem; |
| 24 | } // namespace FileSys | 25 | } // namespace FileSys |
| 25 | 26 | ||
| @@ -253,6 +254,9 @@ public: | |||
| 253 | 254 | ||
| 254 | std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const; | 255 | std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const; |
| 255 | 256 | ||
| 257 | void RegisterCheatList(const std::vector<FileSys::CheatList>& list, const std::string& build_id, | ||
| 258 | VAddr code_region_start, VAddr code_region_end); | ||
| 259 | |||
| 256 | void SetProfileSelector(std::unique_ptr<Frontend::ProfileSelectApplet> applet); | 260 | void SetProfileSelector(std::unique_ptr<Frontend::ProfileSelectApplet> applet); |
| 257 | 261 | ||
| 258 | const Frontend::ProfileSelectApplet& GetProfileSelector() const; | 262 | const Frontend::ProfileSelectApplet& GetProfileSelector() const; |
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 54aa21a3a..1eefed6d0 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #endif | 11 | #endif |
| 12 | #include "core/arm/exclusive_monitor.h" | 12 | #include "core/arm/exclusive_monitor.h" |
| 13 | #include "core/arm/unicorn/arm_unicorn.h" | 13 | #include "core/arm/unicorn/arm_unicorn.h" |
| 14 | #include "core/core.h" | ||
| 14 | #include "core/core_cpu.h" | 15 | #include "core/core_cpu.h" |
| 15 | #include "core/core_timing.h" | 16 | #include "core/core_timing.h" |
| 16 | #include "core/hle/kernel/scheduler.h" | 17 | #include "core/hle/kernel/scheduler.h" |
| @@ -49,9 +50,9 @@ bool CpuBarrier::Rendezvous() { | |||
| 49 | return false; | 50 | return false; |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | Cpu::Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, | 53 | Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, |
| 53 | CpuBarrier& cpu_barrier, std::size_t core_index) | 54 | std::size_t core_index) |
| 54 | : cpu_barrier{cpu_barrier}, core_timing{core_timing}, core_index{core_index} { | 55 | : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} { |
| 55 | if (Settings::values.use_cpu_jit) { | 56 | if (Settings::values.use_cpu_jit) { |
| 56 | #ifdef ARCHITECTURE_x86_64 | 57 | #ifdef ARCHITECTURE_x86_64 |
| 57 | arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index); | 58 | arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index); |
| @@ -63,7 +64,7 @@ Cpu::Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, | |||
| 63 | arm_interface = std::make_unique<ARM_Unicorn>(core_timing); | 64 | arm_interface = std::make_unique<ARM_Unicorn>(core_timing); |
| 64 | } | 65 | } |
| 65 | 66 | ||
| 66 | scheduler = std::make_unique<Kernel::Scheduler>(*arm_interface); | 67 | scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface); |
| 67 | } | 68 | } |
| 68 | 69 | ||
| 69 | Cpu::~Cpu() = default; | 70 | Cpu::~Cpu() = default; |
diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h index e2204c6b0..7589beb8c 100644 --- a/src/core/core_cpu.h +++ b/src/core/core_cpu.h | |||
| @@ -15,6 +15,10 @@ namespace Kernel { | |||
| 15 | class Scheduler; | 15 | class Scheduler; |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | namespace Core { | ||
| 19 | class System; | ||
| 20 | } | ||
| 21 | |||
| 18 | namespace Core::Timing { | 22 | namespace Core::Timing { |
| 19 | class CoreTiming; | 23 | class CoreTiming; |
| 20 | } | 24 | } |
| @@ -45,8 +49,8 @@ private: | |||
| 45 | 49 | ||
| 46 | class Cpu { | 50 | class Cpu { |
| 47 | public: | 51 | public: |
| 48 | Cpu(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, | 52 | Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, |
| 49 | CpuBarrier& cpu_barrier, std::size_t core_index); | 53 | std::size_t core_index); |
| 50 | ~Cpu(); | 54 | ~Cpu(); |
| 51 | 55 | ||
| 52 | void RunLoop(bool tight_loop = true); | 56 | void RunLoop(bool tight_loop = true); |
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp index 88ff70233..7942f30d6 100644 --- a/src/core/core_timing_util.cpp +++ b/src/core/core_timing_util.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <cinttypes> | 7 | #include <cinttypes> |
| 8 | #include <limits> | 8 | #include <limits> |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "common/uint128.h" | ||
| 10 | 11 | ||
| 11 | namespace Core::Timing { | 12 | namespace Core::Timing { |
| 12 | 13 | ||
| @@ -60,4 +61,9 @@ s64 nsToCycles(u64 ns) { | |||
| 60 | return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000; | 61 | return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000; |
| 61 | } | 62 | } |
| 62 | 63 | ||
| 64 | u64 CpuCyclesToClockCycles(u64 ticks) { | ||
| 65 | const u128 temporal = Common::Multiply64Into128(ticks, CNTFREQ); | ||
| 66 | return Common::Divide128On32(temporal, static_cast<u32>(BASE_CLOCK_RATE)).first; | ||
| 67 | } | ||
| 68 | |||
| 63 | } // namespace Core::Timing | 69 | } // namespace Core::Timing |
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h index 513cfac1b..679aa3123 100644 --- a/src/core/core_timing_util.h +++ b/src/core/core_timing_util.h | |||
| @@ -11,6 +11,7 @@ namespace Core::Timing { | |||
| 11 | // The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz | 11 | // The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz |
| 12 | // The exact value used is of course unverified. | 12 | // The exact value used is of course unverified. |
| 13 | constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked | 13 | constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked |
| 14 | constexpr u64 CNTFREQ = 19200000; // Value from fusee. | ||
| 14 | 15 | ||
| 15 | inline s64 msToCycles(int ms) { | 16 | inline s64 msToCycles(int ms) { |
| 16 | // since ms is int there is no way to overflow | 17 | // since ms is int there is no way to overflow |
| @@ -61,4 +62,6 @@ inline u64 cyclesToMs(s64 cycles) { | |||
| 61 | return cycles * 1000 / BASE_CLOCK_RATE; | 62 | return cycles * 1000 / BASE_CLOCK_RATE; |
| 62 | } | 63 | } |
| 63 | 64 | ||
| 65 | u64 CpuCyclesToClockCycles(u64 ticks); | ||
| 66 | |||
| 64 | } // namespace Core::Timing | 67 | } // namespace Core::Timing |
diff --git a/src/core/cpu_core_manager.cpp b/src/core/cpu_core_manager.cpp index 2ddb3610d..93bc5619c 100644 --- a/src/core/cpu_core_manager.cpp +++ b/src/core/cpu_core_manager.cpp | |||
| @@ -27,8 +27,7 @@ void CpuCoreManager::Initialize(System& system) { | |||
| 27 | exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); | 27 | exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); |
| 28 | 28 | ||
| 29 | for (std::size_t index = 0; index < cores.size(); ++index) { | 29 | for (std::size_t index = 0; index < cores.size(); ++index) { |
| 30 | cores[index] = | 30 | cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index); |
| 31 | std::make_unique<Cpu>(system.CoreTiming(), *exclusive_monitor, *barrier, index); | ||
| 32 | } | 31 | } |
| 33 | 32 | ||
| 34 | // Create threads for CPU cores 1-3, and build thread_to_cpu map | 33 | // Create threads for CPU cores 1-3, and build thread_to_cpu map |
diff --git a/src/core/file_sys/cheat_engine.cpp b/src/core/file_sys/cheat_engine.cpp new file mode 100644 index 000000000..247fbc864 --- /dev/null +++ b/src/core/file_sys/cheat_engine.cpp | |||
| @@ -0,0 +1,490 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <locale> | ||
| 6 | #include "common/hex_util.h" | ||
| 7 | #include "common/microprofile.h" | ||
| 8 | #include "common/swap.h" | ||
| 9 | #include "core/core.h" | ||
| 10 | #include "core/core_timing.h" | ||
| 11 | #include "core/core_timing_util.h" | ||
| 12 | #include "core/file_sys/cheat_engine.h" | ||
| 13 | #include "core/hle/kernel/process.h" | ||
| 14 | #include "core/hle/service/hid/controllers/npad.h" | ||
| 15 | #include "core/hle/service/hid/hid.h" | ||
| 16 | #include "core/hle/service/sm/sm.h" | ||
| 17 | |||
| 18 | namespace FileSys { | ||
| 19 | |||
| 20 | constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 60); | ||
| 21 | constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; | ||
| 22 | |||
| 23 | u64 Cheat::Address() const { | ||
| 24 | u64 out; | ||
| 25 | std::memcpy(&out, raw.data(), sizeof(u64)); | ||
| 26 | return Common::swap64(out) & 0xFFFFFFFFFF; | ||
| 27 | } | ||
| 28 | |||
| 29 | u64 Cheat::ValueWidth(u64 offset) const { | ||
| 30 | return Value(offset, width); | ||
| 31 | } | ||
| 32 | |||
| 33 | u64 Cheat::Value(u64 offset, u64 width) const { | ||
| 34 | u64 out; | ||
| 35 | std::memcpy(&out, raw.data() + offset, sizeof(u64)); | ||
| 36 | out = Common::swap64(out); | ||
| 37 | if (width == 8) | ||
| 38 | return out; | ||
| 39 | return out & ((1ull << (width * CHAR_BIT)) - 1); | ||
| 40 | } | ||
| 41 | |||
| 42 | u32 Cheat::KeypadValue() const { | ||
| 43 | u32 out; | ||
| 44 | std::memcpy(&out, raw.data(), sizeof(u32)); | ||
| 45 | return Common::swap32(out) & 0x0FFFFFFF; | ||
| 46 | } | ||
| 47 | |||
| 48 | void CheatList::SetMemoryParameters(VAddr main_begin, VAddr heap_begin, VAddr main_end, | ||
| 49 | VAddr heap_end, MemoryWriter writer, MemoryReader reader) { | ||
| 50 | this->main_region_begin = main_begin; | ||
| 51 | this->main_region_end = main_end; | ||
| 52 | this->heap_region_begin = heap_begin; | ||
| 53 | this->heap_region_end = heap_end; | ||
| 54 | this->writer = writer; | ||
| 55 | this->reader = reader; | ||
| 56 | } | ||
| 57 | |||
| 58 | MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70)); | ||
| 59 | |||
| 60 | void CheatList::Execute() { | ||
| 61 | MICROPROFILE_SCOPE(Cheat_Engine); | ||
| 62 | |||
| 63 | std::fill(scratch.begin(), scratch.end(), 0); | ||
| 64 | in_standard = false; | ||
| 65 | for (std::size_t i = 0; i < master_list.size(); ++i) { | ||
| 66 | LOG_DEBUG(Common_Filesystem, "Executing block #{:08X} ({})", i, master_list[i].first); | ||
| 67 | current_block = i; | ||
| 68 | ExecuteBlock(master_list[i].second); | ||
| 69 | } | ||
| 70 | |||
| 71 | in_standard = true; | ||
| 72 | for (std::size_t i = 0; i < standard_list.size(); ++i) { | ||
| 73 | LOG_DEBUG(Common_Filesystem, "Executing block #{:08X} ({})", i, standard_list[i].first); | ||
| 74 | current_block = i; | ||
| 75 | ExecuteBlock(standard_list[i].second); | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 79 | CheatList::CheatList(const Core::System& system_, ProgramSegment master, ProgramSegment standard) | ||
| 80 | : master_list{std::move(master)}, standard_list{std::move(standard)}, system{&system_} {} | ||
| 81 | |||
| 82 | bool CheatList::EvaluateConditional(const Cheat& cheat) const { | ||
| 83 | using ComparisonFunction = bool (*)(u64, u64); | ||
| 84 | constexpr std::array<ComparisonFunction, 6> comparison_functions{ | ||
| 85 | [](u64 a, u64 b) { return a > b; }, [](u64 a, u64 b) { return a >= b; }, | ||
| 86 | [](u64 a, u64 b) { return a < b; }, [](u64 a, u64 b) { return a <= b; }, | ||
| 87 | [](u64 a, u64 b) { return a == b; }, [](u64 a, u64 b) { return a != b; }, | ||
| 88 | }; | ||
| 89 | |||
| 90 | if (cheat.type == CodeType::ConditionalInput) { | ||
| 91 | const auto applet_resource = | ||
| 92 | system->ServiceManager().GetService<Service::HID::Hid>("hid")->GetAppletResource(); | ||
| 93 | if (applet_resource == nullptr) { | ||
| 94 | LOG_WARNING( | ||
| 95 | Common_Filesystem, | ||
| 96 | "Attempted to evaluate input conditional, but applet resource is not initialized!"); | ||
| 97 | return false; | ||
| 98 | } | ||
| 99 | |||
| 100 | const auto press_state = | ||
| 101 | applet_resource | ||
| 102 | ->GetController<Service::HID::Controller_NPad>(Service::HID::HidController::NPad) | ||
| 103 | .GetAndResetPressState(); | ||
| 104 | return ((press_state & cheat.KeypadValue()) & KEYPAD_BITMASK) != 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | ASSERT(cheat.type == CodeType::Conditional); | ||
| 108 | |||
| 109 | const auto offset = | ||
| 110 | cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin; | ||
| 111 | ASSERT(static_cast<u8>(cheat.comparison_op.Value()) < 6); | ||
| 112 | auto* function = comparison_functions[static_cast<u8>(cheat.comparison_op.Value())]; | ||
| 113 | const auto addr = cheat.Address() + offset; | ||
| 114 | |||
| 115 | return function(reader(cheat.width, SanitizeAddress(addr)), cheat.ValueWidth(8)); | ||
| 116 | } | ||
| 117 | |||
| 118 | void CheatList::ProcessBlockPairs(const Block& block) { | ||
| 119 | block_pairs.clear(); | ||
| 120 | |||
| 121 | u64 scope = 0; | ||
| 122 | std::map<u64, u64> pairs; | ||
| 123 | |||
| 124 | for (std::size_t i = 0; i < block.size(); ++i) { | ||
| 125 | const auto& cheat = block[i]; | ||
| 126 | |||
| 127 | switch (cheat.type) { | ||
| 128 | case CodeType::Conditional: | ||
| 129 | case CodeType::ConditionalInput: | ||
| 130 | pairs.insert_or_assign(scope, i); | ||
| 131 | ++scope; | ||
| 132 | break; | ||
| 133 | case CodeType::EndConditional: { | ||
| 134 | --scope; | ||
| 135 | const auto idx = pairs.at(scope); | ||
| 136 | block_pairs.insert_or_assign(idx, i); | ||
| 137 | break; | ||
| 138 | } | ||
| 139 | case CodeType::Loop: { | ||
| 140 | if (cheat.end_of_loop) { | ||
| 141 | --scope; | ||
| 142 | const auto idx = pairs.at(scope); | ||
| 143 | block_pairs.insert_or_assign(idx, i); | ||
| 144 | } else { | ||
| 145 | pairs.insert_or_assign(scope, i); | ||
| 146 | ++scope; | ||
| 147 | } | ||
| 148 | break; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 154 | void CheatList::WriteImmediate(const Cheat& cheat) { | ||
| 155 | const auto offset = | ||
| 156 | cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin; | ||
| 157 | const auto& register_3 = scratch.at(cheat.register_3); | ||
| 158 | |||
| 159 | const auto addr = cheat.Address() + offset + register_3; | ||
| 160 | LOG_DEBUG(Common_Filesystem, "writing value={:016X} to addr={:016X}", addr, | ||
| 161 | cheat.Value(8, cheat.width)); | ||
| 162 | writer(cheat.width, SanitizeAddress(addr), cheat.ValueWidth(8)); | ||
| 163 | } | ||
| 164 | |||
| 165 | void CheatList::BeginConditional(const Cheat& cheat) { | ||
| 166 | if (EvaluateConditional(cheat)) { | ||
| 167 | return; | ||
| 168 | } | ||
| 169 | |||
| 170 | const auto iter = block_pairs.find(current_index); | ||
| 171 | ASSERT(iter != block_pairs.end()); | ||
| 172 | current_index = iter->second - 1; | ||
| 173 | } | ||
| 174 | |||
| 175 | void CheatList::EndConditional(const Cheat& cheat) { | ||
| 176 | LOG_DEBUG(Common_Filesystem, "Ending conditional block."); | ||
| 177 | } | ||
| 178 | |||
| 179 | void CheatList::Loop(const Cheat& cheat) { | ||
| 180 | if (cheat.end_of_loop.Value()) | ||
| 181 | ASSERT(!cheat.end_of_loop.Value()); | ||
| 182 | |||
| 183 | auto& register_3 = scratch.at(cheat.register_3); | ||
| 184 | const auto iter = block_pairs.find(current_index); | ||
| 185 | ASSERT(iter != block_pairs.end()); | ||
| 186 | ASSERT(iter->first < iter->second); | ||
| 187 | |||
| 188 | const s32 initial_value = static_cast<s32>(cheat.Value(4, sizeof(s32))); | ||
| 189 | for (s32 i = initial_value; i >= 0; --i) { | ||
| 190 | register_3 = static_cast<u64>(i); | ||
| 191 | for (std::size_t c = iter->first + 1; c < iter->second; ++c) { | ||
| 192 | current_index = c; | ||
| 193 | ExecuteSingleCheat( | ||
| 194 | (in_standard ? standard_list : master_list)[current_block].second[c]); | ||
| 195 | } | ||
| 196 | } | ||
| 197 | |||
| 198 | current_index = iter->second; | ||
| 199 | } | ||
| 200 | |||
| 201 | void CheatList::LoadImmediate(const Cheat& cheat) { | ||
| 202 | auto& register_3 = scratch.at(cheat.register_3); | ||
| 203 | |||
| 204 | LOG_DEBUG(Common_Filesystem, "setting register={:01X} equal to value={:016X}", cheat.register_3, | ||
| 205 | cheat.Value(4, 8)); | ||
| 206 | register_3 = cheat.Value(4, 8); | ||
| 207 | } | ||
| 208 | |||
| 209 | void CheatList::LoadIndexed(const Cheat& cheat) { | ||
| 210 | const auto offset = | ||
| 211 | cheat.memory_type == MemoryType::MainNSO ? main_region_begin : heap_region_begin; | ||
| 212 | auto& register_3 = scratch.at(cheat.register_3); | ||
| 213 | |||
| 214 | const auto addr = (cheat.load_from_register.Value() ? register_3 : offset) + cheat.Address(); | ||
| 215 | LOG_DEBUG(Common_Filesystem, "writing indexed value to register={:01X}, addr={:016X}", | ||
| 216 | cheat.register_3, addr); | ||
| 217 | register_3 = reader(cheat.width, SanitizeAddress(addr)); | ||
| 218 | } | ||
| 219 | |||
| 220 | void CheatList::StoreIndexed(const Cheat& cheat) { | ||
| 221 | const auto& register_3 = scratch.at(cheat.register_3); | ||
| 222 | |||
| 223 | const auto addr = | ||
| 224 | register_3 + (cheat.add_additional_register.Value() ? scratch.at(cheat.register_6) : 0); | ||
| 225 | LOG_DEBUG(Common_Filesystem, "writing value={:016X} to addr={:016X}", | ||
| 226 | cheat.Value(4, cheat.width), addr); | ||
| 227 | writer(cheat.width, SanitizeAddress(addr), cheat.ValueWidth(4)); | ||
| 228 | } | ||
| 229 | |||
| 230 | void CheatList::RegisterArithmetic(const Cheat& cheat) { | ||
| 231 | using ArithmeticFunction = u64 (*)(u64, u64); | ||
| 232 | constexpr std::array<ArithmeticFunction, 5> arithmetic_functions{ | ||
| 233 | [](u64 a, u64 b) { return a + b; }, [](u64 a, u64 b) { return a - b; }, | ||
| 234 | [](u64 a, u64 b) { return a * b; }, [](u64 a, u64 b) { return a << b; }, | ||
| 235 | [](u64 a, u64 b) { return a >> b; }, | ||
| 236 | }; | ||
| 237 | |||
| 238 | using ArithmeticOverflowCheck = bool (*)(u64, u64); | ||
| 239 | constexpr std::array<ArithmeticOverflowCheck, 5> arithmetic_overflow_checks{ | ||
| 240 | [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() - b); }, // a + b | ||
| 241 | [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() + b); }, // a - b | ||
| 242 | [](u64 a, u64 b) { return a > (std::numeric_limits<u64>::max() / b); }, // a * b | ||
| 243 | [](u64 a, u64 b) { return b >= 64 || (a & ~((1ull << (64 - b)) - 1)) != 0; }, // a << b | ||
| 244 | [](u64 a, u64 b) { return b >= 64 || (a & ((1ull << b) - 1)) != 0; }, // a >> b | ||
| 245 | }; | ||
| 246 | |||
| 247 | static_assert(sizeof(arithmetic_functions) == sizeof(arithmetic_overflow_checks), | ||
| 248 | "Missing or have extra arithmetic overflow checks compared to functions!"); | ||
| 249 | |||
| 250 | auto& register_3 = scratch.at(cheat.register_3); | ||
| 251 | |||
| 252 | ASSERT(static_cast<u8>(cheat.arithmetic_op.Value()) < 5); | ||
| 253 | auto* function = arithmetic_functions[static_cast<u8>(cheat.arithmetic_op.Value())]; | ||
| 254 | auto* overflow_function = | ||
| 255 | arithmetic_overflow_checks[static_cast<u8>(cheat.arithmetic_op.Value())]; | ||
| 256 | LOG_DEBUG(Common_Filesystem, "performing arithmetic with register={:01X}, value={:016X}", | ||
| 257 | cheat.register_3, cheat.ValueWidth(4)); | ||
| 258 | |||
| 259 | if (overflow_function(register_3, cheat.ValueWidth(4))) { | ||
| 260 | LOG_WARNING(Common_Filesystem, | ||
| 261 | "overflow will occur when performing arithmetic operation={:02X} with operands " | ||
| 262 | "a={:016X}, b={:016X}!", | ||
| 263 | static_cast<u8>(cheat.arithmetic_op.Value()), register_3, cheat.ValueWidth(4)); | ||
| 264 | } | ||
| 265 | |||
| 266 | register_3 = function(register_3, cheat.ValueWidth(4)); | ||
| 267 | } | ||
| 268 | |||
| 269 | void CheatList::BeginConditionalInput(const Cheat& cheat) { | ||
| 270 | if (EvaluateConditional(cheat)) | ||
| 271 | return; | ||
| 272 | |||
| 273 | const auto iter = block_pairs.find(current_index); | ||
| 274 | ASSERT(iter != block_pairs.end()); | ||
| 275 | current_index = iter->second - 1; | ||
| 276 | } | ||
| 277 | |||
| 278 | VAddr CheatList::SanitizeAddress(VAddr in) const { | ||
| 279 | if ((in < main_region_begin || in >= main_region_end) && | ||
| 280 | (in < heap_region_begin || in >= heap_region_end)) { | ||
| 281 | LOG_ERROR(Common_Filesystem, | ||
| 282 | "Cheat attempting to access memory at invalid address={:016X}, if this persists, " | ||
| 283 | "the cheat may be incorrect. However, this may be normal early in execution if " | ||
| 284 | "the game has not properly set up yet.", | ||
| 285 | in); | ||
| 286 | return 0; ///< Invalid addresses will hard crash | ||
| 287 | } | ||
| 288 | |||
| 289 | return in; | ||
| 290 | } | ||
| 291 | |||
| 292 | void CheatList::ExecuteSingleCheat(const Cheat& cheat) { | ||
| 293 | using CheatOperationFunction = void (CheatList::*)(const Cheat&); | ||
| 294 | constexpr std::array<CheatOperationFunction, 9> cheat_operation_functions{ | ||
| 295 | &CheatList::WriteImmediate, &CheatList::BeginConditional, | ||
| 296 | &CheatList::EndConditional, &CheatList::Loop, | ||
| 297 | &CheatList::LoadImmediate, &CheatList::LoadIndexed, | ||
| 298 | &CheatList::StoreIndexed, &CheatList::RegisterArithmetic, | ||
| 299 | &CheatList::BeginConditionalInput, | ||
| 300 | }; | ||
| 301 | |||
| 302 | const auto index = static_cast<u8>(cheat.type.Value()); | ||
| 303 | ASSERT(index < sizeof(cheat_operation_functions)); | ||
| 304 | const auto op = cheat_operation_functions[index]; | ||
| 305 | (this->*op)(cheat); | ||
| 306 | } | ||
| 307 | |||
| 308 | void CheatList::ExecuteBlock(const Block& block) { | ||
| 309 | encountered_loops.clear(); | ||
| 310 | |||
| 311 | ProcessBlockPairs(block); | ||
| 312 | for (std::size_t i = 0; i < block.size(); ++i) { | ||
| 313 | current_index = i; | ||
| 314 | ExecuteSingleCheat(block[i]); | ||
| 315 | i = current_index; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | |||
| 319 | CheatParser::~CheatParser() = default; | ||
| 320 | |||
| 321 | CheatList CheatParser::MakeCheatList(const Core::System& system, CheatList::ProgramSegment master, | ||
| 322 | CheatList::ProgramSegment standard) const { | ||
| 323 | return {system, std::move(master), std::move(standard)}; | ||
| 324 | } | ||
| 325 | |||
| 326 | TextCheatParser::~TextCheatParser() = default; | ||
| 327 | |||
| 328 | CheatList TextCheatParser::Parse(const Core::System& system, const std::vector<u8>& data) const { | ||
| 329 | std::stringstream ss; | ||
| 330 | ss.write(reinterpret_cast<const char*>(data.data()), data.size()); | ||
| 331 | |||
| 332 | std::vector<std::string> lines; | ||
| 333 | std::string stream_line; | ||
| 334 | while (std::getline(ss, stream_line)) { | ||
| 335 | // Remove a trailing \r | ||
| 336 | if (!stream_line.empty() && stream_line.back() == '\r') | ||
| 337 | stream_line.pop_back(); | ||
| 338 | lines.push_back(std::move(stream_line)); | ||
| 339 | } | ||
| 340 | |||
| 341 | CheatList::ProgramSegment master_list; | ||
| 342 | CheatList::ProgramSegment standard_list; | ||
| 343 | |||
| 344 | for (std::size_t i = 0; i < lines.size(); ++i) { | ||
| 345 | auto line = lines[i]; | ||
| 346 | |||
| 347 | if (!line.empty() && (line[0] == '[' || line[0] == '{')) { | ||
| 348 | const auto master = line[0] == '{'; | ||
| 349 | const auto begin = master ? line.find('{') : line.find('['); | ||
| 350 | const auto end = master ? line.rfind('}') : line.rfind(']'); | ||
| 351 | |||
| 352 | ASSERT(begin != std::string::npos && end != std::string::npos); | ||
| 353 | |||
| 354 | const std::string patch_name{line.begin() + begin + 1, line.begin() + end}; | ||
| 355 | CheatList::Block block{}; | ||
| 356 | |||
| 357 | while (i < lines.size() - 1) { | ||
| 358 | line = lines[++i]; | ||
| 359 | if (!line.empty() && (line[0] == '[' || line[0] == '{')) { | ||
| 360 | --i; | ||
| 361 | break; | ||
| 362 | } | ||
| 363 | |||
| 364 | if (line.size() < 8) | ||
| 365 | continue; | ||
| 366 | |||
| 367 | Cheat out{}; | ||
| 368 | out.raw = ParseSingleLineCheat(line); | ||
| 369 | block.push_back(out); | ||
| 370 | } | ||
| 371 | |||
| 372 | (master ? master_list : standard_list).emplace_back(patch_name, block); | ||
| 373 | } | ||
| 374 | } | ||
| 375 | |||
| 376 | return MakeCheatList(system, master_list, standard_list); | ||
| 377 | } | ||
| 378 | |||
| 379 | std::array<u8, 16> TextCheatParser::ParseSingleLineCheat(const std::string& line) const { | ||
| 380 | std::array<u8, 16> out{}; | ||
| 381 | |||
| 382 | if (line.size() < 8) | ||
| 383 | return out; | ||
| 384 | |||
| 385 | const auto word1 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data(), 8}); | ||
| 386 | std::memcpy(out.data(), word1.data(), sizeof(u32)); | ||
| 387 | |||
| 388 | if (line.size() < 17 || line[8] != ' ') | ||
| 389 | return out; | ||
| 390 | |||
| 391 | const auto word2 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 9, 8}); | ||
| 392 | std::memcpy(out.data() + sizeof(u32), word2.data(), sizeof(u32)); | ||
| 393 | |||
| 394 | if (line.size() < 26 || line[17] != ' ') { | ||
| 395 | // Perform shifting in case value is truncated early. | ||
| 396 | const auto type = static_cast<CodeType>((out[0] & 0xF0) >> 4); | ||
| 397 | if (type == CodeType::Loop || type == CodeType::LoadImmediate || | ||
| 398 | type == CodeType::StoreIndexed || type == CodeType::RegisterArithmetic) { | ||
| 399 | std::memcpy(out.data() + 8, out.data() + 4, sizeof(u32)); | ||
| 400 | std::memset(out.data() + 4, 0, sizeof(u32)); | ||
| 401 | } | ||
| 402 | |||
| 403 | return out; | ||
| 404 | } | ||
| 405 | |||
| 406 | const auto word3 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 18, 8}); | ||
| 407 | std::memcpy(out.data() + 2 * sizeof(u32), word3.data(), sizeof(u32)); | ||
| 408 | |||
| 409 | if (line.size() < 35 || line[26] != ' ') { | ||
| 410 | // Perform shifting in case value is truncated early. | ||
| 411 | const auto type = static_cast<CodeType>((out[0] & 0xF0) >> 4); | ||
| 412 | if (type == CodeType::WriteImmediate || type == CodeType::Conditional) { | ||
| 413 | std::memcpy(out.data() + 12, out.data() + 8, sizeof(u32)); | ||
| 414 | std::memset(out.data() + 8, 0, sizeof(u32)); | ||
| 415 | } | ||
| 416 | |||
| 417 | return out; | ||
| 418 | } | ||
| 419 | |||
| 420 | const auto word4 = Common::HexStringToArray<sizeof(u32)>(std::string_view{line.data() + 27, 8}); | ||
| 421 | std::memcpy(out.data() + 3 * sizeof(u32), word4.data(), sizeof(u32)); | ||
| 422 | |||
| 423 | return out; | ||
| 424 | } | ||
| 425 | |||
| 426 | u64 MemoryReadImpl(u32 width, VAddr addr) { | ||
| 427 | switch (width) { | ||
| 428 | case 1: | ||
| 429 | return Memory::Read8(addr); | ||
| 430 | case 2: | ||
| 431 | return Memory::Read16(addr); | ||
| 432 | case 4: | ||
| 433 | return Memory::Read32(addr); | ||
| 434 | case 8: | ||
| 435 | return Memory::Read64(addr); | ||
| 436 | default: | ||
| 437 | UNREACHABLE(); | ||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | } | ||
| 441 | |||
| 442 | void MemoryWriteImpl(u32 width, VAddr addr, u64 value) { | ||
| 443 | switch (width) { | ||
| 444 | case 1: | ||
| 445 | Memory::Write8(addr, static_cast<u8>(value)); | ||
| 446 | break; | ||
| 447 | case 2: | ||
| 448 | Memory::Write16(addr, static_cast<u16>(value)); | ||
| 449 | break; | ||
| 450 | case 4: | ||
| 451 | Memory::Write32(addr, static_cast<u32>(value)); | ||
| 452 | break; | ||
| 453 | case 8: | ||
| 454 | Memory::Write64(addr, value); | ||
| 455 | break; | ||
| 456 | default: | ||
| 457 | UNREACHABLE(); | ||
| 458 | } | ||
| 459 | } | ||
| 460 | |||
| 461 | CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_, | ||
| 462 | const std::string& build_id, VAddr code_region_start, | ||
| 463 | VAddr code_region_end) | ||
| 464 | : cheats{std::move(cheats_)}, core_timing{system.CoreTiming()} { | ||
| 465 | event = core_timing.RegisterEvent( | ||
| 466 | "CheatEngine::FrameCallback::" + build_id, | ||
| 467 | [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); | ||
| 468 | core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event); | ||
| 469 | |||
| 470 | const auto& vm_manager = system.CurrentProcess()->VMManager(); | ||
| 471 | for (auto& list : this->cheats) { | ||
| 472 | list.SetMemoryParameters(code_region_start, vm_manager.GetHeapRegionBaseAddress(), | ||
| 473 | code_region_end, vm_manager.GetHeapRegionEndAddress(), | ||
| 474 | &MemoryWriteImpl, &MemoryReadImpl); | ||
| 475 | } | ||
| 476 | } | ||
| 477 | |||
| 478 | CheatEngine::~CheatEngine() { | ||
| 479 | core_timing.UnscheduleEvent(event, 0); | ||
| 480 | } | ||
| 481 | |||
| 482 | void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) { | ||
| 483 | for (auto& list : cheats) { | ||
| 484 | list.Execute(); | ||
| 485 | } | ||
| 486 | |||
| 487 | core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event); | ||
| 488 | } | ||
| 489 | |||
| 490 | } // namespace FileSys | ||
diff --git a/src/core/file_sys/cheat_engine.h b/src/core/file_sys/cheat_engine.h new file mode 100644 index 000000000..ac22a82cb --- /dev/null +++ b/src/core/file_sys/cheat_engine.h | |||
| @@ -0,0 +1,234 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <map> | ||
| 8 | #include <set> | ||
| 9 | #include <vector> | ||
| 10 | #include "common/bit_field.h" | ||
| 11 | #include "common/common_types.h" | ||
| 12 | |||
| 13 | namespace Core { | ||
| 14 | class System; | ||
| 15 | } | ||
| 16 | |||
| 17 | namespace Core::Timing { | ||
| 18 | class CoreTiming; | ||
| 19 | struct EventType; | ||
| 20 | } // namespace Core::Timing | ||
| 21 | |||
| 22 | namespace FileSys { | ||
| 23 | |||
| 24 | enum class CodeType : u32 { | ||
| 25 | // 0TMR00AA AAAAAAAA YYYYYYYY YYYYYYYY | ||
| 26 | // Writes a T sized value Y to the address A added to the value of register R in memory domain M | ||
| 27 | WriteImmediate = 0, | ||
| 28 | |||
| 29 | // 1TMC00AA AAAAAAAA YYYYYYYY YYYYYYYY | ||
| 30 | // Compares the T sized value Y to the value at address A in memory domain M using the | ||
| 31 | // conditional function C. If success, continues execution. If failure, jumps to the matching | ||
| 32 | // EndConditional statement. | ||
| 33 | Conditional = 1, | ||
| 34 | |||
| 35 | // 20000000 | ||
| 36 | // Terminates a Conditional or ConditionalInput block. | ||
| 37 | EndConditional = 2, | ||
| 38 | |||
| 39 | // 300R0000 VVVVVVVV | ||
| 40 | // Starts looping V times, storing the current count in register R. | ||
| 41 | // Loop block is terminated with a matching 310R0000. | ||
| 42 | Loop = 3, | ||
| 43 | |||
| 44 | // 400R0000 VVVVVVVV VVVVVVVV | ||
| 45 | // Sets the value of register R to the value V. | ||
| 46 | LoadImmediate = 4, | ||
| 47 | |||
| 48 | // 5TMRI0AA AAAAAAAA | ||
| 49 | // Sets the value of register R to the value of width T at address A in memory domain M, with | ||
| 50 | // the current value of R added to the address if I == 1. | ||
| 51 | LoadIndexed = 5, | ||
| 52 | |||
| 53 | // 6T0RIFG0 VVVVVVVV VVVVVVVV | ||
| 54 | // Writes the value V of width T to the memory address stored in register R. Adds the value of | ||
| 55 | // register G to the final calculation if F is nonzero. Increments the value of register R by T | ||
| 56 | // after operation if I is nonzero. | ||
| 57 | StoreIndexed = 6, | ||
| 58 | |||
| 59 | // 7T0RA000 VVVVVVVV | ||
| 60 | // Performs the arithmetic operation A on the value in register R and the value V of width T, | ||
| 61 | // storing the result in register R. | ||
| 62 | RegisterArithmetic = 7, | ||
| 63 | |||
| 64 | // 8KKKKKKK | ||
| 65 | // Checks to see if any of the buttons defined by the bitmask K are pressed. If any are, | ||
| 66 | // execution continues. If none are, execution skips to the next EndConditional command. | ||
| 67 | ConditionalInput = 8, | ||
| 68 | }; | ||
| 69 | |||
| 70 | enum class MemoryType : u32 { | ||
| 71 | // Addressed relative to start of main NSO | ||
| 72 | MainNSO = 0, | ||
| 73 | |||
| 74 | // Addressed relative to start of heap | ||
| 75 | Heap = 1, | ||
| 76 | }; | ||
| 77 | |||
| 78 | enum class ArithmeticOp : u32 { | ||
| 79 | Add = 0, | ||
| 80 | Sub = 1, | ||
| 81 | Mult = 2, | ||
| 82 | LShift = 3, | ||
| 83 | RShift = 4, | ||
| 84 | }; | ||
| 85 | |||
| 86 | enum class ComparisonOp : u32 { | ||
| 87 | GreaterThan = 1, | ||
| 88 | GreaterThanEqual = 2, | ||
| 89 | LessThan = 3, | ||
| 90 | LessThanEqual = 4, | ||
| 91 | Equal = 5, | ||
| 92 | Inequal = 6, | ||
| 93 | }; | ||
| 94 | |||
| 95 | union Cheat { | ||
| 96 | std::array<u8, 16> raw; | ||
| 97 | |||
| 98 | BitField<4, 4, CodeType> type; | ||
| 99 | BitField<0, 4, u32> width; // Can be 1, 2, 4, or 8. Measured in bytes. | ||
| 100 | BitField<0, 4, u32> end_of_loop; | ||
| 101 | BitField<12, 4, MemoryType> memory_type; | ||
| 102 | BitField<8, 4, u32> register_3; | ||
| 103 | BitField<8, 4, ComparisonOp> comparison_op; | ||
| 104 | BitField<20, 4, u32> load_from_register; | ||
| 105 | BitField<20, 4, u32> increment_register; | ||
| 106 | BitField<20, 4, ArithmeticOp> arithmetic_op; | ||
| 107 | BitField<16, 4, u32> add_additional_register; | ||
| 108 | BitField<28, 4, u32> register_6; | ||
| 109 | |||
| 110 | u64 Address() const; | ||
| 111 | u64 ValueWidth(u64 offset) const; | ||
| 112 | u64 Value(u64 offset, u64 width) const; | ||
| 113 | u32 KeypadValue() const; | ||
| 114 | }; | ||
| 115 | |||
| 116 | class CheatParser; | ||
| 117 | |||
| 118 | // Represents a full collection of cheats for a game. The Execute function should be called every | ||
| 119 | // interval that all cheats should be executed. Clients should not directly instantiate this class | ||
| 120 | // (hence private constructor), they should instead receive an instance from CheatParser, which | ||
| 121 | // guarantees the list is always in an acceptable state. | ||
| 122 | class CheatList { | ||
| 123 | public: | ||
| 124 | friend class CheatParser; | ||
| 125 | |||
| 126 | using Block = std::vector<Cheat>; | ||
| 127 | using ProgramSegment = std::vector<std::pair<std::string, Block>>; | ||
| 128 | |||
| 129 | // (width in bytes, address, value) | ||
| 130 | using MemoryWriter = void (*)(u32, VAddr, u64); | ||
| 131 | // (width in bytes, address) -> value | ||
| 132 | using MemoryReader = u64 (*)(u32, VAddr); | ||
| 133 | |||
| 134 | void SetMemoryParameters(VAddr main_begin, VAddr heap_begin, VAddr main_end, VAddr heap_end, | ||
| 135 | MemoryWriter writer, MemoryReader reader); | ||
| 136 | |||
| 137 | void Execute(); | ||
| 138 | |||
| 139 | private: | ||
| 140 | CheatList(const Core::System& system_, ProgramSegment master, ProgramSegment standard); | ||
| 141 | |||
| 142 | void ProcessBlockPairs(const Block& block); | ||
| 143 | void ExecuteSingleCheat(const Cheat& cheat); | ||
| 144 | |||
| 145 | void ExecuteBlock(const Block& block); | ||
| 146 | |||
| 147 | bool EvaluateConditional(const Cheat& cheat) const; | ||
| 148 | |||
| 149 | // Individual cheat operations | ||
| 150 | void WriteImmediate(const Cheat& cheat); | ||
| 151 | void BeginConditional(const Cheat& cheat); | ||
| 152 | void EndConditional(const Cheat& cheat); | ||
| 153 | void Loop(const Cheat& cheat); | ||
| 154 | void LoadImmediate(const Cheat& cheat); | ||
| 155 | void LoadIndexed(const Cheat& cheat); | ||
| 156 | void StoreIndexed(const Cheat& cheat); | ||
| 157 | void RegisterArithmetic(const Cheat& cheat); | ||
| 158 | void BeginConditionalInput(const Cheat& cheat); | ||
| 159 | |||
| 160 | VAddr SanitizeAddress(VAddr in) const; | ||
| 161 | |||
| 162 | // Master Codes are defined as codes that cannot be disabled and are run prior to all | ||
| 163 | // others. | ||
| 164 | ProgramSegment master_list; | ||
| 165 | // All other codes | ||
| 166 | ProgramSegment standard_list; | ||
| 167 | |||
| 168 | bool in_standard = false; | ||
| 169 | |||
| 170 | // 16 (0x0-0xF) scratch registers that can be used by cheats | ||
| 171 | std::array<u64, 16> scratch{}; | ||
| 172 | |||
| 173 | MemoryWriter writer = nullptr; | ||
| 174 | MemoryReader reader = nullptr; | ||
| 175 | |||
| 176 | u64 main_region_begin{}; | ||
| 177 | u64 heap_region_begin{}; | ||
| 178 | u64 main_region_end{}; | ||
| 179 | u64 heap_region_end{}; | ||
| 180 | |||
| 181 | u64 current_block{}; | ||
| 182 | // The current index of the cheat within the current Block | ||
| 183 | u64 current_index{}; | ||
| 184 | |||
| 185 | // The 'stack' of the program. When a conditional or loop statement is encountered, its index is | ||
| 186 | // pushed onto this queue. When a end block is encountered, the condition is checked. | ||
| 187 | std::map<u64, u64> block_pairs; | ||
| 188 | |||
| 189 | std::set<u64> encountered_loops; | ||
| 190 | |||
| 191 | const Core::System* system; | ||
| 192 | }; | ||
| 193 | |||
| 194 | // Intermediary class that parses a text file or other disk format for storing cheats into a | ||
| 195 | // CheatList object, that can be used for execution. | ||
| 196 | class CheatParser { | ||
| 197 | public: | ||
| 198 | virtual ~CheatParser(); | ||
| 199 | |||
| 200 | virtual CheatList Parse(const Core::System& system, const std::vector<u8>& data) const = 0; | ||
| 201 | |||
| 202 | protected: | ||
| 203 | CheatList MakeCheatList(const Core::System& system_, CheatList::ProgramSegment master, | ||
| 204 | CheatList::ProgramSegment standard) const; | ||
| 205 | }; | ||
| 206 | |||
| 207 | // CheatParser implementation that parses text files | ||
| 208 | class TextCheatParser final : public CheatParser { | ||
| 209 | public: | ||
| 210 | ~TextCheatParser() override; | ||
| 211 | |||
| 212 | CheatList Parse(const Core::System& system, const std::vector<u8>& data) const override; | ||
| 213 | |||
| 214 | private: | ||
| 215 | std::array<u8, 16> ParseSingleLineCheat(const std::string& line) const; | ||
| 216 | }; | ||
| 217 | |||
| 218 | // Class that encapsulates a CheatList and manages its interaction with memory and CoreTiming | ||
| 219 | class CheatEngine final { | ||
| 220 | public: | ||
| 221 | CheatEngine(Core::System& system_, std::vector<CheatList> cheats_, const std::string& build_id, | ||
| 222 | VAddr code_region_start, VAddr code_region_end); | ||
| 223 | ~CheatEngine(); | ||
| 224 | |||
| 225 | private: | ||
| 226 | void FrameCallback(u64 userdata, s64 cycles_late); | ||
| 227 | |||
| 228 | std::vector<CheatList> cheats; | ||
| 229 | |||
| 230 | Core::Timing::EventType* event; | ||
| 231 | Core::Timing::CoreTiming& core_timing; | ||
| 232 | }; | ||
| 233 | |||
| 234 | } // namespace FileSys | ||
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h index 5d4d05c82..15b9e6624 100644 --- a/src/core/file_sys/content_archive.h +++ b/src/core/file_sys/content_archive.h | |||
| @@ -24,13 +24,26 @@ namespace FileSys { | |||
| 24 | 24 | ||
| 25 | union NCASectionHeader; | 25 | union NCASectionHeader; |
| 26 | 26 | ||
| 27 | /// Describes the type of content within an NCA archive. | ||
| 27 | enum class NCAContentType : u8 { | 28 | enum class NCAContentType : u8 { |
| 29 | /// Executable-related data | ||
| 28 | Program = 0, | 30 | Program = 0, |
| 31 | |||
| 32 | /// Metadata. | ||
| 29 | Meta = 1, | 33 | Meta = 1, |
| 34 | |||
| 35 | /// Access control data. | ||
| 30 | Control = 2, | 36 | Control = 2, |
| 37 | |||
| 38 | /// Information related to the game manual | ||
| 39 | /// e.g. Legal information, etc. | ||
| 31 | Manual = 3, | 40 | Manual = 3, |
| 41 | |||
| 42 | /// System data. | ||
| 32 | Data = 4, | 43 | Data = 4, |
| 33 | Data_Unknown5 = 5, ///< Seems to be used on some system archives | 44 | |
| 45 | /// Data that can be accessed by applications. | ||
| 46 | PublicData = 5, | ||
| 34 | }; | 47 | }; |
| 35 | 48 | ||
| 36 | enum class NCASectionCryptoType : u8 { | 49 | enum class NCASectionCryptoType : u8 { |
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index 61706966e..e11217708 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <cstddef> | 7 | #include <cstddef> |
| 8 | #include <cstring> | 8 | #include <cstring> |
| 9 | 9 | ||
| 10 | #include "common/file_util.h" | ||
| 10 | #include "common/hex_util.h" | 11 | #include "common/hex_util.h" |
| 11 | #include "common/logging/log.h" | 12 | #include "common/logging/log.h" |
| 12 | #include "core/file_sys/content_archive.h" | 13 | #include "core/file_sys/content_archive.h" |
| @@ -19,6 +20,7 @@ | |||
| 19 | #include "core/file_sys/vfs_vector.h" | 20 | #include "core/file_sys/vfs_vector.h" |
| 20 | #include "core/hle/service/filesystem/filesystem.h" | 21 | #include "core/hle/service/filesystem/filesystem.h" |
| 21 | #include "core/loader/loader.h" | 22 | #include "core/loader/loader.h" |
| 23 | #include "core/loader/nso.h" | ||
| 22 | #include "core/settings.h" | 24 | #include "core/settings.h" |
| 23 | 25 | ||
| 24 | namespace FileSys { | 26 | namespace FileSys { |
| @@ -31,14 +33,6 @@ constexpr std::array<const char*, 14> EXEFS_FILE_NAMES{ | |||
| 31 | "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9", | 33 | "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9", |
| 32 | }; | 34 | }; |
| 33 | 35 | ||
| 34 | struct NSOBuildHeader { | ||
| 35 | u32_le magic; | ||
| 36 | INSERT_PADDING_BYTES(0x3C); | ||
| 37 | std::array<u8, 0x20> build_id; | ||
| 38 | INSERT_PADDING_BYTES(0xA0); | ||
| 39 | }; | ||
| 40 | static_assert(sizeof(NSOBuildHeader) == 0x100, "NSOBuildHeader has incorrect size."); | ||
| 41 | |||
| 42 | std::string FormatTitleVersion(u32 version, TitleVersionFormat format) { | 36 | std::string FormatTitleVersion(u32 version, TitleVersionFormat format) { |
| 43 | std::array<u8, sizeof(u32)> bytes{}; | 37 | std::array<u8, sizeof(u32)> bytes{}; |
| 44 | bytes[0] = version % SINGLE_BYTE_MODULUS; | 38 | bytes[0] = version % SINGLE_BYTE_MODULUS; |
| @@ -162,14 +156,16 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD | |||
| 162 | } | 156 | } |
| 163 | 157 | ||
| 164 | std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { | 158 | std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { |
| 165 | if (nso.size() < 0x100) | 159 | if (nso.size() < sizeof(Loader::NSOHeader)) { |
| 166 | return nso; | 160 | return nso; |
| 161 | } | ||
| 167 | 162 | ||
| 168 | NSOBuildHeader header; | 163 | Loader::NSOHeader header; |
| 169 | std::memcpy(&header, nso.data(), sizeof(NSOBuildHeader)); | 164 | std::memcpy(&header, nso.data(), sizeof(header)); |
| 170 | 165 | ||
| 171 | if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) | 166 | if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) { |
| 172 | return nso; | 167 | return nso; |
| 168 | } | ||
| 173 | 169 | ||
| 174 | const auto build_id_raw = Common::HexArrayToString(header.build_id); | 170 | const auto build_id_raw = Common::HexArrayToString(header.build_id); |
| 175 | const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1); | 171 | const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1); |
| @@ -212,9 +208,11 @@ std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { | |||
| 212 | } | 208 | } |
| 213 | } | 209 | } |
| 214 | 210 | ||
| 215 | if (out.size() < 0x100) | 211 | if (out.size() < sizeof(Loader::NSOHeader)) { |
| 216 | return nso; | 212 | return nso; |
| 217 | std::memcpy(out.data(), &header, sizeof(NSOBuildHeader)); | 213 | } |
| 214 | |||
| 215 | std::memcpy(out.data(), &header, sizeof(header)); | ||
| 218 | return out; | 216 | return out; |
| 219 | } | 217 | } |
| 220 | 218 | ||
| @@ -232,6 +230,57 @@ bool PatchManager::HasNSOPatch(const std::array<u8, 32>& build_id_) const { | |||
| 232 | return !CollectPatches(patch_dirs, build_id).empty(); | 230 | return !CollectPatches(patch_dirs, build_id).empty(); |
| 233 | } | 231 | } |
| 234 | 232 | ||
| 233 | static std::optional<CheatList> ReadCheatFileFromFolder(const Core::System& system, u64 title_id, | ||
| 234 | const std::array<u8, 0x20>& build_id_, | ||
| 235 | const VirtualDir& base_path, bool upper) { | ||
| 236 | const auto build_id_raw = Common::HexArrayToString(build_id_, upper); | ||
| 237 | const auto build_id = build_id_raw.substr(0, sizeof(u64) * 2); | ||
| 238 | const auto file = base_path->GetFile(fmt::format("{}.txt", build_id)); | ||
| 239 | |||
| 240 | if (file == nullptr) { | ||
| 241 | LOG_INFO(Common_Filesystem, "No cheats file found for title_id={:016X}, build_id={}", | ||
| 242 | title_id, build_id); | ||
| 243 | return std::nullopt; | ||
| 244 | } | ||
| 245 | |||
| 246 | std::vector<u8> data(file->GetSize()); | ||
| 247 | if (file->Read(data.data(), data.size()) != data.size()) { | ||
| 248 | LOG_INFO(Common_Filesystem, "Failed to read cheats file for title_id={:016X}, build_id={}", | ||
| 249 | title_id, build_id); | ||
| 250 | return std::nullopt; | ||
| 251 | } | ||
| 252 | |||
| 253 | TextCheatParser parser; | ||
| 254 | return parser.Parse(system, data); | ||
| 255 | } | ||
| 256 | |||
| 257 | std::vector<CheatList> PatchManager::CreateCheatList(const Core::System& system, | ||
| 258 | const std::array<u8, 32>& build_id_) const { | ||
| 259 | const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id); | ||
| 260 | auto patch_dirs = load_dir->GetSubdirectories(); | ||
| 261 | std::sort(patch_dirs.begin(), patch_dirs.end(), | ||
| 262 | [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); }); | ||
| 263 | |||
| 264 | std::vector<CheatList> out; | ||
| 265 | out.reserve(patch_dirs.size()); | ||
| 266 | for (const auto& subdir : patch_dirs) { | ||
| 267 | auto cheats_dir = subdir->GetSubdirectory("cheats"); | ||
| 268 | if (cheats_dir != nullptr) { | ||
| 269 | auto res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, true); | ||
| 270 | if (res.has_value()) { | ||
| 271 | out.push_back(std::move(*res)); | ||
| 272 | continue; | ||
| 273 | } | ||
| 274 | |||
| 275 | res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, false); | ||
| 276 | if (res.has_value()) | ||
| 277 | out.push_back(std::move(*res)); | ||
| 278 | } | ||
| 279 | } | ||
| 280 | |||
| 281 | return out; | ||
| 282 | } | ||
| 283 | |||
| 235 | static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType type) { | 284 | static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType type) { |
| 236 | const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id); | 285 | const auto load_dir = Service::FileSystem::GetModificationLoadRoot(title_id); |
| 237 | if ((type != ContentRecordType::Program && type != ContentRecordType::Data) || | 286 | if ((type != ContentRecordType::Program && type != ContentRecordType::Data) || |
| @@ -403,6 +452,8 @@ std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam | |||
| 403 | } | 452 | } |
| 404 | if (IsDirValidAndNonEmpty(mod->GetSubdirectory("romfs"))) | 453 | if (IsDirValidAndNonEmpty(mod->GetSubdirectory("romfs"))) |
| 405 | AppendCommaIfNotEmpty(types, "LayeredFS"); | 454 | AppendCommaIfNotEmpty(types, "LayeredFS"); |
| 455 | if (IsDirValidAndNonEmpty(mod->GetSubdirectory("cheats"))) | ||
| 456 | AppendCommaIfNotEmpty(types, "Cheats"); | ||
| 406 | 457 | ||
| 407 | if (types.empty()) | 458 | if (types.empty()) |
| 408 | continue; | 459 | continue; |
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h index b8a1652fd..de2672c76 100644 --- a/src/core/file_sys/patch_manager.h +++ b/src/core/file_sys/patch_manager.h | |||
| @@ -8,9 +8,14 @@ | |||
| 8 | #include <memory> | 8 | #include <memory> |
| 9 | #include <string> | 9 | #include <string> |
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "core/file_sys/cheat_engine.h" | ||
| 11 | #include "core/file_sys/nca_metadata.h" | 12 | #include "core/file_sys/nca_metadata.h" |
| 12 | #include "core/file_sys/vfs.h" | 13 | #include "core/file_sys/vfs.h" |
| 13 | 14 | ||
| 15 | namespace Core { | ||
| 16 | class System; | ||
| 17 | } | ||
| 18 | |||
| 14 | namespace FileSys { | 19 | namespace FileSys { |
| 15 | 20 | ||
| 16 | class NCA; | 21 | class NCA; |
| @@ -45,6 +50,10 @@ public: | |||
| 45 | // Used to prevent expensive copies in NSO loader. | 50 | // Used to prevent expensive copies in NSO loader. |
| 46 | bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const; | 51 | bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const; |
| 47 | 52 | ||
| 53 | // Creates a CheatList object with all | ||
| 54 | std::vector<CheatList> CreateCheatList(const Core::System& system, | ||
| 55 | const std::array<u8, 0x20>& build_id) const; | ||
| 56 | |||
| 48 | // Currently tracked RomFS patches: | 57 | // Currently tracked RomFS patches: |
| 49 | // - Game Updates | 58 | // - Game Updates |
| 50 | // - LayeredFS | 59 | // - LayeredFS |
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp index 128199063..1c6bacace 100644 --- a/src/core/file_sys/registered_cache.cpp +++ b/src/core/file_sys/registered_cache.cpp | |||
| @@ -94,7 +94,7 @@ static ContentRecordType GetCRTypeFromNCAType(NCAContentType type) { | |||
| 94 | case NCAContentType::Control: | 94 | case NCAContentType::Control: |
| 95 | return ContentRecordType::Control; | 95 | return ContentRecordType::Control; |
| 96 | case NCAContentType::Data: | 96 | case NCAContentType::Data: |
| 97 | case NCAContentType::Data_Unknown5: | 97 | case NCAContentType::PublicData: |
| 98 | return ContentRecordType::Data; | 98 | return ContentRecordType::Data; |
| 99 | case NCAContentType::Manual: | 99 | case NCAContentType::Manual: |
| 100 | // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. | 100 | // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. |
diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h index 455d1f346..fae54bcc7 100644 --- a/src/core/hle/ipc.h +++ b/src/core/hle/ipc.h | |||
| @@ -39,10 +39,10 @@ struct CommandHeader { | |||
| 39 | union { | 39 | union { |
| 40 | u32_le raw_low; | 40 | u32_le raw_low; |
| 41 | BitField<0, 16, CommandType> type; | 41 | BitField<0, 16, CommandType> type; |
| 42 | BitField<16, 4, u32_le> num_buf_x_descriptors; | 42 | BitField<16, 4, u32> num_buf_x_descriptors; |
| 43 | BitField<20, 4, u32_le> num_buf_a_descriptors; | 43 | BitField<20, 4, u32> num_buf_a_descriptors; |
| 44 | BitField<24, 4, u32_le> num_buf_b_descriptors; | 44 | BitField<24, 4, u32> num_buf_b_descriptors; |
| 45 | BitField<28, 4, u32_le> num_buf_w_descriptors; | 45 | BitField<28, 4, u32> num_buf_w_descriptors; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | enum class BufferDescriptorCFlag : u32 { | 48 | enum class BufferDescriptorCFlag : u32 { |
| @@ -53,28 +53,28 @@ struct CommandHeader { | |||
| 53 | 53 | ||
| 54 | union { | 54 | union { |
| 55 | u32_le raw_high; | 55 | u32_le raw_high; |
| 56 | BitField<0, 10, u32_le> data_size; | 56 | BitField<0, 10, u32> data_size; |
| 57 | BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags; | 57 | BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags; |
| 58 | BitField<31, 1, u32_le> enable_handle_descriptor; | 58 | BitField<31, 1, u32> enable_handle_descriptor; |
| 59 | }; | 59 | }; |
| 60 | }; | 60 | }; |
| 61 | static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect"); | 61 | static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect"); |
| 62 | 62 | ||
| 63 | union HandleDescriptorHeader { | 63 | union HandleDescriptorHeader { |
| 64 | u32_le raw_high; | 64 | u32_le raw_high; |
| 65 | BitField<0, 1, u32_le> send_current_pid; | 65 | BitField<0, 1, u32> send_current_pid; |
| 66 | BitField<1, 4, u32_le> num_handles_to_copy; | 66 | BitField<1, 4, u32> num_handles_to_copy; |
| 67 | BitField<5, 4, u32_le> num_handles_to_move; | 67 | BitField<5, 4, u32> num_handles_to_move; |
| 68 | }; | 68 | }; |
| 69 | static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect"); | 69 | static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect"); |
| 70 | 70 | ||
| 71 | struct BufferDescriptorX { | 71 | struct BufferDescriptorX { |
| 72 | union { | 72 | union { |
| 73 | BitField<0, 6, u32_le> counter_bits_0_5; | 73 | BitField<0, 6, u32> counter_bits_0_5; |
| 74 | BitField<6, 3, u32_le> address_bits_36_38; | 74 | BitField<6, 3, u32> address_bits_36_38; |
| 75 | BitField<9, 3, u32_le> counter_bits_9_11; | 75 | BitField<9, 3, u32> counter_bits_9_11; |
| 76 | BitField<12, 4, u32_le> address_bits_32_35; | 76 | BitField<12, 4, u32> address_bits_32_35; |
| 77 | BitField<16, 16, u32_le> size; | 77 | BitField<16, 16, u32> size; |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | u32_le address_bits_0_31; | 80 | u32_le address_bits_0_31; |
| @@ -103,10 +103,10 @@ struct BufferDescriptorABW { | |||
| 103 | u32_le address_bits_0_31; | 103 | u32_le address_bits_0_31; |
| 104 | 104 | ||
| 105 | union { | 105 | union { |
| 106 | BitField<0, 2, u32_le> flags; | 106 | BitField<0, 2, u32> flags; |
| 107 | BitField<2, 3, u32_le> address_bits_36_38; | 107 | BitField<2, 3, u32> address_bits_36_38; |
| 108 | BitField<24, 4, u32_le> size_bits_32_35; | 108 | BitField<24, 4, u32> size_bits_32_35; |
| 109 | BitField<28, 4, u32_le> address_bits_32_35; | 109 | BitField<28, 4, u32> address_bits_32_35; |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | VAddr Address() const { | 112 | VAddr Address() const { |
| @@ -128,8 +128,8 @@ struct BufferDescriptorC { | |||
| 128 | u32_le address_bits_0_31; | 128 | u32_le address_bits_0_31; |
| 129 | 129 | ||
| 130 | union { | 130 | union { |
| 131 | BitField<0, 16, u32_le> address_bits_32_47; | 131 | BitField<0, 16, u32> address_bits_32_47; |
| 132 | BitField<16, 16, u32_le> size; | 132 | BitField<16, 16, u32> size; |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | VAddr Address() const { | 135 | VAddr Address() const { |
| @@ -167,8 +167,8 @@ struct DomainMessageHeader { | |||
| 167 | struct { | 167 | struct { |
| 168 | union { | 168 | union { |
| 169 | BitField<0, 8, CommandType> command; | 169 | BitField<0, 8, CommandType> command; |
| 170 | BitField<8, 8, u32_le> input_object_count; | 170 | BitField<8, 8, u32> input_object_count; |
| 171 | BitField<16, 16, u32_le> size; | 171 | BitField<16, 16, u32> size; |
| 172 | }; | 172 | }; |
| 173 | u32_le object_id; | 173 | u32_le object_id; |
| 174 | INSERT_PADDING_WORDS(2); | 174 | INSERT_PADDING_WORDS(2); |
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 079283830..68406eb63 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h | |||
| @@ -19,9 +19,12 @@ | |||
| 19 | #include "core/hle/kernel/hle_ipc.h" | 19 | #include "core/hle/kernel/hle_ipc.h" |
| 20 | #include "core/hle/kernel/object.h" | 20 | #include "core/hle/kernel/object.h" |
| 21 | #include "core/hle/kernel/server_session.h" | 21 | #include "core/hle/kernel/server_session.h" |
| 22 | #include "core/hle/result.h" | ||
| 22 | 23 | ||
| 23 | namespace IPC { | 24 | namespace IPC { |
| 24 | 25 | ||
| 26 | constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301}; | ||
| 27 | |||
| 25 | class RequestHelperBase { | 28 | class RequestHelperBase { |
| 26 | protected: | 29 | protected: |
| 27 | Kernel::HLERequestContext* context = nullptr; | 30 | Kernel::HLERequestContext* context = nullptr; |
| @@ -272,6 +275,20 @@ inline void ResponseBuilder::Push(u64 value) { | |||
| 272 | } | 275 | } |
| 273 | 276 | ||
| 274 | template <> | 277 | template <> |
| 278 | inline void ResponseBuilder::Push(float value) { | ||
| 279 | u32 integral; | ||
| 280 | std::memcpy(&integral, &value, sizeof(u32)); | ||
| 281 | Push(integral); | ||
| 282 | } | ||
| 283 | |||
| 284 | template <> | ||
| 285 | inline void ResponseBuilder::Push(double value) { | ||
| 286 | u64 integral; | ||
| 287 | std::memcpy(&integral, &value, sizeof(u64)); | ||
| 288 | Push(integral); | ||
| 289 | } | ||
| 290 | |||
| 291 | template <> | ||
| 275 | inline void ResponseBuilder::Push(bool value) { | 292 | inline void ResponseBuilder::Push(bool value) { |
| 276 | Push(static_cast<u8>(value)); | 293 | Push(static_cast<u8>(value)); |
| 277 | } | 294 | } |
| @@ -362,6 +379,11 @@ inline u32 RequestParser::Pop() { | |||
| 362 | return cmdbuf[index++]; | 379 | return cmdbuf[index++]; |
| 363 | } | 380 | } |
| 364 | 381 | ||
| 382 | template <> | ||
| 383 | inline s32 RequestParser::Pop() { | ||
| 384 | return static_cast<s32>(Pop<u32>()); | ||
| 385 | } | ||
| 386 | |||
| 365 | template <typename T> | 387 | template <typename T> |
| 366 | void RequestParser::PopRaw(T& value) { | 388 | void RequestParser::PopRaw(T& value) { |
| 367 | std::memcpy(&value, cmdbuf + index, sizeof(T)); | 389 | std::memcpy(&value, cmdbuf + index, sizeof(T)); |
| @@ -393,11 +415,37 @@ inline u64 RequestParser::Pop() { | |||
| 393 | } | 415 | } |
| 394 | 416 | ||
| 395 | template <> | 417 | template <> |
| 418 | inline s8 RequestParser::Pop() { | ||
| 419 | return static_cast<s8>(Pop<u8>()); | ||
| 420 | } | ||
| 421 | |||
| 422 | template <> | ||
| 423 | inline s16 RequestParser::Pop() { | ||
| 424 | return static_cast<s16>(Pop<u16>()); | ||
| 425 | } | ||
| 426 | |||
| 427 | template <> | ||
| 396 | inline s64 RequestParser::Pop() { | 428 | inline s64 RequestParser::Pop() { |
| 397 | return static_cast<s64>(Pop<u64>()); | 429 | return static_cast<s64>(Pop<u64>()); |
| 398 | } | 430 | } |
| 399 | 431 | ||
| 400 | template <> | 432 | template <> |
| 433 | inline float RequestParser::Pop() { | ||
| 434 | const u32 value = Pop<u32>(); | ||
| 435 | float real; | ||
| 436 | std::memcpy(&real, &value, sizeof(real)); | ||
| 437 | return real; | ||
| 438 | } | ||
| 439 | |||
| 440 | template <> | ||
| 441 | inline double RequestParser::Pop() { | ||
| 442 | const u64 value = Pop<u64>(); | ||
| 443 | double real; | ||
| 444 | std::memcpy(&real, &value, sizeof(real)); | ||
| 445 | return real; | ||
| 446 | } | ||
| 447 | |||
| 448 | template <> | ||
| 401 | inline bool RequestParser::Pop() { | 449 | inline bool RequestParser::Pop() { |
| 402 | return Pop<u8>() != 0; | 450 | return Pop<u8>() != 0; |
| 403 | } | 451 | } |
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 9780a7849..352190da8 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp | |||
| @@ -42,7 +42,21 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ | |||
| 42 | AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} | 42 | AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} |
| 43 | AddressArbiter::~AddressArbiter() = default; | 43 | AddressArbiter::~AddressArbiter() = default; |
| 44 | 44 | ||
| 45 | ResultCode AddressArbiter::SignalToAddress(VAddr address, s32 num_to_wake) { | 45 | ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value, |
| 46 | s32 num_to_wake) { | ||
| 47 | switch (type) { | ||
| 48 | case SignalType::Signal: | ||
| 49 | return SignalToAddressOnly(address, num_to_wake); | ||
| 50 | case SignalType::IncrementAndSignalIfEqual: | ||
| 51 | return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 52 | case SignalType::ModifyByWaitingCountAndSignalIfEqual: | ||
| 53 | return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 54 | default: | ||
| 55 | return ERR_INVALID_ENUM_VALUE; | ||
| 56 | } | ||
| 57 | } | ||
| 58 | |||
| 59 | ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { | ||
| 46 | const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); | 60 | const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); |
| 47 | WakeThreads(waiting_threads, num_to_wake); | 61 | WakeThreads(waiting_threads, num_to_wake); |
| 48 | return RESULT_SUCCESS; | 62 | return RESULT_SUCCESS; |
| @@ -60,7 +74,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 | |||
| 60 | } | 74 | } |
| 61 | 75 | ||
| 62 | Memory::Write32(address, static_cast<u32>(value + 1)); | 76 | Memory::Write32(address, static_cast<u32>(value + 1)); |
| 63 | return SignalToAddress(address, num_to_wake); | 77 | return SignalToAddressOnly(address, num_to_wake); |
| 64 | } | 78 | } |
| 65 | 79 | ||
| 66 | ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, | 80 | ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, |
| @@ -92,6 +106,20 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a | |||
| 92 | return RESULT_SUCCESS; | 106 | return RESULT_SUCCESS; |
| 93 | } | 107 | } |
| 94 | 108 | ||
| 109 | ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value, | ||
| 110 | s64 timeout_ns) { | ||
| 111 | switch (type) { | ||
| 112 | case ArbitrationType::WaitIfLessThan: | ||
| 113 | return WaitForAddressIfLessThan(address, value, timeout_ns, false); | ||
| 114 | case ArbitrationType::DecrementAndWaitIfLessThan: | ||
| 115 | return WaitForAddressIfLessThan(address, value, timeout_ns, true); | ||
| 116 | case ArbitrationType::WaitIfEqual: | ||
| 117 | return WaitForAddressIfEqual(address, value, timeout_ns); | ||
| 118 | default: | ||
| 119 | return ERR_INVALID_ENUM_VALUE; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 95 | ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, | 123 | ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, |
| 96 | bool should_decrement) { | 124 | bool should_decrement) { |
| 97 | // Ensure that we can read the address. | 125 | // Ensure that we can read the address. |
| @@ -113,7 +141,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 | |||
| 113 | return RESULT_TIMEOUT; | 141 | return RESULT_TIMEOUT; |
| 114 | } | 142 | } |
| 115 | 143 | ||
| 116 | return WaitForAddress(address, timeout); | 144 | return WaitForAddressImpl(address, timeout); |
| 117 | } | 145 | } |
| 118 | 146 | ||
| 119 | ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { | 147 | ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { |
| @@ -130,10 +158,10 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t | |||
| 130 | return RESULT_TIMEOUT; | 158 | return RESULT_TIMEOUT; |
| 131 | } | 159 | } |
| 132 | 160 | ||
| 133 | return WaitForAddress(address, timeout); | 161 | return WaitForAddressImpl(address, timeout); |
| 134 | } | 162 | } |
| 135 | 163 | ||
| 136 | ResultCode AddressArbiter::WaitForAddress(VAddr address, s64 timeout) { | 164 | ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { |
| 137 | SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); | 165 | SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); |
| 138 | current_thread->SetArbiterWaitAddress(address); | 166 | current_thread->SetArbiterWaitAddress(address); |
| 139 | current_thread->SetStatus(ThreadStatus::WaitArb); | 167 | current_thread->SetStatus(ThreadStatus::WaitArb); |
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h index e0c36f2e3..ed0d0e69f 100644 --- a/src/core/hle/kernel/address_arbiter.h +++ b/src/core/hle/kernel/address_arbiter.h | |||
| @@ -4,8 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <vector> | ||
| 8 | |||
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 8 | #include "core/hle/kernel/address_arbiter.h" | 10 | #include "core/hle/kernel/object.h" |
| 9 | 11 | ||
| 10 | union ResultCode; | 12 | union ResultCode; |
| 11 | 13 | ||
| @@ -40,8 +42,15 @@ public: | |||
| 40 | AddressArbiter(AddressArbiter&&) = default; | 42 | AddressArbiter(AddressArbiter&&) = default; |
| 41 | AddressArbiter& operator=(AddressArbiter&&) = delete; | 43 | AddressArbiter& operator=(AddressArbiter&&) = delete; |
| 42 | 44 | ||
| 45 | /// Signals an address being waited on with a particular signaling type. | ||
| 46 | ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake); | ||
| 47 | |||
| 48 | /// Waits on an address with a particular arbitration type. | ||
| 49 | ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); | ||
| 50 | |||
| 51 | private: | ||
| 43 | /// Signals an address being waited on. | 52 | /// Signals an address being waited on. |
| 44 | ResultCode SignalToAddress(VAddr address, s32 num_to_wake); | 53 | ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); |
| 45 | 54 | ||
| 46 | /// Signals an address being waited on and increments its value if equal to the value argument. | 55 | /// Signals an address being waited on and increments its value if equal to the value argument. |
| 47 | ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); | 56 | ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); |
| @@ -59,9 +68,8 @@ public: | |||
| 59 | /// Waits on an address if the value passed is equal to the argument value. | 68 | /// Waits on an address if the value passed is equal to the argument value. |
| 60 | ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); | 69 | ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); |
| 61 | 70 | ||
| 62 | private: | ||
| 63 | // Waits on the given address with a timeout in nanoseconds | 71 | // Waits on the given address with a timeout in nanoseconds |
| 64 | ResultCode WaitForAddress(VAddr address, s64 timeout); | 72 | ResultCode WaitForAddressImpl(VAddr address, s64 timeout); |
| 65 | 73 | ||
| 66 | // Gets the threads waiting on an address. | 74 | // Gets the threads waiting on an address. |
| 67 | std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const; | 75 | std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const; |
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp index d4c91d529..aa432658e 100644 --- a/src/core/hle/kernel/client_port.cpp +++ b/src/core/hle/kernel/client_port.cpp | |||
| @@ -33,10 +33,11 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() { | |||
| 33 | // Create a new session pair, let the created sessions inherit the parent port's HLE handler. | 33 | // Create a new session pair, let the created sessions inherit the parent port's HLE handler. |
| 34 | auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); | 34 | auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); |
| 35 | 35 | ||
| 36 | if (server_port->hle_handler) | 36 | if (server_port->HasHLEHandler()) { |
| 37 | server_port->hle_handler->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); | 37 | server_port->GetHLEHandler()->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); |
| 38 | else | 38 | } else { |
| 39 | server_port->pending_sessions.push_back(std::get<SharedPtr<ServerSession>>(sessions)); | 39 | server_port->AppendPendingSession(std::get<SharedPtr<ServerSession>>(sessions)); |
| 40 | } | ||
| 40 | 41 | ||
| 41 | // Wake the threads waiting on the ServerPort | 42 | // Wake the threads waiting on the ServerPort |
| 42 | server_port->WakeupAllWaitingThreads(); | 43 | server_port->WakeupAllWaitingThreads(); |
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp new file mode 100644 index 000000000..1f434e9af --- /dev/null +++ b/src/core/hle/kernel/code_set.cpp | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "core/hle/kernel/code_set.h" | ||
| 6 | |||
| 7 | namespace Kernel { | ||
| 8 | |||
| 9 | CodeSet::CodeSet() = default; | ||
| 10 | CodeSet::~CodeSet() = default; | ||
| 11 | |||
| 12 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h new file mode 100644 index 000000000..834fd23d2 --- /dev/null +++ b/src/core/hle/kernel/code_set.h | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <cstddef> | ||
| 8 | #include <memory> | ||
| 9 | #include <vector> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | /** | ||
| 16 | * Represents executable data that may be loaded into a kernel process. | ||
| 17 | * | ||
| 18 | * A code set consists of three basic segments: | ||
| 19 | * - A code (AKA text) segment, | ||
| 20 | * - A read-only data segment (rodata) | ||
| 21 | * - A data segment | ||
| 22 | * | ||
| 23 | * The code segment is the portion of the object file that contains | ||
| 24 | * executable instructions. | ||
| 25 | * | ||
| 26 | * The read-only data segment is the portion of the object file that | ||
| 27 | * contains (as one would expect) read-only data, such as fixed constant | ||
| 28 | * values and data structures. | ||
| 29 | * | ||
| 30 | * The data segment is similar to the read-only data segment -- it contains | ||
| 31 | * variables and data structures that have predefined values, however, | ||
| 32 | * entities within this segment can be modified. | ||
| 33 | */ | ||
| 34 | struct CodeSet final { | ||
| 35 | /// A single segment within a code set. | ||
| 36 | struct Segment final { | ||
| 37 | /// The byte offset that this segment is located at. | ||
| 38 | std::size_t offset = 0; | ||
| 39 | |||
| 40 | /// The address to map this segment to. | ||
| 41 | VAddr addr = 0; | ||
| 42 | |||
| 43 | /// The size of this segment in bytes. | ||
| 44 | u32 size = 0; | ||
| 45 | }; | ||
| 46 | |||
| 47 | explicit CodeSet(); | ||
| 48 | ~CodeSet(); | ||
| 49 | |||
| 50 | CodeSet(const CodeSet&) = delete; | ||
| 51 | CodeSet& operator=(const CodeSet&) = delete; | ||
| 52 | |||
| 53 | CodeSet(CodeSet&&) = default; | ||
| 54 | CodeSet& operator=(CodeSet&&) = default; | ||
| 55 | |||
| 56 | Segment& CodeSegment() { | ||
| 57 | return segments[0]; | ||
| 58 | } | ||
| 59 | |||
| 60 | const Segment& CodeSegment() const { | ||
| 61 | return segments[0]; | ||
| 62 | } | ||
| 63 | |||
| 64 | Segment& RODataSegment() { | ||
| 65 | return segments[1]; | ||
| 66 | } | ||
| 67 | |||
| 68 | const Segment& RODataSegment() const { | ||
| 69 | return segments[1]; | ||
| 70 | } | ||
| 71 | |||
| 72 | Segment& DataSegment() { | ||
| 73 | return segments[2]; | ||
| 74 | } | ||
| 75 | |||
| 76 | const Segment& DataSegment() const { | ||
| 77 | return segments[2]; | ||
| 78 | } | ||
| 79 | |||
| 80 | /// The overall data that backs this code set. | ||
| 81 | std::shared_ptr<std::vector<u8>> memory; | ||
| 82 | |||
| 83 | /// The segments that comprise this code set. | ||
| 84 | std::array<Segment, 3> segments; | ||
| 85 | |||
| 86 | /// The entry point address for this code set. | ||
| 87 | VAddr entrypoint = 0; | ||
| 88 | }; | ||
| 89 | |||
| 90 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 04ea9349e..4d224d01d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -87,7 +87,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_ | |||
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | struct KernelCore::Impl { | 89 | struct KernelCore::Impl { |
| 90 | explicit Impl(Core::System& system) : address_arbiter{system}, system{system} {} | 90 | explicit Impl(Core::System& system) : system{system} {} |
| 91 | 91 | ||
| 92 | void Initialize(KernelCore& kernel) { | 92 | void Initialize(KernelCore& kernel) { |
| 93 | Shutdown(); | 93 | Shutdown(); |
| @@ -138,8 +138,6 @@ struct KernelCore::Impl { | |||
| 138 | std::vector<SharedPtr<Process>> process_list; | 138 | std::vector<SharedPtr<Process>> process_list; |
| 139 | Process* current_process = nullptr; | 139 | Process* current_process = nullptr; |
| 140 | 140 | ||
| 141 | Kernel::AddressArbiter address_arbiter; | ||
| 142 | |||
| 143 | SharedPtr<ResourceLimit> system_resource_limit; | 141 | SharedPtr<ResourceLimit> system_resource_limit; |
| 144 | 142 | ||
| 145 | Core::Timing::EventType* thread_wakeup_event_type = nullptr; | 143 | Core::Timing::EventType* thread_wakeup_event_type = nullptr; |
| @@ -192,14 +190,6 @@ const Process* KernelCore::CurrentProcess() const { | |||
| 192 | return impl->current_process; | 190 | return impl->current_process; |
| 193 | } | 191 | } |
| 194 | 192 | ||
| 195 | AddressArbiter& KernelCore::AddressArbiter() { | ||
| 196 | return impl->address_arbiter; | ||
| 197 | } | ||
| 198 | |||
| 199 | const AddressArbiter& KernelCore::AddressArbiter() const { | ||
| 200 | return impl->address_arbiter; | ||
| 201 | } | ||
| 202 | |||
| 203 | void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { | 193 | void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { |
| 204 | impl->named_ports.emplace(std::move(name), std::move(port)); | 194 | impl->named_ports.emplace(std::move(name), std::move(port)); |
| 205 | } | 195 | } |
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 4d292aca9..ff17ff865 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -75,12 +75,6 @@ public: | |||
| 75 | /// Retrieves a const pointer to the current process. | 75 | /// Retrieves a const pointer to the current process. |
| 76 | const Process* CurrentProcess() const; | 76 | const Process* CurrentProcess() const; |
| 77 | 77 | ||
| 78 | /// Provides a reference to the kernel's address arbiter. | ||
| 79 | Kernel::AddressArbiter& AddressArbiter(); | ||
| 80 | |||
| 81 | /// Provides a const reference to the kernel's address arbiter. | ||
| 82 | const Kernel::AddressArbiter& AddressArbiter() const; | ||
| 83 | |||
| 84 | /// Adds a port to the named port table | 78 | /// Adds a port to the named port table |
| 85 | void AddNamedPort(std::string name, SharedPtr<ClientPort> port); | 79 | void AddNamedPort(std::string name, SharedPtr<ClientPort> port); |
| 86 | 80 | ||
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 0743670ad..98e87313b 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <map> | ||
| 6 | #include <utility> | 5 | #include <utility> |
| 7 | #include <vector> | 6 | #include <vector> |
| 8 | 7 | ||
| @@ -10,8 +9,11 @@ | |||
| 10 | #include "core/core.h" | 9 | #include "core/core.h" |
| 11 | #include "core/hle/kernel/errors.h" | 10 | #include "core/hle/kernel/errors.h" |
| 12 | #include "core/hle/kernel/handle_table.h" | 11 | #include "core/hle/kernel/handle_table.h" |
| 12 | #include "core/hle/kernel/kernel.h" | ||
| 13 | #include "core/hle/kernel/mutex.h" | 13 | #include "core/hle/kernel/mutex.h" |
| 14 | #include "core/hle/kernel/object.h" | 14 | #include "core/hle/kernel/object.h" |
| 15 | #include "core/hle/kernel/process.h" | ||
| 16 | #include "core/hle/kernel/scheduler.h" | ||
| 15 | #include "core/hle/kernel/thread.h" | 17 | #include "core/hle/kernel/thread.h" |
| 16 | #include "core/hle/result.h" | 18 | #include "core/hle/result.h" |
| 17 | #include "core/memory.h" | 19 | #include "core/memory.h" |
| @@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t | |||
| 57 | } | 59 | } |
| 58 | } | 60 | } |
| 59 | 61 | ||
| 60 | ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle, | 62 | Mutex::Mutex(Core::System& system) : system{system} {} |
| 63 | Mutex::~Mutex() = default; | ||
| 64 | |||
| 65 | ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, | ||
| 61 | Handle requesting_thread_handle) { | 66 | Handle requesting_thread_handle) { |
| 62 | // The mutex address must be 4-byte aligned | 67 | // The mutex address must be 4-byte aligned |
| 63 | if ((address % sizeof(u32)) != 0) { | 68 | if ((address % sizeof(u32)) != 0) { |
| 64 | return ERR_INVALID_ADDRESS; | 69 | return ERR_INVALID_ADDRESS; |
| 65 | } | 70 | } |
| 66 | 71 | ||
| 72 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||
| 73 | Thread* const current_thread = system.CurrentScheduler().GetCurrentThread(); | ||
| 67 | SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); | 74 | SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); |
| 68 | SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); | 75 | SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); |
| 69 | 76 | ||
| 70 | // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another | 77 | // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another |
| 71 | // thread. | 78 | // thread. |
| 72 | ASSERT(requesting_thread == GetCurrentThread()); | 79 | ASSERT(requesting_thread == current_thread); |
| 73 | 80 | ||
| 74 | u32 addr_value = Memory::Read32(address); | 81 | const u32 addr_value = Memory::Read32(address); |
| 75 | 82 | ||
| 76 | // If the mutex isn't being held, just return success. | 83 | // If the mutex isn't being held, just return success. |
| 77 | if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { | 84 | if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { |
| 78 | return RESULT_SUCCESS; | 85 | return RESULT_SUCCESS; |
| 79 | } | 86 | } |
| 80 | 87 | ||
| 81 | if (holding_thread == nullptr) | 88 | if (holding_thread == nullptr) { |
| 82 | return ERR_INVALID_HANDLE; | 89 | return ERR_INVALID_HANDLE; |
| 90 | } | ||
| 83 | 91 | ||
| 84 | // Wait until the mutex is released | 92 | // Wait until the mutex is released |
| 85 | GetCurrentThread()->SetMutexWaitAddress(address); | 93 | current_thread->SetMutexWaitAddress(address); |
| 86 | GetCurrentThread()->SetWaitHandle(requesting_thread_handle); | 94 | current_thread->SetWaitHandle(requesting_thread_handle); |
| 87 | 95 | ||
| 88 | GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex); | 96 | current_thread->SetStatus(ThreadStatus::WaitMutex); |
| 89 | GetCurrentThread()->InvalidateWakeupCallback(); | 97 | current_thread->InvalidateWakeupCallback(); |
| 90 | 98 | ||
| 91 | // Update the lock holder thread's priority to prevent priority inversion. | 99 | // Update the lock holder thread's priority to prevent priority inversion. |
| 92 | holding_thread->AddMutexWaiter(GetCurrentThread()); | 100 | holding_thread->AddMutexWaiter(current_thread); |
| 93 | 101 | ||
| 94 | Core::System::GetInstance().PrepareReschedule(); | 102 | system.PrepareReschedule(); |
| 95 | 103 | ||
| 96 | return RESULT_SUCCESS; | 104 | return RESULT_SUCCESS; |
| 97 | } | 105 | } |
| @@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) { | |||
| 102 | return ERR_INVALID_ADDRESS; | 110 | return ERR_INVALID_ADDRESS; |
| 103 | } | 111 | } |
| 104 | 112 | ||
| 105 | auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address); | 113 | auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); |
| 114 | auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address); | ||
| 106 | 115 | ||
| 107 | // There are no more threads waiting for the mutex, release it completely. | 116 | // There are no more threads waiting for the mutex, release it completely. |
| 108 | if (thread == nullptr) { | 117 | if (thread == nullptr) { |
| @@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) { | |||
| 111 | } | 120 | } |
| 112 | 121 | ||
| 113 | // Transfer the ownership of the mutex from the previous owner to the new one. | 122 | // Transfer the ownership of the mutex from the previous owner to the new one. |
| 114 | TransferMutexOwnership(address, GetCurrentThread(), thread); | 123 | TransferMutexOwnership(address, current_thread, thread); |
| 115 | 124 | ||
| 116 | u32 mutex_value = thread->GetWaitHandle(); | 125 | u32 mutex_value = thread->GetWaitHandle(); |
| 117 | 126 | ||
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h index 81e62d497..b904de2e8 100644 --- a/src/core/hle/kernel/mutex.h +++ b/src/core/hle/kernel/mutex.h | |||
| @@ -5,32 +5,34 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/hle/kernel/object.h" | ||
| 9 | 8 | ||
| 10 | union ResultCode; | 9 | union ResultCode; |
| 11 | 10 | ||
| 12 | namespace Kernel { | 11 | namespace Core { |
| 12 | class System; | ||
| 13 | } | ||
| 13 | 14 | ||
| 14 | class HandleTable; | 15 | namespace Kernel { |
| 15 | class Thread; | ||
| 16 | 16 | ||
| 17 | class Mutex final { | 17 | class Mutex final { |
| 18 | public: | 18 | public: |
| 19 | explicit Mutex(Core::System& system); | ||
| 20 | ~Mutex(); | ||
| 21 | |||
| 19 | /// Flag that indicates that a mutex still has threads waiting for it. | 22 | /// Flag that indicates that a mutex still has threads waiting for it. |
| 20 | static constexpr u32 MutexHasWaitersFlag = 0x40000000; | 23 | static constexpr u32 MutexHasWaitersFlag = 0x40000000; |
| 21 | /// Mask of the bits in a mutex address value that contain the mutex owner. | 24 | /// Mask of the bits in a mutex address value that contain the mutex owner. |
| 22 | static constexpr u32 MutexOwnerMask = 0xBFFFFFFF; | 25 | static constexpr u32 MutexOwnerMask = 0xBFFFFFFF; |
| 23 | 26 | ||
| 24 | /// Attempts to acquire a mutex at the specified address. | 27 | /// Attempts to acquire a mutex at the specified address. |
| 25 | static ResultCode TryAcquire(HandleTable& handle_table, VAddr address, | 28 | ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, |
| 26 | Handle holding_thread_handle, Handle requesting_thread_handle); | 29 | Handle requesting_thread_handle); |
| 27 | 30 | ||
| 28 | /// Releases the mutex at the specified address. | 31 | /// Releases the mutex at the specified address. |
| 29 | static ResultCode Release(VAddr address); | 32 | ResultCode Release(VAddr address); |
| 30 | 33 | ||
| 31 | private: | 34 | private: |
| 32 | Mutex() = default; | 35 | Core::System& system; |
| 33 | ~Mutex() = default; | ||
| 34 | }; | 36 | }; |
| 35 | 37 | ||
| 36 | } // namespace Kernel | 38 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 8009150e0..0d782e4ba 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/file_sys/program_metadata.h" | 11 | #include "core/file_sys/program_metadata.h" |
| 12 | #include "core/hle/kernel/code_set.h" | ||
| 12 | #include "core/hle/kernel/errors.h" | 13 | #include "core/hle/kernel/errors.h" |
| 13 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| 14 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| @@ -31,7 +32,7 @@ namespace { | |||
| 31 | */ | 32 | */ |
| 32 | void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { | 33 | void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { |
| 33 | // Setup page table so we can write to memory | 34 | // Setup page table so we can write to memory |
| 34 | SetCurrentPageTable(&owner_process.VMManager().page_table); | 35 | Memory::SetCurrentPageTable(&owner_process.VMManager().page_table); |
| 35 | 36 | ||
| 36 | // Initialize new "main" thread | 37 | // Initialize new "main" thread |
| 37 | const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); | 38 | const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); |
| @@ -50,12 +51,10 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi | |||
| 50 | } | 51 | } |
| 51 | } // Anonymous namespace | 52 | } // Anonymous namespace |
| 52 | 53 | ||
| 53 | CodeSet::CodeSet() = default; | 54 | SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) { |
| 54 | CodeSet::~CodeSet() = default; | 55 | auto& kernel = system.Kernel(); |
| 55 | |||
| 56 | SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { | ||
| 57 | SharedPtr<Process> process(new Process(kernel)); | ||
| 58 | 56 | ||
| 57 | SharedPtr<Process> process(new Process(system)); | ||
| 59 | process->name = std::move(name); | 58 | process->name = std::move(name); |
| 60 | process->resource_limit = kernel.GetSystemResourceLimit(); | 59 | process->resource_limit = kernel.GetSystemResourceLimit(); |
| 61 | process->status = ProcessStatus::Created; | 60 | process->status = ProcessStatus::Created; |
| @@ -132,7 +131,7 @@ void Process::PrepareForTermination() { | |||
| 132 | if (thread->GetOwnerProcess() != this) | 131 | if (thread->GetOwnerProcess() != this) |
| 133 | continue; | 132 | continue; |
| 134 | 133 | ||
| 135 | if (thread == GetCurrentThread()) | 134 | if (thread == system.CurrentScheduler().GetCurrentThread()) |
| 136 | continue; | 135 | continue; |
| 137 | 136 | ||
| 138 | // TODO(Subv): When are the other running/ready threads terminated? | 137 | // TODO(Subv): When are the other running/ready threads terminated? |
| @@ -144,7 +143,6 @@ void Process::PrepareForTermination() { | |||
| 144 | } | 143 | } |
| 145 | }; | 144 | }; |
| 146 | 145 | ||
| 147 | const auto& system = Core::System::GetInstance(); | ||
| 148 | stop_threads(system.Scheduler(0).GetThreadList()); | 146 | stop_threads(system.Scheduler(0).GetThreadList()); |
| 149 | stop_threads(system.Scheduler(1).GetThreadList()); | 147 | stop_threads(system.Scheduler(1).GetThreadList()); |
| 150 | stop_threads(system.Scheduler(2).GetThreadList()); | 148 | stop_threads(system.Scheduler(2).GetThreadList()); |
| @@ -212,7 +210,7 @@ void Process::FreeTLSSlot(VAddr tls_address) { | |||
| 212 | } | 210 | } |
| 213 | 211 | ||
| 214 | void Process::LoadModule(CodeSet module_, VAddr base_addr) { | 212 | void Process::LoadModule(CodeSet module_, VAddr base_addr) { |
| 215 | const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, | 213 | const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, |
| 216 | MemoryState memory_state) { | 214 | MemoryState memory_state) { |
| 217 | const auto vma = vm_manager | 215 | const auto vma = vm_manager |
| 218 | .MapMemoryBlock(segment.addr + base_addr, module_.memory, | 216 | .MapMemoryBlock(segment.addr + base_addr, module_.memory, |
| @@ -222,19 +220,18 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { | |||
| 222 | }; | 220 | }; |
| 223 | 221 | ||
| 224 | // Map CodeSet segments | 222 | // Map CodeSet segments |
| 225 | MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic); | 223 | MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); |
| 226 | MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable); | 224 | MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); |
| 227 | MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); | 225 | MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); |
| 228 | 226 | ||
| 229 | // Clear instruction cache in CPU JIT | 227 | // Clear instruction cache in CPU JIT |
| 230 | Core::System::GetInstance().ArmInterface(0).ClearInstructionCache(); | 228 | system.InvalidateCpuInstructionCaches(); |
| 231 | Core::System::GetInstance().ArmInterface(1).ClearInstructionCache(); | ||
| 232 | Core::System::GetInstance().ArmInterface(2).ClearInstructionCache(); | ||
| 233 | Core::System::GetInstance().ArmInterface(3).ClearInstructionCache(); | ||
| 234 | } | 229 | } |
| 235 | 230 | ||
| 236 | Kernel::Process::Process(KernelCore& kernel) : WaitObject{kernel} {} | 231 | Process::Process(Core::System& system) |
| 237 | Kernel::Process::~Process() {} | 232 | : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} |
| 233 | |||
| 234 | Process::~Process() = default; | ||
| 238 | 235 | ||
| 239 | void Process::Acquire(Thread* thread) { | 236 | void Process::Acquire(Thread* thread) { |
| 240 | ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); | 237 | ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); |
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index dcc57ae9f..1bd7bf5c1 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h | |||
| @@ -7,17 +7,22 @@ | |||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <bitset> | 8 | #include <bitset> |
| 9 | #include <cstddef> | 9 | #include <cstddef> |
| 10 | #include <memory> | ||
| 11 | #include <string> | 10 | #include <string> |
| 12 | #include <vector> | 11 | #include <vector> |
| 13 | #include <boost/container/static_vector.hpp> | 12 | #include <boost/container/static_vector.hpp> |
| 14 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 14 | #include "core/hle/kernel/address_arbiter.h" | ||
| 15 | #include "core/hle/kernel/handle_table.h" | 15 | #include "core/hle/kernel/handle_table.h" |
| 16 | #include "core/hle/kernel/mutex.h" | ||
| 16 | #include "core/hle/kernel/process_capability.h" | 17 | #include "core/hle/kernel/process_capability.h" |
| 17 | #include "core/hle/kernel/vm_manager.h" | 18 | #include "core/hle/kernel/vm_manager.h" |
| 18 | #include "core/hle/kernel/wait_object.h" | 19 | #include "core/hle/kernel/wait_object.h" |
| 19 | #include "core/hle/result.h" | 20 | #include "core/hle/result.h" |
| 20 | 21 | ||
| 22 | namespace Core { | ||
| 23 | class System; | ||
| 24 | } | ||
| 25 | |||
| 21 | namespace FileSys { | 26 | namespace FileSys { |
| 22 | class ProgramMetadata; | 27 | class ProgramMetadata; |
| 23 | } | 28 | } |
| @@ -28,6 +33,8 @@ class KernelCore; | |||
| 28 | class ResourceLimit; | 33 | class ResourceLimit; |
| 29 | class Thread; | 34 | class Thread; |
| 30 | 35 | ||
| 36 | struct CodeSet; | ||
| 37 | |||
| 31 | struct AddressMapping { | 38 | struct AddressMapping { |
| 32 | // Address and size must be page-aligned | 39 | // Address and size must be page-aligned |
| 33 | VAddr address; | 40 | VAddr address; |
| @@ -60,46 +67,6 @@ enum class ProcessStatus { | |||
| 60 | DebugBreak, | 67 | DebugBreak, |
| 61 | }; | 68 | }; |
| 62 | 69 | ||
| 63 | struct CodeSet final { | ||
| 64 | struct Segment { | ||
| 65 | std::size_t offset = 0; | ||
| 66 | VAddr addr = 0; | ||
| 67 | u32 size = 0; | ||
| 68 | }; | ||
| 69 | |||
| 70 | explicit CodeSet(); | ||
| 71 | ~CodeSet(); | ||
| 72 | |||
| 73 | Segment& CodeSegment() { | ||
| 74 | return segments[0]; | ||
| 75 | } | ||
| 76 | |||
| 77 | const Segment& CodeSegment() const { | ||
| 78 | return segments[0]; | ||
| 79 | } | ||
| 80 | |||
| 81 | Segment& RODataSegment() { | ||
| 82 | return segments[1]; | ||
| 83 | } | ||
| 84 | |||
| 85 | const Segment& RODataSegment() const { | ||
| 86 | return segments[1]; | ||
| 87 | } | ||
| 88 | |||
| 89 | Segment& DataSegment() { | ||
| 90 | return segments[2]; | ||
| 91 | } | ||
| 92 | |||
| 93 | const Segment& DataSegment() const { | ||
| 94 | return segments[2]; | ||
| 95 | } | ||
| 96 | |||
| 97 | std::shared_ptr<std::vector<u8>> memory; | ||
| 98 | |||
| 99 | std::array<Segment, 3> segments; | ||
| 100 | VAddr entrypoint = 0; | ||
| 101 | }; | ||
| 102 | |||
| 103 | class Process final : public WaitObject { | 70 | class Process final : public WaitObject { |
| 104 | public: | 71 | public: |
| 105 | enum : u64 { | 72 | enum : u64 { |
| @@ -116,7 +83,7 @@ public: | |||
| 116 | 83 | ||
| 117 | static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; | 84 | static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; |
| 118 | 85 | ||
| 119 | static SharedPtr<Process> Create(KernelCore& kernel, std::string&& name); | 86 | static SharedPtr<Process> Create(Core::System& system, std::string&& name); |
| 120 | 87 | ||
| 121 | std::string GetTypeName() const override { | 88 | std::string GetTypeName() const override { |
| 122 | return "Process"; | 89 | return "Process"; |
| @@ -150,6 +117,26 @@ public: | |||
| 150 | return handle_table; | 117 | return handle_table; |
| 151 | } | 118 | } |
| 152 | 119 | ||
| 120 | /// Gets a reference to the process' address arbiter. | ||
| 121 | AddressArbiter& GetAddressArbiter() { | ||
| 122 | return address_arbiter; | ||
| 123 | } | ||
| 124 | |||
| 125 | /// Gets a const reference to the process' address arbiter. | ||
| 126 | const AddressArbiter& GetAddressArbiter() const { | ||
| 127 | return address_arbiter; | ||
| 128 | } | ||
| 129 | |||
| 130 | /// Gets a reference to the process' mutex lock. | ||
| 131 | Mutex& GetMutex() { | ||
| 132 | return mutex; | ||
| 133 | } | ||
| 134 | |||
| 135 | /// Gets a const reference to the process' mutex lock | ||
| 136 | const Mutex& GetMutex() const { | ||
| 137 | return mutex; | ||
| 138 | } | ||
| 139 | |||
| 153 | /// Gets the current status of the process | 140 | /// Gets the current status of the process |
| 154 | ProcessStatus GetStatus() const { | 141 | ProcessStatus GetStatus() const { |
| 155 | return status; | 142 | return status; |
| @@ -251,7 +238,7 @@ public: | |||
| 251 | void FreeTLSSlot(VAddr tls_address); | 238 | void FreeTLSSlot(VAddr tls_address); |
| 252 | 239 | ||
| 253 | private: | 240 | private: |
| 254 | explicit Process(KernelCore& kernel); | 241 | explicit Process(Core::System& system); |
| 255 | ~Process() override; | 242 | ~Process() override; |
| 256 | 243 | ||
| 257 | /// Checks if the specified thread should wait until this process is available. | 244 | /// Checks if the specified thread should wait until this process is available. |
| @@ -309,9 +296,21 @@ private: | |||
| 309 | /// Per-process handle table for storing created object handles in. | 296 | /// Per-process handle table for storing created object handles in. |
| 310 | HandleTable handle_table; | 297 | HandleTable handle_table; |
| 311 | 298 | ||
| 299 | /// Per-process address arbiter. | ||
| 300 | AddressArbiter address_arbiter; | ||
| 301 | |||
| 302 | /// The per-process mutex lock instance used for handling various | ||
| 303 | /// forms of services, such as lock arbitration, and condition | ||
| 304 | /// variable related facilities. | ||
| 305 | Mutex mutex; | ||
| 306 | |||
| 312 | /// Random values for svcGetInfo RandomEntropy | 307 | /// Random values for svcGetInfo RandomEntropy |
| 313 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; | 308 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; |
| 314 | 309 | ||
| 310 | /// System context | ||
| 311 | Core::System& system; | ||
| 312 | |||
| 313 | /// Name of this process | ||
| 315 | std::string name; | 314 | std::string name; |
| 316 | }; | 315 | }; |
| 317 | 316 | ||
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 44f30d070..cc189cc64 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -19,7 +19,8 @@ namespace Kernel { | |||
| 19 | 19 | ||
| 20 | std::mutex Scheduler::scheduler_mutex; | 20 | std::mutex Scheduler::scheduler_mutex; |
| 21 | 21 | ||
| 22 | Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {} | 22 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) |
| 23 | : cpu_core{cpu_core}, system{system} {} | ||
| 23 | 24 | ||
| 24 | Scheduler::~Scheduler() { | 25 | Scheduler::~Scheduler() { |
| 25 | for (auto& thread : thread_list) { | 26 | for (auto& thread : thread_list) { |
| @@ -61,7 +62,7 @@ Thread* Scheduler::PopNextReadyThread() { | |||
| 61 | 62 | ||
| 62 | void Scheduler::SwitchContext(Thread* new_thread) { | 63 | void Scheduler::SwitchContext(Thread* new_thread) { |
| 63 | Thread* const previous_thread = GetCurrentThread(); | 64 | Thread* const previous_thread = GetCurrentThread(); |
| 64 | Process* const previous_process = Core::CurrentProcess(); | 65 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 65 | 66 | ||
| 66 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 67 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 67 | 68 | ||
| @@ -94,8 +95,8 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 94 | 95 | ||
| 95 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | 96 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); |
| 96 | if (previous_process != thread_owner_process) { | 97 | if (previous_process != thread_owner_process) { |
| 97 | Core::System::GetInstance().Kernel().MakeCurrentProcess(thread_owner_process); | 98 | system.Kernel().MakeCurrentProcess(thread_owner_process); |
| 98 | SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table); | 99 | Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | cpu_core.LoadContext(new_thread->GetContext()); | 102 | cpu_core.LoadContext(new_thread->GetContext()); |
| @@ -111,7 +112,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 111 | 112 | ||
| 112 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 113 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 113 | const u64 prev_switch_ticks = last_context_switch_time; | 114 | const u64 prev_switch_ticks = last_context_switch_time; |
| 114 | const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); | 115 | const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); |
| 115 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | 116 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; |
| 116 | 117 | ||
| 117 | if (thread != nullptr) { | 118 | if (thread != nullptr) { |
| @@ -198,8 +199,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { | |||
| 198 | ASSERT(thread->GetPriority() < THREADPRIO_COUNT); | 199 | ASSERT(thread->GetPriority() < THREADPRIO_COUNT); |
| 199 | 200 | ||
| 200 | // Yield this thread -- sleep for zero time and force reschedule to different thread | 201 | // Yield this thread -- sleep for zero time and force reschedule to different thread |
| 201 | WaitCurrentThread_Sleep(); | 202 | GetCurrentThread()->Sleep(0); |
| 202 | GetCurrentThread()->WakeAfterDelay(0); | ||
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | void Scheduler::YieldWithLoadBalancing(Thread* thread) { | 205 | void Scheduler::YieldWithLoadBalancing(Thread* thread) { |
| @@ -214,8 +214,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { | |||
| 214 | ASSERT(priority < THREADPRIO_COUNT); | 214 | ASSERT(priority < THREADPRIO_COUNT); |
| 215 | 215 | ||
| 216 | // Sleep for zero time to be able to force reschedule to different thread | 216 | // Sleep for zero time to be able to force reschedule to different thread |
| 217 | WaitCurrentThread_Sleep(); | 217 | GetCurrentThread()->Sleep(0); |
| 218 | GetCurrentThread()->WakeAfterDelay(0); | ||
| 219 | 218 | ||
| 220 | Thread* suggested_thread = nullptr; | 219 | Thread* suggested_thread = nullptr; |
| 221 | 220 | ||
| @@ -223,8 +222,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { | |||
| 223 | // Take the first non-nullptr one | 222 | // Take the first non-nullptr one |
| 224 | for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { | 223 | for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { |
| 225 | const auto res = | 224 | const auto res = |
| 226 | Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread( | 225 | system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority); |
| 227 | core, priority); | ||
| 228 | 226 | ||
| 229 | // If scheduler provides a suggested thread | 227 | // If scheduler provides a suggested thread |
| 230 | if (res != nullptr) { | 228 | if (res != nullptr) { |
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 97ced4dfc..1c5bf57d9 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h | |||
| @@ -13,7 +13,8 @@ | |||
| 13 | 13 | ||
| 14 | namespace Core { | 14 | namespace Core { |
| 15 | class ARM_Interface; | 15 | class ARM_Interface; |
| 16 | } | 16 | class System; |
| 17 | } // namespace Core | ||
| 17 | 18 | ||
| 18 | namespace Kernel { | 19 | namespace Kernel { |
| 19 | 20 | ||
| @@ -21,7 +22,7 @@ class Process; | |||
| 21 | 22 | ||
| 22 | class Scheduler final { | 23 | class Scheduler final { |
| 23 | public: | 24 | public: |
| 24 | explicit Scheduler(Core::ARM_Interface& cpu_core); | 25 | explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core); |
| 25 | ~Scheduler(); | 26 | ~Scheduler(); |
| 26 | 27 | ||
| 27 | /// Returns whether there are any threads that are ready to run. | 28 | /// Returns whether there are any threads that are ready to run. |
| @@ -162,6 +163,7 @@ private: | |||
| 162 | Core::ARM_Interface& cpu_core; | 163 | Core::ARM_Interface& cpu_core; |
| 163 | u64 last_context_switch_time = 0; | 164 | u64 last_context_switch_time = 0; |
| 164 | 165 | ||
| 166 | Core::System& system; | ||
| 165 | static std::mutex scheduler_mutex; | 167 | static std::mutex scheduler_mutex; |
| 166 | }; | 168 | }; |
| 167 | 169 | ||
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index d6ceeb2da..0e1515c89 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp | |||
| @@ -26,6 +26,10 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() { | |||
| 26 | return MakeResult(std::move(session)); | 26 | return MakeResult(std::move(session)); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) { | ||
| 30 | pending_sessions.push_back(std::move(pending_session)); | ||
| 31 | } | ||
| 32 | |||
| 29 | bool ServerPort::ShouldWait(Thread* thread) const { | 33 | bool ServerPort::ShouldWait(Thread* thread) const { |
| 30 | // If there are no pending sessions, we wait until a new one is added. | 34 | // If there are no pending sessions, we wait until a new one is added. |
| 31 | return pending_sessions.empty(); | 35 | return pending_sessions.empty(); |
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index e52f8245f..9bc667cf2 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h | |||
| @@ -22,6 +22,8 @@ class SessionRequestHandler; | |||
| 22 | 22 | ||
| 23 | class ServerPort final : public WaitObject { | 23 | class ServerPort final : public WaitObject { |
| 24 | public: | 24 | public: |
| 25 | using HLEHandler = std::shared_ptr<SessionRequestHandler>; | ||
| 26 | |||
| 25 | /** | 27 | /** |
| 26 | * Creates a pair of ServerPort and an associated ClientPort. | 28 | * Creates a pair of ServerPort and an associated ClientPort. |
| 27 | * | 29 | * |
| @@ -51,22 +53,27 @@ public: | |||
| 51 | */ | 53 | */ |
| 52 | ResultVal<SharedPtr<ServerSession>> Accept(); | 54 | ResultVal<SharedPtr<ServerSession>> Accept(); |
| 53 | 55 | ||
| 56 | /// Whether or not this server port has an HLE handler available. | ||
| 57 | bool HasHLEHandler() const { | ||
| 58 | return hle_handler != nullptr; | ||
| 59 | } | ||
| 60 | |||
| 61 | /// Gets the HLE handler for this port. | ||
| 62 | HLEHandler GetHLEHandler() const { | ||
| 63 | return hle_handler; | ||
| 64 | } | ||
| 65 | |||
| 54 | /** | 66 | /** |
| 55 | * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port | 67 | * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port |
| 56 | * will inherit a reference to this handler. | 68 | * will inherit a reference to this handler. |
| 57 | */ | 69 | */ |
| 58 | void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) { | 70 | void SetHleHandler(HLEHandler hle_handler_) { |
| 59 | hle_handler = std::move(hle_handler_); | 71 | hle_handler = std::move(hle_handler_); |
| 60 | } | 72 | } |
| 61 | 73 | ||
| 62 | std::string name; ///< Name of port (optional) | 74 | /// Appends a ServerSession to the collection of ServerSessions |
| 63 | 75 | /// waiting to be accepted by this port. | |
| 64 | /// ServerSessions waiting to be accepted by the port | 76 | void AppendPendingSession(SharedPtr<ServerSession> pending_session); |
| 65 | std::vector<SharedPtr<ServerSession>> pending_sessions; | ||
| 66 | |||
| 67 | /// This session's HLE request handler template (optional) | ||
| 68 | /// ServerSessions created from this port inherit a reference to this handler. | ||
| 69 | std::shared_ptr<SessionRequestHandler> hle_handler; | ||
| 70 | 77 | ||
| 71 | bool ShouldWait(Thread* thread) const override; | 78 | bool ShouldWait(Thread* thread) const override; |
| 72 | void Acquire(Thread* thread) override; | 79 | void Acquire(Thread* thread) override; |
| @@ -74,6 +81,16 @@ public: | |||
| 74 | private: | 81 | private: |
| 75 | explicit ServerPort(KernelCore& kernel); | 82 | explicit ServerPort(KernelCore& kernel); |
| 76 | ~ServerPort() override; | 83 | ~ServerPort() override; |
| 84 | |||
| 85 | /// ServerSessions waiting to be accepted by the port | ||
| 86 | std::vector<SharedPtr<ServerSession>> pending_sessions; | ||
| 87 | |||
| 88 | /// This session's HLE request handler template (optional) | ||
| 89 | /// ServerSessions created from this port inherit a reference to this handler. | ||
| 90 | HLEHandler hle_handler; | ||
| 91 | |||
| 92 | /// Name of the port (optional) | ||
| 93 | std::string name; | ||
| 77 | }; | 94 | }; |
| 78 | 95 | ||
| 79 | } // namespace Kernel | 96 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 7f5c0cc86..a6a17efe7 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -551,9 +551,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr, | |||
| 551 | return ERR_INVALID_ADDRESS; | 551 | return ERR_INVALID_ADDRESS; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | auto& handle_table = Core::CurrentProcess()->GetHandleTable(); | 554 | auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); |
| 555 | return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle, | 555 | return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle, |
| 556 | requesting_thread_handle); | 556 | requesting_thread_handle); |
| 557 | } | 557 | } |
| 558 | 558 | ||
| 559 | /// Unlock a mutex | 559 | /// Unlock a mutex |
| @@ -571,7 +571,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) { | |||
| 571 | return ERR_INVALID_ADDRESS; | 571 | return ERR_INVALID_ADDRESS; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| 574 | return Mutex::Release(mutex_addr); | 574 | auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); |
| 575 | return current_process->GetMutex().Release(mutex_addr); | ||
| 575 | } | 576 | } |
| 576 | 577 | ||
| 577 | enum class BreakType : u32 { | 578 | enum class BreakType : u32 { |
| @@ -1284,10 +1285,14 @@ static ResultCode StartThread(Handle thread_handle) { | |||
| 1284 | 1285 | ||
| 1285 | /// Called when a thread exits | 1286 | /// Called when a thread exits |
| 1286 | static void ExitThread() { | 1287 | static void ExitThread() { |
| 1287 | LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC()); | 1288 | auto& system = Core::System::GetInstance(); |
| 1288 | 1289 | ||
| 1289 | ExitCurrentThread(); | 1290 | LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); |
| 1290 | Core::System::GetInstance().PrepareReschedule(); | 1291 | |
| 1292 | auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); | ||
| 1293 | current_thread->Stop(); | ||
| 1294 | system.CurrentScheduler().RemoveThread(current_thread); | ||
| 1295 | system.PrepareReschedule(); | ||
| 1291 | } | 1296 | } |
| 1292 | 1297 | ||
| 1293 | /// Sleep the current thread | 1298 | /// Sleep the current thread |
| @@ -1300,32 +1305,32 @@ static void SleepThread(s64 nanoseconds) { | |||
| 1300 | YieldAndWaitForLoadBalancing = -2, | 1305 | YieldAndWaitForLoadBalancing = -2, |
| 1301 | }; | 1306 | }; |
| 1302 | 1307 | ||
| 1308 | auto& system = Core::System::GetInstance(); | ||
| 1309 | auto& scheduler = system.CurrentScheduler(); | ||
| 1310 | auto* const current_thread = scheduler.GetCurrentThread(); | ||
| 1311 | |||
| 1303 | if (nanoseconds <= 0) { | 1312 | if (nanoseconds <= 0) { |
| 1304 | auto& scheduler{Core::System::GetInstance().CurrentScheduler()}; | ||
| 1305 | switch (static_cast<SleepType>(nanoseconds)) { | 1313 | switch (static_cast<SleepType>(nanoseconds)) { |
| 1306 | case SleepType::YieldWithoutLoadBalancing: | 1314 | case SleepType::YieldWithoutLoadBalancing: |
| 1307 | scheduler.YieldWithoutLoadBalancing(GetCurrentThread()); | 1315 | scheduler.YieldWithoutLoadBalancing(current_thread); |
| 1308 | break; | 1316 | break; |
| 1309 | case SleepType::YieldWithLoadBalancing: | 1317 | case SleepType::YieldWithLoadBalancing: |
| 1310 | scheduler.YieldWithLoadBalancing(GetCurrentThread()); | 1318 | scheduler.YieldWithLoadBalancing(current_thread); |
| 1311 | break; | 1319 | break; |
| 1312 | case SleepType::YieldAndWaitForLoadBalancing: | 1320 | case SleepType::YieldAndWaitForLoadBalancing: |
| 1313 | scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread()); | 1321 | scheduler.YieldAndWaitForLoadBalancing(current_thread); |
| 1314 | break; | 1322 | break; |
| 1315 | default: | 1323 | default: |
| 1316 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); | 1324 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); |
| 1317 | } | 1325 | } |
| 1318 | } else { | 1326 | } else { |
| 1319 | // Sleep current thread and check for next thread to schedule | 1327 | current_thread->Sleep(nanoseconds); |
| 1320 | WaitCurrentThread_Sleep(); | ||
| 1321 | |||
| 1322 | // Create an event to wake the thread up after the specified nanosecond delay has passed | ||
| 1323 | GetCurrentThread()->WakeAfterDelay(nanoseconds); | ||
| 1324 | } | 1328 | } |
| 1325 | 1329 | ||
| 1326 | // Reschedule all CPU cores | 1330 | // Reschedule all CPU cores |
| 1327 | for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) | 1331 | for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) { |
| 1328 | Core::System::GetInstance().CpuCore(i).PrepareReschedule(); | 1332 | system.CpuCore(i).PrepareReschedule(); |
| 1333 | } | ||
| 1329 | } | 1334 | } |
| 1330 | 1335 | ||
| 1331 | /// Wait process wide key atomic | 1336 | /// Wait process wide key atomic |
| @@ -1336,11 +1341,15 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var | |||
| 1336 | "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", | 1341 | "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", |
| 1337 | mutex_addr, condition_variable_addr, thread_handle, nano_seconds); | 1342 | mutex_addr, condition_variable_addr, thread_handle, nano_seconds); |
| 1338 | 1343 | ||
| 1339 | const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); | 1344 | auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); |
| 1345 | const auto& handle_table = current_process->GetHandleTable(); | ||
| 1340 | SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); | 1346 | SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); |
| 1341 | ASSERT(thread); | 1347 | ASSERT(thread); |
| 1342 | 1348 | ||
| 1343 | CASCADE_CODE(Mutex::Release(mutex_addr)); | 1349 | const auto release_result = current_process->GetMutex().Release(mutex_addr); |
| 1350 | if (release_result.IsError()) { | ||
| 1351 | return release_result; | ||
| 1352 | } | ||
| 1344 | 1353 | ||
| 1345 | SharedPtr<Thread> current_thread = GetCurrentThread(); | 1354 | SharedPtr<Thread> current_thread = GetCurrentThread(); |
| 1346 | current_thread->SetCondVarWaitAddress(condition_variable_addr); | 1355 | current_thread->SetCondVarWaitAddress(condition_variable_addr); |
| @@ -1479,21 +1488,10 @@ static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout | |||
| 1479 | return ERR_INVALID_ADDRESS; | 1488 | return ERR_INVALID_ADDRESS; |
| 1480 | } | 1489 | } |
| 1481 | 1490 | ||
| 1482 | auto& address_arbiter = Core::System::GetInstance().Kernel().AddressArbiter(); | 1491 | const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); |
| 1483 | switch (static_cast<AddressArbiter::ArbitrationType>(type)) { | 1492 | auto& address_arbiter = |
| 1484 | case AddressArbiter::ArbitrationType::WaitIfLessThan: | 1493 | Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); |
| 1485 | return address_arbiter.WaitForAddressIfLessThan(address, value, timeout, false); | 1494 | return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); |
| 1486 | case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan: | ||
| 1487 | return address_arbiter.WaitForAddressIfLessThan(address, value, timeout, true); | ||
| 1488 | case AddressArbiter::ArbitrationType::WaitIfEqual: | ||
| 1489 | return address_arbiter.WaitForAddressIfEqual(address, value, timeout); | ||
| 1490 | default: | ||
| 1491 | LOG_ERROR(Kernel_SVC, | ||
| 1492 | "Invalid arbitration type, expected WaitIfLessThan, DecrementAndWaitIfLessThan " | ||
| 1493 | "or WaitIfEqual but got {}", | ||
| 1494 | type); | ||
| 1495 | return ERR_INVALID_ENUM_VALUE; | ||
| 1496 | } | ||
| 1497 | } | 1495 | } |
| 1498 | 1496 | ||
| 1499 | // Signals to an address (via Address Arbiter) | 1497 | // Signals to an address (via Address Arbiter) |
| @@ -1511,22 +1509,10 @@ static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to | |||
| 1511 | return ERR_INVALID_ADDRESS; | 1509 | return ERR_INVALID_ADDRESS; |
| 1512 | } | 1510 | } |
| 1513 | 1511 | ||
| 1514 | auto& address_arbiter = Core::System::GetInstance().Kernel().AddressArbiter(); | 1512 | const auto signal_type = static_cast<AddressArbiter::SignalType>(type); |
| 1515 | switch (static_cast<AddressArbiter::SignalType>(type)) { | 1513 | auto& address_arbiter = |
| 1516 | case AddressArbiter::SignalType::Signal: | 1514 | Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); |
| 1517 | return address_arbiter.SignalToAddress(address, num_to_wake); | 1515 | return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); |
| 1518 | case AddressArbiter::SignalType::IncrementAndSignalIfEqual: | ||
| 1519 | return address_arbiter.IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 1520 | case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual: | ||
| 1521 | return address_arbiter.ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, | ||
| 1522 | num_to_wake); | ||
| 1523 | default: | ||
| 1524 | LOG_ERROR(Kernel_SVC, | ||
| 1525 | "Invalid signal type, expected Signal, IncrementAndSignalIfEqual " | ||
| 1526 | "or ModifyByWaitingCountAndSignalIfEqual but got {}", | ||
| 1527 | type); | ||
| 1528 | return ERR_INVALID_ENUM_VALUE; | ||
| 1529 | } | ||
| 1530 | } | 1516 | } |
| 1531 | 1517 | ||
| 1532 | /// This returns the total CPU ticks elapsed since the CPU was powered-on | 1518 | /// This returns the total CPU ticks elapsed since the CPU was powered-on |
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index eb54d6651..3b22e8e0d 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -7,8 +7,6 @@ | |||
| 7 | #include <optional> | 7 | #include <optional> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | 9 | ||
| 10 | #include <boost/range/algorithm_ext/erase.hpp> | ||
| 11 | |||
| 12 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 13 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 14 | #include "common/logging/log.h" | 12 | #include "common/logging/log.h" |
| @@ -68,17 +66,6 @@ void Thread::Stop() { | |||
| 68 | owner_process->FreeTLSSlot(tls_address); | 66 | owner_process->FreeTLSSlot(tls_address); |
| 69 | } | 67 | } |
| 70 | 68 | ||
| 71 | void WaitCurrentThread_Sleep() { | ||
| 72 | Thread* thread = GetCurrentThread(); | ||
| 73 | thread->SetStatus(ThreadStatus::WaitSleep); | ||
| 74 | } | ||
| 75 | |||
| 76 | void ExitCurrentThread() { | ||
| 77 | Thread* thread = GetCurrentThread(); | ||
| 78 | thread->Stop(); | ||
| 79 | Core::System::GetInstance().CurrentScheduler().RemoveThread(thread); | ||
| 80 | } | ||
| 81 | |||
| 82 | void Thread::WakeAfterDelay(s64 nanoseconds) { | 69 | void Thread::WakeAfterDelay(s64 nanoseconds) { |
| 83 | // Don't schedule a wakeup if the thread wants to wait forever | 70 | // Don't schedule a wakeup if the thread wants to wait forever |
| 84 | if (nanoseconds == -1) | 71 | if (nanoseconds == -1) |
| @@ -269,8 +256,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { | |||
| 269 | if (thread->lock_owner == this) { | 256 | if (thread->lock_owner == this) { |
| 270 | // If the thread is already waiting for this thread to release the mutex, ensure that the | 257 | // If the thread is already waiting for this thread to release the mutex, ensure that the |
| 271 | // waiters list is consistent and return without doing anything. | 258 | // waiters list is consistent and return without doing anything. |
| 272 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 259 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 273 | ASSERT(itr != wait_mutex_threads.end()); | 260 | ASSERT(iter != wait_mutex_threads.end()); |
| 274 | return; | 261 | return; |
| 275 | } | 262 | } |
| 276 | 263 | ||
| @@ -278,11 +265,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { | |||
| 278 | ASSERT(thread->lock_owner == nullptr); | 265 | ASSERT(thread->lock_owner == nullptr); |
| 279 | 266 | ||
| 280 | // Ensure that the thread is not already in the list of mutex waiters | 267 | // Ensure that the thread is not already in the list of mutex waiters |
| 281 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 268 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 282 | ASSERT(itr == wait_mutex_threads.end()); | 269 | ASSERT(iter == wait_mutex_threads.end()); |
| 283 | 270 | ||
| 271 | // Keep the list in an ordered fashion | ||
| 272 | const auto insertion_point = std::find_if( | ||
| 273 | wait_mutex_threads.begin(), wait_mutex_threads.end(), | ||
| 274 | [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); }); | ||
| 275 | wait_mutex_threads.insert(insertion_point, thread); | ||
| 284 | thread->lock_owner = this; | 276 | thread->lock_owner = this; |
| 285 | wait_mutex_threads.emplace_back(std::move(thread)); | 277 | |
| 286 | UpdatePriority(); | 278 | UpdatePriority(); |
| 287 | } | 279 | } |
| 288 | 280 | ||
| @@ -290,32 +282,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) { | |||
| 290 | ASSERT(thread->lock_owner == this); | 282 | ASSERT(thread->lock_owner == this); |
| 291 | 283 | ||
| 292 | // Ensure that the thread is in the list of mutex waiters | 284 | // Ensure that the thread is in the list of mutex waiters |
| 293 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 285 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 294 | ASSERT(itr != wait_mutex_threads.end()); | 286 | ASSERT(iter != wait_mutex_threads.end()); |
| 287 | |||
| 288 | wait_mutex_threads.erase(iter); | ||
| 295 | 289 | ||
| 296 | boost::remove_erase(wait_mutex_threads, thread); | ||
| 297 | thread->lock_owner = nullptr; | 290 | thread->lock_owner = nullptr; |
| 298 | UpdatePriority(); | 291 | UpdatePriority(); |
| 299 | } | 292 | } |
| 300 | 293 | ||
| 301 | void Thread::UpdatePriority() { | 294 | void Thread::UpdatePriority() { |
| 302 | // Find the highest priority among all the threads that are waiting for this thread's lock | 295 | // If any of the threads waiting on the mutex have a higher priority |
| 296 | // (taking into account priority inheritance), then this thread inherits | ||
| 297 | // that thread's priority. | ||
| 303 | u32 new_priority = nominal_priority; | 298 | u32 new_priority = nominal_priority; |
| 304 | for (const auto& thread : wait_mutex_threads) { | 299 | if (!wait_mutex_threads.empty()) { |
| 305 | if (thread->nominal_priority < new_priority) | 300 | if (wait_mutex_threads.front()->current_priority < new_priority) { |
| 306 | new_priority = thread->nominal_priority; | 301 | new_priority = wait_mutex_threads.front()->current_priority; |
| 302 | } | ||
| 307 | } | 303 | } |
| 308 | 304 | ||
| 309 | if (new_priority == current_priority) | 305 | if (new_priority == current_priority) { |
| 310 | return; | 306 | return; |
| 307 | } | ||
| 311 | 308 | ||
| 312 | scheduler->SetThreadPriority(this, new_priority); | 309 | scheduler->SetThreadPriority(this, new_priority); |
| 313 | |||
| 314 | current_priority = new_priority; | 310 | current_priority = new_priority; |
| 315 | 311 | ||
| 312 | if (!lock_owner) { | ||
| 313 | return; | ||
| 314 | } | ||
| 315 | |||
| 316 | // Ensure that the thread is within the correct location in the waiting list. | ||
| 317 | auto old_owner = lock_owner; | ||
| 318 | lock_owner->RemoveMutexWaiter(this); | ||
| 319 | old_owner->AddMutexWaiter(this); | ||
| 320 | |||
| 316 | // Recursively update the priority of the thread that depends on the priority of this one. | 321 | // Recursively update the priority of the thread that depends on the priority of this one. |
| 317 | if (lock_owner) | 322 | lock_owner->UpdatePriority(); |
| 318 | lock_owner->UpdatePriority(); | ||
| 319 | } | 323 | } |
| 320 | 324 | ||
| 321 | void Thread::ChangeCore(u32 core, u64 mask) { | 325 | void Thread::ChangeCore(u32 core, u64 mask) { |
| @@ -391,6 +395,14 @@ void Thread::SetActivity(ThreadActivity value) { | |||
| 391 | } | 395 | } |
| 392 | } | 396 | } |
| 393 | 397 | ||
| 398 | void Thread::Sleep(s64 nanoseconds) { | ||
| 399 | // Sleep current thread and check for next thread to schedule | ||
| 400 | SetStatus(ThreadStatus::WaitSleep); | ||
| 401 | |||
| 402 | // Create an event to wake the thread up after the specified nanosecond delay has passed | ||
| 403 | WakeAfterDelay(nanoseconds); | ||
| 404 | } | ||
| 405 | |||
| 394 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 406 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 395 | 407 | ||
| 396 | /** | 408 | /** |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index c48b21aba..faad5f391 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -383,6 +383,9 @@ public: | |||
| 383 | 383 | ||
| 384 | void SetActivity(ThreadActivity value); | 384 | void SetActivity(ThreadActivity value); |
| 385 | 385 | ||
| 386 | /// Sleeps this thread for the given amount of nanoseconds. | ||
| 387 | void Sleep(s64 nanoseconds); | ||
| 388 | |||
| 386 | private: | 389 | private: |
| 387 | explicit Thread(KernelCore& kernel); | 390 | explicit Thread(KernelCore& kernel); |
| 388 | ~Thread() override; | 391 | ~Thread() override; |
| @@ -398,8 +401,14 @@ private: | |||
| 398 | VAddr entry_point = 0; | 401 | VAddr entry_point = 0; |
| 399 | VAddr stack_top = 0; | 402 | VAddr stack_top = 0; |
| 400 | 403 | ||
| 401 | u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application | 404 | /// Nominal thread priority, as set by the emulated application. |
| 402 | u32 current_priority = 0; ///< Current thread priority, can be temporarily changed | 405 | /// The nominal priority is the thread priority without priority |
| 406 | /// inheritance taken into account. | ||
| 407 | u32 nominal_priority = 0; | ||
| 408 | |||
| 409 | /// Current thread priority. This may change over the course of the | ||
| 410 | /// thread's lifetime in order to facilitate priority inheritance. | ||
| 411 | u32 current_priority = 0; | ||
| 403 | 412 | ||
| 404 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. | 413 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. |
| 405 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running | 414 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running |
| @@ -460,14 +469,4 @@ private: | |||
| 460 | */ | 469 | */ |
| 461 | Thread* GetCurrentThread(); | 470 | Thread* GetCurrentThread(); |
| 462 | 471 | ||
| 463 | /** | ||
| 464 | * Waits the current thread on a sleep | ||
| 465 | */ | ||
| 466 | void WaitCurrentThread_Sleep(); | ||
| 467 | |||
| 468 | /** | ||
| 469 | * Stops the current thread and removes it from the thread_list | ||
| 470 | */ | ||
| 471 | void ExitCurrentThread(); | ||
| 472 | |||
| 473 | } // namespace Kernel | 472 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 05c59af34..22bf55ce7 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp | |||
| @@ -7,29 +7,29 @@ | |||
| 7 | #include <utility> | 7 | #include <utility> |
| 8 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "common/memory_hook.h" | ||
| 10 | #include "core/arm/arm_interface.h" | 11 | #include "core/arm/arm_interface.h" |
| 11 | #include "core/core.h" | 12 | #include "core/core.h" |
| 12 | #include "core/file_sys/program_metadata.h" | 13 | #include "core/file_sys/program_metadata.h" |
| 13 | #include "core/hle/kernel/errors.h" | 14 | #include "core/hle/kernel/errors.h" |
| 14 | #include "core/hle/kernel/vm_manager.h" | 15 | #include "core/hle/kernel/vm_manager.h" |
| 15 | #include "core/memory.h" | 16 | #include "core/memory.h" |
| 16 | #include "core/memory_hook.h" | ||
| 17 | #include "core/memory_setup.h" | 17 | #include "core/memory_setup.h" |
| 18 | 18 | ||
| 19 | namespace Kernel { | 19 | namespace Kernel { |
| 20 | namespace { | 20 | namespace { |
| 21 | const char* GetMemoryStateName(MemoryState state) { | 21 | const char* GetMemoryStateName(MemoryState state) { |
| 22 | static constexpr const char* names[] = { | 22 | static constexpr const char* names[] = { |
| 23 | "Unmapped", "Io", | 23 | "Unmapped", "Io", |
| 24 | "Normal", "CodeStatic", | 24 | "Normal", "Code", |
| 25 | "CodeMutable", "Heap", | 25 | "CodeData", "Heap", |
| 26 | "Shared", "Unknown1", | 26 | "Shared", "Unknown1", |
| 27 | "ModuleCodeStatic", "ModuleCodeMutable", | 27 | "ModuleCode", "ModuleCodeData", |
| 28 | "IpcBuffer0", "Stack", | 28 | "IpcBuffer0", "Stack", |
| 29 | "ThreadLocal", "TransferMemoryIsolated", | 29 | "ThreadLocal", "TransferMemoryIsolated", |
| 30 | "TransferMemory", "ProcessMemory", | 30 | "TransferMemory", "ProcessMemory", |
| 31 | "Inaccessible", "IpcBuffer1", | 31 | "Inaccessible", "IpcBuffer1", |
| 32 | "IpcBuffer3", "KernelStack", | 32 | "IpcBuffer3", "KernelStack", |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | return names[ToSvcMemoryState(state)]; | 35 | return names[ToSvcMemoryState(state)]; |
| @@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const { | |||
| 177 | 177 | ||
| 178 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, | 178 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, |
| 179 | MemoryState state, | 179 | MemoryState state, |
| 180 | Memory::MemoryHookPointer mmio_handler) { | 180 | Common::MemoryHookPointer mmio_handler) { |
| 181 | // This is the appropriately sized VMA that will turn into our allocation. | 181 | // This is the appropriately sized VMA that will turn into our allocation. |
| 182 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); | 182 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); |
| 183 | VirtualMemoryArea& final_vma = vma_handle->second; | 183 | VirtualMemoryArea& final_vma = vma_handle->second; |
| @@ -624,7 +624,7 @@ void VMManager::ClearPageTable() { | |||
| 624 | std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); | 624 | std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); |
| 625 | page_table.special_regions.clear(); | 625 | page_table.special_regions.clear(); |
| 626 | std::fill(page_table.attributes.begin(), page_table.attributes.end(), | 626 | std::fill(page_table.attributes.begin(), page_table.attributes.end(), |
| 627 | Memory::PageType::Unmapped); | 627 | Common::PageType::Unmapped); |
| 628 | } | 628 | } |
| 629 | 629 | ||
| 630 | VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, | 630 | VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, |
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 88e0b3c02..7cdff6094 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h | |||
| @@ -9,9 +9,10 @@ | |||
| 9 | #include <tuple> | 9 | #include <tuple> |
| 10 | #include <vector> | 10 | #include <vector> |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/memory_hook.h" | ||
| 13 | #include "common/page_table.h" | ||
| 12 | #include "core/hle/result.h" | 14 | #include "core/hle/result.h" |
| 13 | #include "core/memory.h" | 15 | #include "core/memory.h" |
| 14 | #include "core/memory_hook.h" | ||
| 15 | 16 | ||
| 16 | namespace FileSys { | 17 | namespace FileSys { |
| 17 | enum class ProgramAddressSpaceType : u8; | 18 | enum class ProgramAddressSpaceType : u8; |
| @@ -164,12 +165,12 @@ enum class MemoryState : u32 { | |||
| 164 | Unmapped = 0x00, | 165 | Unmapped = 0x00, |
| 165 | Io = 0x01 | FlagMapped, | 166 | Io = 0x01 | FlagMapped, |
| 166 | Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, | 167 | Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, |
| 167 | CodeStatic = 0x03 | CodeFlags | FlagMapProcess, | 168 | Code = 0x03 | CodeFlags | FlagMapProcess, |
| 168 | CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory, | 169 | CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory, |
| 169 | Heap = 0x05 | DataFlags | FlagCodeMemory, | 170 | Heap = 0x05 | DataFlags | FlagCodeMemory, |
| 170 | Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, | 171 | Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, |
| 171 | ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess, | 172 | ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess, |
| 172 | ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, | 173 | ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, |
| 173 | 174 | ||
| 174 | IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | | 175 | IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | |
| 175 | IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, | 176 | IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, |
| @@ -290,7 +291,7 @@ struct VirtualMemoryArea { | |||
| 290 | // Settings for type = MMIO | 291 | // Settings for type = MMIO |
| 291 | /// Physical address of the register area this VMA maps to. | 292 | /// Physical address of the register area this VMA maps to. |
| 292 | PAddr paddr = 0; | 293 | PAddr paddr = 0; |
| 293 | Memory::MemoryHookPointer mmio_handler = nullptr; | 294 | Common::MemoryHookPointer mmio_handler = nullptr; |
| 294 | 295 | ||
| 295 | /// Tests if this area can be merged to the right with `next`. | 296 | /// Tests if this area can be merged to the right with `next`. |
| 296 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | 297 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; |
| @@ -368,7 +369,7 @@ public: | |||
| 368 | * @param mmio_handler The handler that will implement read and write for this MMIO region. | 369 | * @param mmio_handler The handler that will implement read and write for this MMIO region. |
| 369 | */ | 370 | */ |
| 370 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, | 371 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, |
| 371 | Memory::MemoryHookPointer mmio_handler); | 372 | Common::MemoryHookPointer mmio_handler); |
| 372 | 373 | ||
| 373 | /// Unmaps a range of addresses, splitting VMAs as necessary. | 374 | /// Unmaps a range of addresses, splitting VMAs as necessary. |
| 374 | ResultCode UnmapRange(VAddr target, u64 size); | 375 | ResultCode UnmapRange(VAddr target, u64 size); |
| @@ -509,7 +510,7 @@ public: | |||
| 509 | 510 | ||
| 510 | /// Each VMManager has its own page table, which is set as the main one when the owning process | 511 | /// Each VMManager has its own page table, which is set as the main one when the owning process |
| 511 | /// is scheduled. | 512 | /// is scheduled. |
| 512 | Memory::PageTable page_table; | 513 | Common::PageTable page_table{Memory::PAGE_BITS}; |
| 513 | 514 | ||
| 514 | private: | 515 | private: |
| 515 | using VMAIter = VMAMap::iterator; | 516 | using VMAIter = VMAMap::iterator; |
| @@ -616,6 +617,9 @@ private: | |||
| 616 | VAddr new_map_region_base = 0; | 617 | VAddr new_map_region_base = 0; |
| 617 | VAddr new_map_region_end = 0; | 618 | VAddr new_map_region_end = 0; |
| 618 | 619 | ||
| 620 | VAddr main_code_region_base = 0; | ||
| 621 | VAddr main_code_region_end = 0; | ||
| 622 | |||
| 619 | VAddr tls_io_region_base = 0; | 623 | VAddr tls_io_region_base = 0; |
| 620 | VAddr tls_io_region_end = 0; | 624 | VAddr tls_io_region_end = 0; |
| 621 | 625 | ||
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index 1ed144481..ab84f5ddc 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -13,14 +13,6 @@ | |||
| 13 | // All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes | 13 | // All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes |
| 14 | 14 | ||
| 15 | /** | 15 | /** |
| 16 | * Detailed description of the error. Code 0 always means success. | ||
| 17 | */ | ||
| 18 | enum class ErrorDescription : u32 { | ||
| 19 | Success = 0, | ||
| 20 | RemoteProcessDead = 301, | ||
| 21 | }; | ||
| 22 | |||
| 23 | /** | ||
| 24 | * Identifies the module which caused the error. Error codes can be propagated through a call | 16 | * Identifies the module which caused the error. Error codes can be propagated through a call |
| 25 | * chain, meaning that this doesn't always correspond to the module where the API call made is | 17 | * chain, meaning that this doesn't always correspond to the module where the API call made is |
| 26 | * contained. | 18 | * contained. |
| @@ -120,7 +112,7 @@ enum class ErrorModule : u32 { | |||
| 120 | ShopN = 811, | 112 | ShopN = 811, |
| 121 | }; | 113 | }; |
| 122 | 114 | ||
| 123 | /// Encapsulates a CTR-OS error code, allowing it to be separated into its constituent fields. | 115 | /// Encapsulates a Horizon OS error code, allowing it to be separated into its constituent fields. |
| 124 | union ResultCode { | 116 | union ResultCode { |
| 125 | u32 raw; | 117 | u32 raw; |
| 126 | 118 | ||
| @@ -133,17 +125,9 @@ union ResultCode { | |||
| 133 | 125 | ||
| 134 | constexpr explicit ResultCode(u32 raw) : raw(raw) {} | 126 | constexpr explicit ResultCode(u32 raw) : raw(raw) {} |
| 135 | 127 | ||
| 136 | constexpr ResultCode(ErrorModule module, ErrorDescription description) | ||
| 137 | : ResultCode(module, static_cast<u32>(description)) {} | ||
| 138 | |||
| 139 | constexpr ResultCode(ErrorModule module_, u32 description_) | 128 | constexpr ResultCode(ErrorModule module_, u32 description_) |
| 140 | : raw(module.FormatValue(module_) | description.FormatValue(description_)) {} | 129 | : raw(module.FormatValue(module_) | description.FormatValue(description_)) {} |
| 141 | 130 | ||
| 142 | constexpr ResultCode& operator=(const ResultCode& o) { | ||
| 143 | raw = o.raw; | ||
| 144 | return *this; | ||
| 145 | } | ||
| 146 | |||
| 147 | constexpr bool IsSuccess() const { | 131 | constexpr bool IsSuccess() const { |
| 148 | return raw == 0; | 132 | return raw == 0; |
| 149 | } | 133 | } |
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index 3f009d2b7..9c44e27c6 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp | |||
| @@ -2,10 +2,10 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | ||
| 5 | #include <array> | 6 | #include <array> |
| 6 | #include <cinttypes> | 7 | #include <cinttypes> |
| 7 | #include <cstring> | 8 | #include <cstring> |
| 8 | #include <stack> | ||
| 9 | #include "audio_core/audio_renderer.h" | 9 | #include "audio_core/audio_renderer.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/file_sys/savedata_factory.h" | 11 | #include "core/file_sys/savedata_factory.h" |
| @@ -93,38 +93,84 @@ void IWindowController::AcquireForegroundRights(Kernel::HLERequestContext& ctx) | |||
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | IAudioController::IAudioController() : ServiceFramework("IAudioController") { | 95 | IAudioController::IAudioController() : ServiceFramework("IAudioController") { |
| 96 | // clang-format off | ||
| 96 | static const FunctionInfo functions[] = { | 97 | static const FunctionInfo functions[] = { |
| 97 | {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"}, | 98 | {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"}, |
| 98 | {1, &IAudioController::GetMainAppletExpectedMasterVolume, | 99 | {1, &IAudioController::GetMainAppletExpectedMasterVolume, "GetMainAppletExpectedMasterVolume"}, |
| 99 | "GetMainAppletExpectedMasterVolume"}, | 100 | {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, "GetLibraryAppletExpectedMasterVolume"}, |
| 100 | {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, | 101 | {3, &IAudioController::ChangeMainAppletMasterVolume, "ChangeMainAppletMasterVolume"}, |
| 101 | "GetLibraryAppletExpectedMasterVolume"}, | 102 | {4, &IAudioController::SetTransparentAudioRate, "SetTransparentVolumeRate"}, |
| 102 | {3, nullptr, "ChangeMainAppletMasterVolume"}, | ||
| 103 | {4, nullptr, "SetTransparentVolumeRate"}, | ||
| 104 | }; | 103 | }; |
| 104 | // clang-format on | ||
| 105 | |||
| 105 | RegisterHandlers(functions); | 106 | RegisterHandlers(functions); |
| 106 | } | 107 | } |
| 107 | 108 | ||
| 108 | IAudioController::~IAudioController() = default; | 109 | IAudioController::~IAudioController() = default; |
| 109 | 110 | ||
| 110 | void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 111 | void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 111 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 112 | IPC::RequestParser rp{ctx}; |
| 113 | const float main_applet_volume_tmp = rp.Pop<float>(); | ||
| 114 | const float library_applet_volume_tmp = rp.Pop<float>(); | ||
| 115 | |||
| 116 | LOG_DEBUG(Service_AM, "called. main_applet_volume={}, library_applet_volume={}", | ||
| 117 | main_applet_volume_tmp, library_applet_volume_tmp); | ||
| 118 | |||
| 119 | // Ensure the volume values remain within the 0-100% range | ||
| 120 | main_applet_volume = std::clamp(main_applet_volume_tmp, min_allowed_volume, max_allowed_volume); | ||
| 121 | library_applet_volume = | ||
| 122 | std::clamp(library_applet_volume_tmp, min_allowed_volume, max_allowed_volume); | ||
| 123 | |||
| 112 | IPC::ResponseBuilder rb{ctx, 2}; | 124 | IPC::ResponseBuilder rb{ctx, 2}; |
| 113 | rb.Push(RESULT_SUCCESS); | 125 | rb.Push(RESULT_SUCCESS); |
| 114 | } | 126 | } |
| 115 | 127 | ||
| 116 | void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 128 | void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 117 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 129 | LOG_DEBUG(Service_AM, "called. main_applet_volume={}", main_applet_volume); |
| 118 | IPC::ResponseBuilder rb{ctx, 3}; | 130 | IPC::ResponseBuilder rb{ctx, 3}; |
| 119 | rb.Push(RESULT_SUCCESS); | 131 | rb.Push(RESULT_SUCCESS); |
| 120 | rb.Push(volume); | 132 | rb.Push(main_applet_volume); |
| 121 | } | 133 | } |
| 122 | 134 | ||
| 123 | void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 135 | void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 124 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 136 | LOG_DEBUG(Service_AM, "called. library_applet_volume={}", library_applet_volume); |
| 125 | IPC::ResponseBuilder rb{ctx, 3}; | 137 | IPC::ResponseBuilder rb{ctx, 3}; |
| 126 | rb.Push(RESULT_SUCCESS); | 138 | rb.Push(RESULT_SUCCESS); |
| 127 | rb.Push(volume); | 139 | rb.Push(library_applet_volume); |
| 140 | } | ||
| 141 | |||
| 142 | void IAudioController::ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx) { | ||
| 143 | struct Parameters { | ||
| 144 | float volume; | ||
| 145 | s64 fade_time_ns; | ||
| 146 | }; | ||
| 147 | static_assert(sizeof(Parameters) == 16); | ||
| 148 | |||
| 149 | IPC::RequestParser rp{ctx}; | ||
| 150 | const auto parameters = rp.PopRaw<Parameters>(); | ||
| 151 | |||
| 152 | LOG_DEBUG(Service_AM, "called. volume={}, fade_time_ns={}", parameters.volume, | ||
| 153 | parameters.fade_time_ns); | ||
| 154 | |||
| 155 | main_applet_volume = std::clamp(parameters.volume, min_allowed_volume, max_allowed_volume); | ||
| 156 | fade_time_ns = std::chrono::nanoseconds{parameters.fade_time_ns}; | ||
| 157 | |||
| 158 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 159 | rb.Push(RESULT_SUCCESS); | ||
| 160 | } | ||
| 161 | |||
| 162 | void IAudioController::SetTransparentAudioRate(Kernel::HLERequestContext& ctx) { | ||
| 163 | IPC::RequestParser rp{ctx}; | ||
| 164 | const float transparent_volume_rate_tmp = rp.Pop<float>(); | ||
| 165 | |||
| 166 | LOG_DEBUG(Service_AM, "called. transparent_volume_rate={}", transparent_volume_rate_tmp); | ||
| 167 | |||
| 168 | // Clamp volume range to 0-100%. | ||
| 169 | transparent_volume_rate = | ||
| 170 | std::clamp(transparent_volume_rate_tmp, min_allowed_volume, max_allowed_volume); | ||
| 171 | |||
| 172 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 173 | rb.Push(RESULT_SUCCESS); | ||
| 128 | } | 174 | } |
| 129 | 175 | ||
| 130 | IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") { | 176 | IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") { |
| @@ -169,7 +215,21 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController" | |||
| 169 | 215 | ||
| 170 | IDisplayController::~IDisplayController() = default; | 216 | IDisplayController::~IDisplayController() = default; |
| 171 | 217 | ||
| 172 | IDebugFunctions::IDebugFunctions() : ServiceFramework("IDebugFunctions") {} | 218 | IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} { |
| 219 | // clang-format off | ||
| 220 | static const FunctionInfo functions[] = { | ||
| 221 | {0, nullptr, "NotifyMessageToHomeMenuForDebug"}, | ||
| 222 | {1, nullptr, "OpenMainApplication"}, | ||
| 223 | {10, nullptr, "EmulateButtonEvent"}, | ||
| 224 | {20, nullptr, "InvalidateTransitionLayer"}, | ||
| 225 | {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, | ||
| 226 | {40, nullptr, "GetAppletResourceUsageInfo"}, | ||
| 227 | }; | ||
| 228 | // clang-format on | ||
| 229 | |||
| 230 | RegisterHandlers(functions); | ||
| 231 | } | ||
| 232 | |||
| 173 | IDebugFunctions::~IDebugFunctions() = default; | 233 | IDebugFunctions::~IDebugFunctions() = default; |
| 174 | 234 | ||
| 175 | ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger) | 235 | ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger) |
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index b6113cfdd..565dd8e9e 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <chrono> | ||
| 7 | #include <memory> | 8 | #include <memory> |
| 8 | #include <queue> | 9 | #include <queue> |
| 9 | #include "core/hle/kernel/writable_event.h" | 10 | #include "core/hle/kernel/writable_event.h" |
| @@ -81,8 +82,21 @@ private: | |||
| 81 | void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 82 | void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 82 | void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 83 | void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 83 | void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 84 | void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 85 | void ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx); | ||
| 86 | void SetTransparentAudioRate(Kernel::HLERequestContext& ctx); | ||
| 84 | 87 | ||
| 85 | u32 volume{100}; | 88 | static constexpr float min_allowed_volume = 0.0f; |
| 89 | static constexpr float max_allowed_volume = 1.0f; | ||
| 90 | |||
| 91 | float main_applet_volume{0.25f}; | ||
| 92 | float library_applet_volume{max_allowed_volume}; | ||
| 93 | float transparent_volume_rate{min_allowed_volume}; | ||
| 94 | |||
| 95 | // Volume transition fade time in nanoseconds. | ||
| 96 | // e.g. If the main applet volume was 0% and was changed to 50% | ||
| 97 | // with a fade of 50ns, then over the course of 50ns, | ||
| 98 | // the volume will gradually fade up to 50% | ||
| 99 | std::chrono::nanoseconds fade_time_ns{0}; | ||
| 86 | }; | 100 | }; |
| 87 | 101 | ||
| 88 | class IDisplayController final : public ServiceFramework<IDisplayController> { | 102 | class IDisplayController final : public ServiceFramework<IDisplayController> { |
diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index 11eba4a12..cb4a1160d 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp | |||
| @@ -8,44 +8,34 @@ | |||
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | 9 | ||
| 10 | #include <opus.h> | 10 | #include <opus.h> |
| 11 | #include <opus_multistream.h> | ||
| 11 | 12 | ||
| 12 | #include "common/common_funcs.h" | 13 | #include "common/assert.h" |
| 13 | #include "common/logging/log.h" | 14 | #include "common/logging/log.h" |
| 14 | #include "core/hle/ipc_helpers.h" | 15 | #include "core/hle/ipc_helpers.h" |
| 15 | #include "core/hle/kernel/hle_ipc.h" | 16 | #include "core/hle/kernel/hle_ipc.h" |
| 16 | #include "core/hle/service/audio/hwopus.h" | 17 | #include "core/hle/service/audio/hwopus.h" |
| 17 | 18 | ||
| 18 | namespace Service::Audio { | 19 | namespace Service::Audio { |
| 19 | 20 | namespace { | |
| 20 | struct OpusDeleter { | 21 | struct OpusDeleter { |
| 21 | void operator()(void* ptr) const { | 22 | void operator()(OpusMSDecoder* ptr) const { |
| 22 | operator delete(ptr); | 23 | opus_multistream_decoder_destroy(ptr); |
| 23 | } | 24 | } |
| 24 | }; | 25 | }; |
| 25 | 26 | ||
| 26 | class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { | 27 | using OpusDecoderPtr = std::unique_ptr<OpusMSDecoder, OpusDeleter>; |
| 27 | public: | ||
| 28 | IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoder, OpusDeleter> decoder, u32 sample_rate, | ||
| 29 | u32 channel_count) | ||
| 30 | : ServiceFramework("IHardwareOpusDecoderManager"), decoder(std::move(decoder)), | ||
| 31 | sample_rate(sample_rate), channel_count(channel_count) { | ||
| 32 | // clang-format off | ||
| 33 | static const FunctionInfo functions[] = { | ||
| 34 | {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, | ||
| 35 | {1, nullptr, "SetContext"}, | ||
| 36 | {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, | ||
| 37 | {3, nullptr, "SetContextForMultiStream"}, | ||
| 38 | {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, | ||
| 39 | {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, | ||
| 40 | {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, | ||
| 41 | {7, nullptr, "DecodeInterleavedForMultiStream"}, | ||
| 42 | }; | ||
| 43 | // clang-format on | ||
| 44 | 28 | ||
| 45 | RegisterHandlers(functions); | 29 | struct OpusPacketHeader { |
| 46 | } | 30 | // Packet size in bytes. |
| 31 | u32_be size; | ||
| 32 | // Indicates the final range of the codec's entropy coder. | ||
| 33 | u32_be final_range; | ||
| 34 | }; | ||
| 35 | static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size"); | ||
| 47 | 36 | ||
| 48 | private: | 37 | class OpusDecoderState { |
| 38 | public: | ||
| 49 | /// Describes extra behavior that may be asked of the decoding context. | 39 | /// Describes extra behavior that may be asked of the decoding context. |
| 50 | enum class ExtraBehavior { | 40 | enum class ExtraBehavior { |
| 51 | /// No extra behavior. | 41 | /// No extra behavior. |
| @@ -55,30 +45,27 @@ private: | |||
| 55 | ResetContext, | 45 | ResetContext, |
| 56 | }; | 46 | }; |
| 57 | 47 | ||
| 58 | void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { | 48 | enum class PerfTime { |
| 59 | LOG_DEBUG(Audio, "called"); | 49 | Disabled, |
| 60 | 50 | Enabled, | |
| 61 | DecodeInterleavedHelper(ctx, nullptr, ExtraBehavior::None); | 51 | }; |
| 62 | } | ||
| 63 | |||
| 64 | void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { | ||
| 65 | LOG_DEBUG(Audio, "called"); | ||
| 66 | |||
| 67 | u64 performance = 0; | ||
| 68 | DecodeInterleavedHelper(ctx, &performance, ExtraBehavior::None); | ||
| 69 | } | ||
| 70 | |||
| 71 | void DecodeInterleaved(Kernel::HLERequestContext& ctx) { | ||
| 72 | LOG_DEBUG(Audio, "called"); | ||
| 73 | |||
| 74 | IPC::RequestParser rp{ctx}; | ||
| 75 | const auto extra_behavior = | ||
| 76 | rp.Pop<bool>() ? ExtraBehavior::ResetContext : ExtraBehavior::None; | ||
| 77 | 52 | ||
| 78 | u64 performance = 0; | 53 | explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count) |
| 79 | DecodeInterleavedHelper(ctx, &performance, extra_behavior); | 54 | : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {} |
| 55 | |||
| 56 | // Decodes interleaved Opus packets. Optionally allows reporting time taken to | ||
| 57 | // perform the decoding, as well as any relevant extra behavior. | ||
| 58 | void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, | ||
| 59 | ExtraBehavior extra_behavior) { | ||
| 60 | if (perf_time == PerfTime::Disabled) { | ||
| 61 | DecodeInterleavedHelper(ctx, nullptr, extra_behavior); | ||
| 62 | } else { | ||
| 63 | u64 performance = 0; | ||
| 64 | DecodeInterleavedHelper(ctx, &performance, extra_behavior); | ||
| 65 | } | ||
| 80 | } | 66 | } |
| 81 | 67 | ||
| 68 | private: | ||
| 82 | void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, | 69 | void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, |
| 83 | ExtraBehavior extra_behavior) { | 70 | ExtraBehavior extra_behavior) { |
| 84 | u32 consumed = 0; | 71 | u32 consumed = 0; |
| @@ -89,8 +76,7 @@ private: | |||
| 89 | ResetDecoderContext(); | 76 | ResetDecoderContext(); |
| 90 | } | 77 | } |
| 91 | 78 | ||
| 92 | if (!Decoder_DecodeInterleaved(consumed, sample_count, ctx.ReadBuffer(), samples, | 79 | if (!DecodeOpusData(consumed, sample_count, ctx.ReadBuffer(), samples, performance)) { |
| 93 | performance)) { | ||
| 94 | LOG_ERROR(Audio, "Failed to decode opus data"); | 80 | LOG_ERROR(Audio, "Failed to decode opus data"); |
| 95 | IPC::ResponseBuilder rb{ctx, 2}; | 81 | IPC::ResponseBuilder rb{ctx, 2}; |
| 96 | // TODO(ogniK): Use correct error code | 82 | // TODO(ogniK): Use correct error code |
| @@ -109,27 +95,27 @@ private: | |||
| 109 | ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); | 95 | ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); |
| 110 | } | 96 | } |
| 111 | 97 | ||
| 112 | bool Decoder_DecodeInterleaved(u32& consumed, u32& sample_count, const std::vector<u8>& input, | 98 | bool DecodeOpusData(u32& consumed, u32& sample_count, const std::vector<u8>& input, |
| 113 | std::vector<opus_int16>& output, u64* out_performance_time) { | 99 | std::vector<opus_int16>& output, u64* out_performance_time) const { |
| 114 | const auto start_time = std::chrono::high_resolution_clock::now(); | 100 | const auto start_time = std::chrono::high_resolution_clock::now(); |
| 115 | const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); | 101 | const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); |
| 116 | if (sizeof(OpusHeader) > input.size()) { | 102 | if (sizeof(OpusPacketHeader) > input.size()) { |
| 117 | LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", | 103 | LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", |
| 118 | sizeof(OpusHeader), input.size()); | 104 | sizeof(OpusPacketHeader), input.size()); |
| 119 | return false; | 105 | return false; |
| 120 | } | 106 | } |
| 121 | 107 | ||
| 122 | OpusHeader hdr{}; | 108 | OpusPacketHeader hdr{}; |
| 123 | std::memcpy(&hdr, input.data(), sizeof(OpusHeader)); | 109 | std::memcpy(&hdr, input.data(), sizeof(OpusPacketHeader)); |
| 124 | if (sizeof(OpusHeader) + static_cast<u32>(hdr.sz) > input.size()) { | 110 | if (sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size) > input.size()) { |
| 125 | LOG_ERROR(Audio, "Input does not fit in the opus header size. data_sz={}, input_sz={}", | 111 | LOG_ERROR(Audio, "Input does not fit in the opus header size. data_sz={}, input_sz={}", |
| 126 | sizeof(OpusHeader) + static_cast<u32>(hdr.sz), input.size()); | 112 | sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size), input.size()); |
| 127 | return false; | 113 | return false; |
| 128 | } | 114 | } |
| 129 | 115 | ||
| 130 | const auto frame = input.data() + sizeof(OpusHeader); | 116 | const auto frame = input.data() + sizeof(OpusPacketHeader); |
| 131 | const auto decoded_sample_count = opus_packet_get_nb_samples( | 117 | const auto decoded_sample_count = opus_packet_get_nb_samples( |
| 132 | frame, static_cast<opus_int32>(input.size() - sizeof(OpusHeader)), | 118 | frame, static_cast<opus_int32>(input.size() - sizeof(OpusPacketHeader)), |
| 133 | static_cast<opus_int32>(sample_rate)); | 119 | static_cast<opus_int32>(sample_rate)); |
| 134 | if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { | 120 | if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { |
| 135 | LOG_ERROR( | 121 | LOG_ERROR( |
| @@ -141,18 +127,18 @@ private: | |||
| 141 | 127 | ||
| 142 | const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); | 128 | const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); |
| 143 | const auto out_sample_count = | 129 | const auto out_sample_count = |
| 144 | opus_decode(decoder.get(), frame, hdr.sz, output.data(), frame_size, 0); | 130 | opus_multistream_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); |
| 145 | if (out_sample_count < 0) { | 131 | if (out_sample_count < 0) { |
| 146 | LOG_ERROR(Audio, | 132 | LOG_ERROR(Audio, |
| 147 | "Incorrect sample count received from opus_decode, " | 133 | "Incorrect sample count received from opus_decode, " |
| 148 | "output_sample_count={}, frame_size={}, data_sz_from_hdr={}", | 134 | "output_sample_count={}, frame_size={}, data_sz_from_hdr={}", |
| 149 | out_sample_count, frame_size, static_cast<u32>(hdr.sz)); | 135 | out_sample_count, frame_size, static_cast<u32>(hdr.size)); |
| 150 | return false; | 136 | return false; |
| 151 | } | 137 | } |
| 152 | 138 | ||
| 153 | const auto end_time = std::chrono::high_resolution_clock::now() - start_time; | 139 | const auto end_time = std::chrono::high_resolution_clock::now() - start_time; |
| 154 | sample_count = out_sample_count; | 140 | sample_count = out_sample_count; |
| 155 | consumed = static_cast<u32>(sizeof(OpusHeader) + hdr.sz); | 141 | consumed = static_cast<u32>(sizeof(OpusPacketHeader) + hdr.size); |
| 156 | if (out_performance_time != nullptr) { | 142 | if (out_performance_time != nullptr) { |
| 157 | *out_performance_time = | 143 | *out_performance_time = |
| 158 | std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); | 144 | std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); |
| @@ -164,25 +150,86 @@ private: | |||
| 164 | void ResetDecoderContext() { | 150 | void ResetDecoderContext() { |
| 165 | ASSERT(decoder != nullptr); | 151 | ASSERT(decoder != nullptr); |
| 166 | 152 | ||
| 167 | opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); | 153 | opus_multistream_decoder_ctl(decoder.get(), OPUS_RESET_STATE); |
| 168 | } | 154 | } |
| 169 | 155 | ||
| 170 | struct OpusHeader { | 156 | OpusDecoderPtr decoder; |
| 171 | u32_be sz; // Needs to be BE for some odd reason | ||
| 172 | INSERT_PADDING_WORDS(1); | ||
| 173 | }; | ||
| 174 | static_assert(sizeof(OpusHeader) == 0x8, "OpusHeader is an invalid size"); | ||
| 175 | |||
| 176 | std::unique_ptr<OpusDecoder, OpusDeleter> decoder; | ||
| 177 | u32 sample_rate; | 157 | u32 sample_rate; |
| 178 | u32 channel_count; | 158 | u32 channel_count; |
| 179 | }; | 159 | }; |
| 180 | 160 | ||
| 181 | static std::size_t WorkerBufferSize(u32 channel_count) { | 161 | class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { |
| 162 | public: | ||
| 163 | explicit IHardwareOpusDecoderManager(OpusDecoderState decoder_state) | ||
| 164 | : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} { | ||
| 165 | // clang-format off | ||
| 166 | static const FunctionInfo functions[] = { | ||
| 167 | {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, | ||
| 168 | {1, nullptr, "SetContext"}, | ||
| 169 | {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, | ||
| 170 | {3, nullptr, "SetContextForMultiStream"}, | ||
| 171 | {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, | ||
| 172 | {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, | ||
| 173 | {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, | ||
| 174 | {7, nullptr, "DecodeInterleavedForMultiStream"}, | ||
| 175 | }; | ||
| 176 | // clang-format on | ||
| 177 | |||
| 178 | RegisterHandlers(functions); | ||
| 179 | } | ||
| 180 | |||
| 181 | private: | ||
| 182 | void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { | ||
| 183 | LOG_DEBUG(Audio, "called"); | ||
| 184 | |||
| 185 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Disabled, | ||
| 186 | OpusDecoderState::ExtraBehavior::None); | ||
| 187 | } | ||
| 188 | |||
| 189 | void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { | ||
| 190 | LOG_DEBUG(Audio, "called"); | ||
| 191 | |||
| 192 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, | ||
| 193 | OpusDecoderState::ExtraBehavior::None); | ||
| 194 | } | ||
| 195 | |||
| 196 | void DecodeInterleaved(Kernel::HLERequestContext& ctx) { | ||
| 197 | LOG_DEBUG(Audio, "called"); | ||
| 198 | |||
| 199 | IPC::RequestParser rp{ctx}; | ||
| 200 | const auto extra_behavior = rp.Pop<bool>() ? OpusDecoderState::ExtraBehavior::ResetContext | ||
| 201 | : OpusDecoderState::ExtraBehavior::None; | ||
| 202 | |||
| 203 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, extra_behavior); | ||
| 204 | } | ||
| 205 | |||
| 206 | OpusDecoderState decoder_state; | ||
| 207 | }; | ||
| 208 | |||
| 209 | std::size_t WorkerBufferSize(u32 channel_count) { | ||
| 182 | ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); | 210 | ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); |
| 183 | return opus_decoder_get_size(static_cast<int>(channel_count)); | 211 | constexpr int num_streams = 1; |
| 212 | const int num_stereo_streams = channel_count == 2 ? 1 : 0; | ||
| 213 | return opus_multistream_decoder_get_size(num_streams, num_stereo_streams); | ||
| 184 | } | 214 | } |
| 185 | 215 | ||
| 216 | // Creates the mapping table that maps the input channels to the particular | ||
| 217 | // output channels. In the stereo case, we map the left and right input channels | ||
| 218 | // to the left and right output channels respectively. | ||
| 219 | // | ||
| 220 | // However, in the monophonic case, we only map the one available channel | ||
| 221 | // to the sole output channel. We specify 255 for the would-be right channel | ||
| 222 | // as this is a special value defined by Opus to indicate to the decoder to | ||
| 223 | // ignore that channel. | ||
| 224 | std::array<u8, 2> CreateMappingTable(u32 channel_count) { | ||
| 225 | if (channel_count == 2) { | ||
| 226 | return {{0, 1}}; | ||
| 227 | } | ||
| 228 | |||
| 229 | return {{0, 255}}; | ||
| 230 | } | ||
| 231 | } // Anonymous namespace | ||
| 232 | |||
| 186 | void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { | 233 | void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { |
| 187 | IPC::RequestParser rp{ctx}; | 234 | IPC::RequestParser rp{ctx}; |
| 188 | const auto sample_rate = rp.Pop<u32>(); | 235 | const auto sample_rate = rp.Pop<u32>(); |
| @@ -220,10 +267,15 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { | |||
| 220 | const std::size_t worker_sz = WorkerBufferSize(channel_count); | 267 | const std::size_t worker_sz = WorkerBufferSize(channel_count); |
| 221 | ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); | 268 | ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); |
| 222 | 269 | ||
| 223 | std::unique_ptr<OpusDecoder, OpusDeleter> decoder{ | 270 | const int num_stereo_streams = channel_count == 2 ? 1 : 0; |
| 224 | static_cast<OpusDecoder*>(operator new(worker_sz))}; | 271 | const auto mapping_table = CreateMappingTable(channel_count); |
| 225 | if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { | 272 | |
| 226 | LOG_ERROR(Audio, "Failed to init opus decoder with error={}", err); | 273 | int error = 0; |
| 274 | OpusDecoderPtr decoder{ | ||
| 275 | opus_multistream_decoder_create(sample_rate, static_cast<int>(channel_count), 1, | ||
| 276 | num_stereo_streams, mapping_table.data(), &error)}; | ||
| 277 | if (error != OPUS_OK || decoder == nullptr) { | ||
| 278 | LOG_ERROR(Audio, "Failed to create Opus decoder (error={}).", error); | ||
| 227 | IPC::ResponseBuilder rb{ctx, 2}; | 279 | IPC::ResponseBuilder rb{ctx, 2}; |
| 228 | // TODO(ogniK): Use correct error code | 280 | // TODO(ogniK): Use correct error code |
| 229 | rb.Push(ResultCode(-1)); | 281 | rb.Push(ResultCode(-1)); |
| @@ -232,8 +284,8 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { | |||
| 232 | 284 | ||
| 233 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | 285 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; |
| 234 | rb.Push(RESULT_SUCCESS); | 286 | rb.Push(RESULT_SUCCESS); |
| 235 | rb.PushIpcInterface<IHardwareOpusDecoderManager>(std::move(decoder), sample_rate, | 287 | rb.PushIpcInterface<IHardwareOpusDecoderManager>( |
| 236 | channel_count); | 288 | OpusDecoderState{std::move(decoder), sample_rate, channel_count}); |
| 237 | } | 289 | } |
| 238 | 290 | ||
| 239 | HwOpus::HwOpus() : ServiceFramework("hwopus") { | 291 | HwOpus::HwOpus() : ServiceFramework("hwopus") { |
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index 54959edd8..f03fb629c 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp | |||
| @@ -733,7 +733,10 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { | |||
| 733 | FSP_SRV::~FSP_SRV() = default; | 733 | FSP_SRV::~FSP_SRV() = default; |
| 734 | 734 | ||
| 735 | void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { | 735 | void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { |
| 736 | LOG_WARNING(Service_FS, "(STUBBED) called"); | 736 | IPC::RequestParser rp{ctx}; |
| 737 | current_process_id = rp.Pop<u64>(); | ||
| 738 | |||
| 739 | LOG_DEBUG(Service_FS, "called. current_process_id=0x{:016X}", current_process_id); | ||
| 737 | 740 | ||
| 738 | IPC::ResponseBuilder rb{ctx, 2}; | 741 | IPC::ResponseBuilder rb{ctx, 2}; |
| 739 | rb.Push(RESULT_SUCCESS); | 742 | rb.Push(RESULT_SUCCESS); |
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h index 3a5f4e200..d7572ba7a 100644 --- a/src/core/hle/service/filesystem/fsp_srv.h +++ b/src/core/hle/service/filesystem/fsp_srv.h | |||
| @@ -32,6 +32,7 @@ private: | |||
| 32 | void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); | 32 | void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); |
| 33 | 33 | ||
| 34 | FileSys::VirtualFile romfs; | 34 | FileSys::VirtualFile romfs; |
| 35 | u64 current_process_id = 0; | ||
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 37 | } // namespace Service::FileSystem | 38 | } // namespace Service::FileSystem |
diff --git a/src/core/hle/service/hid/controllers/debug_pad.h b/src/core/hle/service/hid/controllers/debug_pad.h index 929035034..e584b92ec 100644 --- a/src/core/hle/service/hid/controllers/debug_pad.h +++ b/src/core/hle/service/hid/controllers/debug_pad.h | |||
| @@ -41,20 +41,20 @@ private: | |||
| 41 | struct PadState { | 41 | struct PadState { |
| 42 | union { | 42 | union { |
| 43 | u32_le raw{}; | 43 | u32_le raw{}; |
| 44 | BitField<0, 1, u32_le> a; | 44 | BitField<0, 1, u32> a; |
| 45 | BitField<1, 1, u32_le> b; | 45 | BitField<1, 1, u32> b; |
| 46 | BitField<2, 1, u32_le> x; | 46 | BitField<2, 1, u32> x; |
| 47 | BitField<3, 1, u32_le> y; | 47 | BitField<3, 1, u32> y; |
| 48 | BitField<4, 1, u32_le> l; | 48 | BitField<4, 1, u32> l; |
| 49 | BitField<5, 1, u32_le> r; | 49 | BitField<5, 1, u32> r; |
| 50 | BitField<6, 1, u32_le> zl; | 50 | BitField<6, 1, u32> zl; |
| 51 | BitField<7, 1, u32_le> zr; | 51 | BitField<7, 1, u32> zr; |
| 52 | BitField<8, 1, u32_le> plus; | 52 | BitField<8, 1, u32> plus; |
| 53 | BitField<9, 1, u32_le> minus; | 53 | BitField<9, 1, u32> minus; |
| 54 | BitField<10, 1, u32_le> d_left; | 54 | BitField<10, 1, u32> d_left; |
| 55 | BitField<11, 1, u32_le> d_up; | 55 | BitField<11, 1, u32> d_up; |
| 56 | BitField<12, 1, u32_le> d_right; | 56 | BitField<12, 1, u32> d_right; |
| 57 | BitField<13, 1, u32_le> d_down; | 57 | BitField<13, 1, u32> d_down; |
| 58 | }; | 58 | }; |
| 59 | }; | 59 | }; |
| 60 | static_assert(sizeof(PadState) == 0x4, "PadState is an invalid size"); | 60 | static_assert(sizeof(PadState) == 0x4, "PadState is an invalid size"); |
| @@ -62,7 +62,7 @@ private: | |||
| 62 | struct Attributes { | 62 | struct Attributes { |
| 63 | union { | 63 | union { |
| 64 | u32_le raw{}; | 64 | u32_le raw{}; |
| 65 | BitField<0, 1, u32_le> connected; | 65 | BitField<0, 1, u32> connected; |
| 66 | }; | 66 | }; |
| 67 | }; | 67 | }; |
| 68 | static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); | 68 | static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); |
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h index 18c7a94e6..4ff50b3cd 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h | |||
| @@ -39,13 +39,13 @@ public: | |||
| 39 | union { | 39 | union { |
| 40 | u32_le raw{}; | 40 | u32_le raw{}; |
| 41 | 41 | ||
| 42 | BitField<0, 1, u32_le> pro_controller; | 42 | BitField<0, 1, u32> pro_controller; |
| 43 | BitField<1, 1, u32_le> handheld; | 43 | BitField<1, 1, u32> handheld; |
| 44 | BitField<2, 1, u32_le> joycon_dual; | 44 | BitField<2, 1, u32> joycon_dual; |
| 45 | BitField<3, 1, u32_le> joycon_left; | 45 | BitField<3, 1, u32> joycon_left; |
| 46 | BitField<4, 1, u32_le> joycon_right; | 46 | BitField<4, 1, u32> joycon_right; |
| 47 | 47 | ||
| 48 | BitField<6, 1, u32_le> pokeball; // TODO(ogniK): Confirm when possible | 48 | BitField<6, 1, u32> pokeball; // TODO(ogniK): Confirm when possible |
| 49 | }; | 49 | }; |
| 50 | }; | 50 | }; |
| 51 | static_assert(sizeof(NPadType) == 4, "NPadType is an invalid size"); | 51 | static_assert(sizeof(NPadType) == 4, "NPadType is an invalid size"); |
| @@ -150,43 +150,43 @@ private: | |||
| 150 | union { | 150 | union { |
| 151 | u64_le raw{}; | 151 | u64_le raw{}; |
| 152 | // Button states | 152 | // Button states |
| 153 | BitField<0, 1, u64_le> a; | 153 | BitField<0, 1, u64> a; |
| 154 | BitField<1, 1, u64_le> b; | 154 | BitField<1, 1, u64> b; |
| 155 | BitField<2, 1, u64_le> x; | 155 | BitField<2, 1, u64> x; |
| 156 | BitField<3, 1, u64_le> y; | 156 | BitField<3, 1, u64> y; |
| 157 | BitField<4, 1, u64_le> l_stick; | 157 | BitField<4, 1, u64> l_stick; |
| 158 | BitField<5, 1, u64_le> r_stick; | 158 | BitField<5, 1, u64> r_stick; |
| 159 | BitField<6, 1, u64_le> l; | 159 | BitField<6, 1, u64> l; |
| 160 | BitField<7, 1, u64_le> r; | 160 | BitField<7, 1, u64> r; |
| 161 | BitField<8, 1, u64_le> zl; | 161 | BitField<8, 1, u64> zl; |
| 162 | BitField<9, 1, u64_le> zr; | 162 | BitField<9, 1, u64> zr; |
| 163 | BitField<10, 1, u64_le> plus; | 163 | BitField<10, 1, u64> plus; |
| 164 | BitField<11, 1, u64_le> minus; | 164 | BitField<11, 1, u64> minus; |
| 165 | 165 | ||
| 166 | // D-Pad | 166 | // D-Pad |
| 167 | BitField<12, 1, u64_le> d_left; | 167 | BitField<12, 1, u64> d_left; |
| 168 | BitField<13, 1, u64_le> d_up; | 168 | BitField<13, 1, u64> d_up; |
| 169 | BitField<14, 1, u64_le> d_right; | 169 | BitField<14, 1, u64> d_right; |
| 170 | BitField<15, 1, u64_le> d_down; | 170 | BitField<15, 1, u64> d_down; |
| 171 | 171 | ||
| 172 | // Left JoyStick | 172 | // Left JoyStick |
| 173 | BitField<16, 1, u64_le> l_stick_left; | 173 | BitField<16, 1, u64> l_stick_left; |
| 174 | BitField<17, 1, u64_le> l_stick_up; | 174 | BitField<17, 1, u64> l_stick_up; |
| 175 | BitField<18, 1, u64_le> l_stick_right; | 175 | BitField<18, 1, u64> l_stick_right; |
| 176 | BitField<19, 1, u64_le> l_stick_down; | 176 | BitField<19, 1, u64> l_stick_down; |
| 177 | 177 | ||
| 178 | // Right JoyStick | 178 | // Right JoyStick |
| 179 | BitField<20, 1, u64_le> r_stick_left; | 179 | BitField<20, 1, u64> r_stick_left; |
| 180 | BitField<21, 1, u64_le> r_stick_up; | 180 | BitField<21, 1, u64> r_stick_up; |
| 181 | BitField<22, 1, u64_le> r_stick_right; | 181 | BitField<22, 1, u64> r_stick_right; |
| 182 | BitField<23, 1, u64_le> r_stick_down; | 182 | BitField<23, 1, u64> r_stick_down; |
| 183 | 183 | ||
| 184 | // Not always active? | 184 | // Not always active? |
| 185 | BitField<24, 1, u64_le> left_sl; | 185 | BitField<24, 1, u64> left_sl; |
| 186 | BitField<25, 1, u64_le> left_sr; | 186 | BitField<25, 1, u64> left_sr; |
| 187 | 187 | ||
| 188 | BitField<26, 1, u64_le> right_sl; | 188 | BitField<26, 1, u64> right_sl; |
| 189 | BitField<27, 1, u64_le> right_sr; | 189 | BitField<27, 1, u64> right_sr; |
| 190 | }; | 190 | }; |
| 191 | }; | 191 | }; |
| 192 | static_assert(sizeof(ControllerPadState) == 8, "ControllerPadState is an invalid size"); | 192 | static_assert(sizeof(ControllerPadState) == 8, "ControllerPadState is an invalid size"); |
| @@ -200,12 +200,12 @@ private: | |||
| 200 | struct ConnectionState { | 200 | struct ConnectionState { |
| 201 | union { | 201 | union { |
| 202 | u32_le raw{}; | 202 | u32_le raw{}; |
| 203 | BitField<0, 1, u32_le> IsConnected; | 203 | BitField<0, 1, u32> IsConnected; |
| 204 | BitField<1, 1, u32_le> IsWired; | 204 | BitField<1, 1, u32> IsWired; |
| 205 | BitField<2, 1, u32_le> IsLeftJoyConnected; | 205 | BitField<2, 1, u32> IsLeftJoyConnected; |
| 206 | BitField<3, 1, u32_le> IsLeftJoyWired; | 206 | BitField<3, 1, u32> IsLeftJoyWired; |
| 207 | BitField<4, 1, u32_le> IsRightJoyConnected; | 207 | BitField<4, 1, u32> IsRightJoyConnected; |
| 208 | BitField<5, 1, u32_le> IsRightJoyWired; | 208 | BitField<5, 1, u32> IsRightJoyWired; |
| 209 | }; | 209 | }; |
| 210 | }; | 210 | }; |
| 211 | static_assert(sizeof(ConnectionState) == 4, "ConnectionState is an invalid size"); | 211 | static_assert(sizeof(ConnectionState) == 4, "ConnectionState is an invalid size"); |
| @@ -240,23 +240,23 @@ private: | |||
| 240 | struct NPadProperties { | 240 | struct NPadProperties { |
| 241 | union { | 241 | union { |
| 242 | s64_le raw{}; | 242 | s64_le raw{}; |
| 243 | BitField<11, 1, s64_le> is_vertical; | 243 | BitField<11, 1, s64> is_vertical; |
| 244 | BitField<12, 1, s64_le> is_horizontal; | 244 | BitField<12, 1, s64> is_horizontal; |
| 245 | BitField<13, 1, s64_le> use_plus; | 245 | BitField<13, 1, s64> use_plus; |
| 246 | BitField<14, 1, s64_le> use_minus; | 246 | BitField<14, 1, s64> use_minus; |
| 247 | }; | 247 | }; |
| 248 | }; | 248 | }; |
| 249 | 249 | ||
| 250 | struct NPadDevice { | 250 | struct NPadDevice { |
| 251 | union { | 251 | union { |
| 252 | u32_le raw{}; | 252 | u32_le raw{}; |
| 253 | BitField<0, 1, s32_le> pro_controller; | 253 | BitField<0, 1, s32> pro_controller; |
| 254 | BitField<1, 1, s32_le> handheld; | 254 | BitField<1, 1, s32> handheld; |
| 255 | BitField<2, 1, s32_le> handheld_left; | 255 | BitField<2, 1, s32> handheld_left; |
| 256 | BitField<3, 1, s32_le> handheld_right; | 256 | BitField<3, 1, s32> handheld_right; |
| 257 | BitField<4, 1, s32_le> joycon_left; | 257 | BitField<4, 1, s32> joycon_left; |
| 258 | BitField<5, 1, s32_le> joycon_right; | 258 | BitField<5, 1, s32> joycon_right; |
| 259 | BitField<6, 1, s32_le> pokeball; | 259 | BitField<6, 1, s32> pokeball; |
| 260 | }; | 260 | }; |
| 261 | }; | 261 | }; |
| 262 | 262 | ||
diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h index 012b6e0dd..76fc340e9 100644 --- a/src/core/hle/service/hid/controllers/touchscreen.h +++ b/src/core/hle/service/hid/controllers/touchscreen.h | |||
| @@ -33,8 +33,8 @@ private: | |||
| 33 | struct Attributes { | 33 | struct Attributes { |
| 34 | union { | 34 | union { |
| 35 | u32 raw{}; | 35 | u32 raw{}; |
| 36 | BitField<0, 1, u32_le> start_touch; | 36 | BitField<0, 1, u32> start_touch; |
| 37 | BitField<1, 1, u32_le> end_touch; | 37 | BitField<1, 1, u32> end_touch; |
| 38 | }; | 38 | }; |
| 39 | }; | 39 | }; |
| 40 | static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); | 40 | static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); |
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index 7cc58db4c..498602de5 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h | |||
| @@ -4,6 +4,9 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/hid/controllers/controller_base.h" | ||
| 8 | #include "core/hle/service/service.h" | ||
| 9 | |||
| 7 | #include "controllers/controller_base.h" | 10 | #include "controllers/controller_base.h" |
| 8 | #include "core/hle/service/service.h" | 11 | #include "core/hle/service/service.h" |
| 9 | 12 | ||
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index 9df7ac50f..d65693fc7 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp | |||
| @@ -319,15 +319,14 @@ public: | |||
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | ASSERT(vm_manager | 321 | ASSERT(vm_manager |
| 322 | .MirrorMemory(*map_address, nro_addr, nro_size, | 322 | .MirrorMemory(*map_address, nro_addr, nro_size, Kernel::MemoryState::ModuleCode) |
| 323 | Kernel::MemoryState::ModuleCodeStatic) | ||
| 324 | .IsSuccess()); | 323 | .IsSuccess()); |
| 325 | ASSERT(vm_manager.UnmapRange(nro_addr, nro_size).IsSuccess()); | 324 | ASSERT(vm_manager.UnmapRange(nro_addr, nro_size).IsSuccess()); |
| 326 | 325 | ||
| 327 | if (bss_size > 0) { | 326 | if (bss_size > 0) { |
| 328 | ASSERT(vm_manager | 327 | ASSERT(vm_manager |
| 329 | .MirrorMemory(*map_address + nro_size, bss_addr, bss_size, | 328 | .MirrorMemory(*map_address + nro_size, bss_addr, bss_size, |
| 330 | Kernel::MemoryState::ModuleCodeStatic) | 329 | Kernel::MemoryState::ModuleCode) |
| 331 | .IsSuccess()); | 330 | .IsSuccess()); |
| 332 | ASSERT(vm_manager.UnmapRange(bss_addr, bss_size).IsSuccess()); | 331 | ASSERT(vm_manager.UnmapRange(bss_addr, bss_size).IsSuccess()); |
| 333 | } | 332 | } |
| @@ -388,8 +387,7 @@ public: | |||
| 388 | const auto& nro_size = iter->second.size; | 387 | const auto& nro_size = iter->second.size; |
| 389 | 388 | ||
| 390 | ASSERT(vm_manager | 389 | ASSERT(vm_manager |
| 391 | .MirrorMemory(heap_addr, mapped_addr, nro_size, | 390 | .MirrorMemory(heap_addr, mapped_addr, nro_size, Kernel::MemoryState::ModuleCode) |
| 392 | Kernel::MemoryState::ModuleCodeStatic) | ||
| 393 | .IsSuccess()); | 391 | .IsSuccess()); |
| 394 | ASSERT(vm_manager.UnmapRange(mapped_addr, nro_size).IsSuccess()); | 392 | ASSERT(vm_manager.UnmapRange(mapped_addr, nro_size).IsSuccess()); |
| 395 | 393 | ||
diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp index 1f462e087..2a61593e2 100644 --- a/src/core/hle/service/lm/lm.cpp +++ b/src/core/hle/service/lm/lm.cpp | |||
| @@ -42,7 +42,7 @@ private: | |||
| 42 | union { | 42 | union { |
| 43 | BitField<0, 16, Flags> flags; | 43 | BitField<0, 16, Flags> flags; |
| 44 | BitField<16, 8, Severity> severity; | 44 | BitField<16, 8, Severity> severity; |
| 45 | BitField<24, 8, u32_le> verbosity; | 45 | BitField<24, 8, u32> verbosity; |
| 46 | }; | 46 | }; |
| 47 | u32_le payload_size; | 47 | u32_le payload_size; |
| 48 | 48 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index 0f02a1a18..4f6042b00 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h | |||
| @@ -19,11 +19,11 @@ public: | |||
| 19 | virtual ~nvdevice() = default; | 19 | virtual ~nvdevice() = default; |
| 20 | union Ioctl { | 20 | union Ioctl { |
| 21 | u32_le raw; | 21 | u32_le raw; |
| 22 | BitField<0, 8, u32_le> cmd; | 22 | BitField<0, 8, u32> cmd; |
| 23 | BitField<8, 8, u32_le> group; | 23 | BitField<8, 8, u32> group; |
| 24 | BitField<16, 14, u32_le> length; | 24 | BitField<16, 14, u32> length; |
| 25 | BitField<30, 1, u32_le> is_in; | 25 | BitField<30, 1, u32> is_in; |
| 26 | BitField<31, 1, u32_le> is_out; | 26 | BitField<31, 1, u32> is_out; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | /** | 29 | /** |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index a34b9e753..af62d33d2 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" | 11 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" |
| 12 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 12 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 13 | #include "core/memory.h" | ||
| 13 | #include "video_core/memory_manager.h" | 14 | #include "video_core/memory_manager.h" |
| 14 | #include "video_core/rasterizer_interface.h" | 15 | #include "video_core/rasterizer_interface.h" |
| 15 | #include "video_core/renderer_base.h" | 16 | #include "video_core/renderer_base.h" |
| @@ -88,7 +89,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 88 | for (const auto& entry : entries) { | 89 | for (const auto& entry : entries) { |
| 89 | LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", | 90 | LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", |
| 90 | entry.offset, entry.nvmap_handle, entry.pages); | 91 | entry.offset, entry.nvmap_handle, entry.pages); |
| 91 | Tegra::GPUVAddr offset = static_cast<Tegra::GPUVAddr>(entry.offset) << 0x10; | 92 | GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << 0x10; |
| 92 | auto object = nvmap_dev->GetObject(entry.nvmap_handle); | 93 | auto object = nvmap_dev->GetObject(entry.nvmap_handle); |
| 93 | if (!object) { | 94 | if (!object) { |
| 94 | LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle); | 95 | LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle); |
| @@ -101,7 +102,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 101 | u64 size = static_cast<u64>(entry.pages) << 0x10; | 102 | u64 size = static_cast<u64>(entry.pages) << 0x10; |
| 102 | ASSERT(size <= object->size); | 103 | ASSERT(size <= object->size); |
| 103 | 104 | ||
| 104 | Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size); | 105 | GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size); |
| 105 | ASSERT(returned == offset); | 106 | ASSERT(returned == offset); |
| 106 | } | 107 | } |
| 107 | std::memcpy(output.data(), entries.data(), output.size()); | 108 | std::memcpy(output.data(), entries.data(), output.size()); |
| @@ -172,16 +173,8 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou | |||
| 172 | return 0; | 173 | return 0; |
| 173 | } | 174 | } |
| 174 | 175 | ||
| 175 | auto& system_instance = Core::System::GetInstance(); | 176 | params.offset = Core::System::GetInstance().GPU().MemoryManager().UnmapBuffer(params.offset, |
| 176 | 177 | itr->second.size); | |
| 177 | // Remove this memory region from the rasterizer cache. | ||
| 178 | auto& gpu = system_instance.GPU(); | ||
| 179 | auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset); | ||
| 180 | ASSERT(cpu_addr); | ||
| 181 | gpu.FlushAndInvalidateRegion(*cpu_addr, itr->second.size); | ||
| 182 | |||
| 183 | params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size); | ||
| 184 | |||
| 185 | buffer_mappings.erase(itr->second.offset); | 178 | buffer_mappings.erase(itr->second.offset); |
| 186 | 179 | ||
| 187 | std::memcpy(output.data(), ¶ms, output.size()); | 180 | std::memcpy(output.data(), ¶ms, output.size()); |
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index 117f87a45..00806b0ed 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | #include "core/hle/ipc.h" | 11 | #include "core/hle/ipc.h" |
| 12 | #include "core/hle/ipc_helpers.h" | 12 | #include "core/hle/ipc_helpers.h" |
| 13 | #include "core/hle/kernel/client_port.h" | 13 | #include "core/hle/kernel/client_port.h" |
| 14 | #include "core/hle/kernel/handle_table.h" | ||
| 15 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| 16 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| 17 | #include "core/hle/kernel/server_port.h" | 16 | #include "core/hle/kernel/server_port.h" |
| @@ -76,7 +75,8 @@ namespace Service { | |||
| 76 | * Creates a function string for logging, complete with the name (or header code, depending | 75 | * Creates a function string for logging, complete with the name (or header code, depending |
| 77 | * on what's passed in) the port name, and all the cmd_buff arguments. | 76 | * on what's passed in) the port name, and all the cmd_buff arguments. |
| 78 | */ | 77 | */ |
| 79 | [[maybe_unused]] static std::string MakeFunctionString(const char* name, const char* port_name, | 78 | [[maybe_unused]] static std::string MakeFunctionString(std::string_view name, |
| 79 | std::string_view port_name, | ||
| 80 | const u32* cmd_buff) { | 80 | const u32* cmd_buff) { |
| 81 | // Number of params == bits 0-5 + bits 6-11 | 81 | // Number of params == bits 0-5 + bits 6-11 |
| 82 | int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F); | 82 | int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F); |
| @@ -158,9 +158,7 @@ void ServiceFrameworkBase::InvokeRequest(Kernel::HLERequestContext& ctx) { | |||
| 158 | return ReportUnimplementedFunction(ctx, info); | 158 | return ReportUnimplementedFunction(ctx, info); |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | LOG_TRACE( | 161 | LOG_TRACE(Service, "{}", MakeFunctionString(info->name, GetServiceName(), ctx.CommandBuffer())); |
| 162 | Service, "{}", | ||
| 163 | MakeFunctionString(info->name, GetServiceName().c_str(), ctx.CommandBuffer()).c_str()); | ||
| 164 | handler_invoker(this, info->handler_callback, ctx); | 162 | handler_invoker(this, info->handler_callback, ctx); |
| 165 | } | 163 | } |
| 166 | 164 | ||
| @@ -169,7 +167,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co | |||
| 169 | case IPC::CommandType::Close: { | 167 | case IPC::CommandType::Close: { |
| 170 | IPC::ResponseBuilder rb{context, 2}; | 168 | IPC::ResponseBuilder rb{context, 2}; |
| 171 | rb.Push(RESULT_SUCCESS); | 169 | rb.Push(RESULT_SUCCESS); |
| 172 | return ResultCode(ErrorModule::HIPC, ErrorDescription::RemoteProcessDead); | 170 | return IPC::ERR_REMOTE_PROCESS_DEAD; |
| 173 | } | 171 | } |
| 174 | case IPC::CommandType::ControlWithContext: | 172 | case IPC::CommandType::ControlWithContext: |
| 175 | case IPC::CommandType::Control: { | 173 | case IPC::CommandType::Control: { |
diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index bef25433e..b9d6381b4 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h | |||
| @@ -67,7 +67,7 @@ public: | |||
| 67 | if (port == nullptr) { | 67 | if (port == nullptr) { |
| 68 | return nullptr; | 68 | return nullptr; |
| 69 | } | 69 | } |
| 70 | return std::static_pointer_cast<T>(port->hle_handler); | 70 | return std::static_pointer_cast<T>(port->GetHLEHandler()); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | void InvokeControlRequest(Kernel::HLERequestContext& context); | 73 | void InvokeControlRequest(Kernel::HLERequestContext& context); |
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index a975767bb..566cd6006 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "core/hle/service/nvdrv/nvdrv.h" | 24 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 25 | #include "core/hle/service/nvflinger/buffer_queue.h" | 25 | #include "core/hle/service/nvflinger/buffer_queue.h" |
| 26 | #include "core/hle/service/nvflinger/nvflinger.h" | 26 | #include "core/hle/service/nvflinger/nvflinger.h" |
| 27 | #include "core/hle/service/service.h" | ||
| 27 | #include "core/hle/service/vi/vi.h" | 28 | #include "core/hle/service/vi/vi.h" |
| 28 | #include "core/hle/service/vi/vi_m.h" | 29 | #include "core/hle/service/vi/vi_m.h" |
| 29 | #include "core/hle/service/vi/vi_s.h" | 30 | #include "core/hle/service/vi/vi_s.h" |
| @@ -33,6 +34,7 @@ | |||
| 33 | namespace Service::VI { | 34 | namespace Service::VI { |
| 34 | 35 | ||
| 35 | constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::VI, 1}; | 36 | constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::VI, 1}; |
| 37 | constexpr ResultCode ERR_PERMISSION_DENIED{ErrorModule::VI, 5}; | ||
| 36 | constexpr ResultCode ERR_UNSUPPORTED{ErrorModule::VI, 6}; | 38 | constexpr ResultCode ERR_UNSUPPORTED{ErrorModule::VI, 6}; |
| 37 | constexpr ResultCode ERR_NOT_FOUND{ErrorModule::VI, 7}; | 39 | constexpr ResultCode ERR_NOT_FOUND{ErrorModule::VI, 7}; |
| 38 | 40 | ||
| @@ -1203,26 +1205,40 @@ IApplicationDisplayService::IApplicationDisplayService( | |||
| 1203 | RegisterHandlers(functions); | 1205 | RegisterHandlers(functions); |
| 1204 | } | 1206 | } |
| 1205 | 1207 | ||
| 1206 | Module::Interface::Interface(std::shared_ptr<Module> module, const char* name, | 1208 | static bool IsValidServiceAccess(Permission permission, Policy policy) { |
| 1207 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 1209 | if (permission == Permission::User) { |
| 1208 | : ServiceFramework(name), module(std::move(module)), nv_flinger(std::move(nv_flinger)) {} | 1210 | return policy == Policy::User; |
| 1211 | } | ||
| 1212 | |||
| 1213 | if (permission == Permission::System || permission == Permission::Manager) { | ||
| 1214 | return policy == Policy::User || policy == Policy::Compositor; | ||
| 1215 | } | ||
| 1209 | 1216 | ||
| 1210 | Module::Interface::~Interface() = default; | 1217 | return false; |
| 1218 | } | ||
| 1211 | 1219 | ||
| 1212 | void Module::Interface::GetDisplayService(Kernel::HLERequestContext& ctx) { | 1220 | void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, |
| 1213 | LOG_WARNING(Service_VI, "(STUBBED) called"); | 1221 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, |
| 1222 | Permission permission) { | ||
| 1223 | IPC::RequestParser rp{ctx}; | ||
| 1224 | const auto policy = rp.PopEnum<Policy>(); | ||
| 1225 | |||
| 1226 | if (!IsValidServiceAccess(permission, policy)) { | ||
| 1227 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1228 | rb.Push(ERR_PERMISSION_DENIED); | ||
| 1229 | return; | ||
| 1230 | } | ||
| 1214 | 1231 | ||
| 1215 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | 1232 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; |
| 1216 | rb.Push(RESULT_SUCCESS); | 1233 | rb.Push(RESULT_SUCCESS); |
| 1217 | rb.PushIpcInterface<IApplicationDisplayService>(nv_flinger); | 1234 | rb.PushIpcInterface<IApplicationDisplayService>(std::move(nv_flinger)); |
| 1218 | } | 1235 | } |
| 1219 | 1236 | ||
| 1220 | void InstallInterfaces(SM::ServiceManager& service_manager, | 1237 | void InstallInterfaces(SM::ServiceManager& service_manager, |
| 1221 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) { | 1238 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) { |
| 1222 | auto module = std::make_shared<Module>(); | 1239 | std::make_shared<VI_M>(nv_flinger)->InstallAsService(service_manager); |
| 1223 | std::make_shared<VI_M>(module, nv_flinger)->InstallAsService(service_manager); | 1240 | std::make_shared<VI_S>(nv_flinger)->InstallAsService(service_manager); |
| 1224 | std::make_shared<VI_S>(module, nv_flinger)->InstallAsService(service_manager); | 1241 | std::make_shared<VI_U>(nv_flinger)->InstallAsService(service_manager); |
| 1225 | std::make_shared<VI_U>(module, nv_flinger)->InstallAsService(service_manager); | ||
| 1226 | } | 1242 | } |
| 1227 | 1243 | ||
| 1228 | } // namespace Service::VI | 1244 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi.h b/src/core/hle/service/vi/vi.h index e3963502a..6b66f8b81 100644 --- a/src/core/hle/service/vi/vi.h +++ b/src/core/hle/service/vi/vi.h | |||
| @@ -4,12 +4,21 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/service.h" | 7 | #include <memory> |
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Kernel { | ||
| 11 | class HLERequestContext; | ||
| 12 | } | ||
| 8 | 13 | ||
| 9 | namespace Service::NVFlinger { | 14 | namespace Service::NVFlinger { |
| 10 | class NVFlinger; | 15 | class NVFlinger; |
| 11 | } | 16 | } |
| 12 | 17 | ||
| 18 | namespace Service::SM { | ||
| 19 | class ServiceManager; | ||
| 20 | } | ||
| 21 | |||
| 13 | namespace Service::VI { | 22 | namespace Service::VI { |
| 14 | 23 | ||
| 15 | enum class DisplayResolution : u32 { | 24 | enum class DisplayResolution : u32 { |
| @@ -19,22 +28,25 @@ enum class DisplayResolution : u32 { | |||
| 19 | UndockedHeight = 720, | 28 | UndockedHeight = 720, |
| 20 | }; | 29 | }; |
| 21 | 30 | ||
| 22 | class Module final { | 31 | /// Permission level for a particular VI service instance |
| 23 | public: | 32 | enum class Permission { |
| 24 | class Interface : public ServiceFramework<Interface> { | 33 | User, |
| 25 | public: | 34 | System, |
| 26 | explicit Interface(std::shared_ptr<Module> module, const char* name, | 35 | Manager, |
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 36 | }; |
| 28 | ~Interface() override; | ||
| 29 | |||
| 30 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 31 | 37 | ||
| 32 | protected: | 38 | /// A policy type that may be requested via GetDisplayService and |
| 33 | std::shared_ptr<Module> module; | 39 | /// GetDisplayServiceWithProxyNameExchange |
| 34 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | 40 | enum class Policy { |
| 35 | }; | 41 | User, |
| 42 | Compositor, | ||
| 36 | }; | 43 | }; |
| 37 | 44 | ||
| 45 | namespace detail { | ||
| 46 | void GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, | ||
| 47 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, Permission permission); | ||
| 48 | } // namespace detail | ||
| 49 | |||
| 38 | /// Registers all VI services with the specified service manager. | 50 | /// Registers all VI services with the specified service manager. |
| 39 | void InstallInterfaces(SM::ServiceManager& service_manager, | 51 | void InstallInterfaces(SM::ServiceManager& service_manager, |
| 40 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 52 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
diff --git a/src/core/hle/service/vi/vi_m.cpp b/src/core/hle/service/vi/vi_m.cpp index 207c06b16..06070087f 100644 --- a/src/core/hle/service/vi/vi_m.cpp +++ b/src/core/hle/service/vi/vi_m.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_m.h" | 7 | #include "core/hle/service/vi/vi_m.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_M::VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:m", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:m"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {2, &VI_M::GetDisplayService, "GetDisplayService"}, | 14 | {2, &VI_M::GetDisplayService, "GetDisplayService"}, |
| 13 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, | 15 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, |
| @@ -17,4 +19,10 @@ VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 17 | 19 | ||
| 18 | VI_M::~VI_M() = default; | 20 | VI_M::~VI_M() = default; |
| 19 | 21 | ||
| 22 | void VI_M::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 23 | LOG_DEBUG(Service_VI, "called"); | ||
| 24 | |||
| 25 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::Manager); | ||
| 26 | } | ||
| 27 | |||
| 20 | } // namespace Service::VI | 28 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_m.h b/src/core/hle/service/vi/vi_m.h index 487d58d50..290e06689 100644 --- a/src/core/hle/service/vi/vi_m.h +++ b/src/core/hle/service/vi/vi_m.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_M final : public Module::Interface { | 19 | class VI_M final : public ServiceFramework<VI_M> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_M() override; | 22 | ~VI_M() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_s.cpp b/src/core/hle/service/vi/vi_s.cpp index 920e6a1f6..57c596cc4 100644 --- a/src/core/hle/service/vi/vi_s.cpp +++ b/src/core/hle/service/vi/vi_s.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_s.h" | 7 | #include "core/hle/service/vi/vi_s.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_S::VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:s", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:s"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {1, &VI_S::GetDisplayService, "GetDisplayService"}, | 14 | {1, &VI_S::GetDisplayService, "GetDisplayService"}, |
| 13 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, | 15 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, |
| @@ -17,4 +19,10 @@ VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 17 | 19 | ||
| 18 | VI_S::~VI_S() = default; | 20 | VI_S::~VI_S() = default; |
| 19 | 21 | ||
| 22 | void VI_S::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 23 | LOG_DEBUG(Service_VI, "called"); | ||
| 24 | |||
| 25 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::System); | ||
| 26 | } | ||
| 27 | |||
| 20 | } // namespace Service::VI | 28 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_s.h b/src/core/hle/service/vi/vi_s.h index bbc31148f..47804dc0b 100644 --- a/src/core/hle/service/vi/vi_s.h +++ b/src/core/hle/service/vi/vi_s.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_S final : public Module::Interface { | 19 | class VI_S final : public ServiceFramework<VI_S> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_S() override; | 22 | ~VI_S() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_u.cpp b/src/core/hle/service/vi/vi_u.cpp index d81e410d6..9d5ceb608 100644 --- a/src/core/hle/service/vi/vi_u.cpp +++ b/src/core/hle/service/vi/vi_u.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_u.h" | 7 | #include "core/hle/service/vi/vi_u.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_U::VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:u", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:u"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {0, &VI_U::GetDisplayService, "GetDisplayService"}, | 14 | {0, &VI_U::GetDisplayService, "GetDisplayService"}, |
| 13 | }; | 15 | }; |
| @@ -16,4 +18,10 @@ VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 16 | 18 | ||
| 17 | VI_U::~VI_U() = default; | 19 | VI_U::~VI_U() = default; |
| 18 | 20 | ||
| 21 | void VI_U::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 22 | LOG_DEBUG(Service_VI, "called"); | ||
| 23 | |||
| 24 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::User); | ||
| 25 | } | ||
| 26 | |||
| 19 | } // namespace Service::VI | 27 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_u.h b/src/core/hle/service/vi/vi_u.h index b92f28c92..19bdb73b0 100644 --- a/src/core/hle/service/vi/vi_u.h +++ b/src/core/hle/service/vi/vi_u.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_U final : public Module::Interface { | 19 | class VI_U final : public ServiceFramework<VI_U> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_U() override; | 22 | ~VI_U() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 6057c7f26..8b1920f22 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/file_util.h" | 10 | #include "common/file_util.h" |
| 11 | #include "common/logging/log.h" | 11 | #include "common/logging/log.h" |
| 12 | #include "core/hle/kernel/code_set.h" | ||
| 12 | #include "core/hle/kernel/process.h" | 13 | #include "core/hle/kernel/process.h" |
| 13 | #include "core/hle/kernel/vm_manager.h" | 14 | #include "core/hle/kernel/vm_manager.h" |
| 14 | #include "core/loader/elf.h" | 15 | #include "core/loader/elf.h" |
diff --git a/src/core/loader/linker.cpp b/src/core/loader/linker.cpp deleted file mode 100644 index 57ca8c3ee..000000000 --- a/src/core/loader/linker.cpp +++ /dev/null | |||
| @@ -1,147 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <vector> | ||
| 6 | |||
| 7 | #include "common/common_funcs.h" | ||
| 8 | #include "common/logging/log.h" | ||
| 9 | #include "common/swap.h" | ||
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/memory.h" | ||
| 12 | |||
| 13 | namespace Loader { | ||
| 14 | |||
| 15 | enum class RelocationType : u32 { ABS64 = 257, GLOB_DAT = 1025, JUMP_SLOT = 1026, RELATIVE = 1027 }; | ||
| 16 | |||
| 17 | enum DynamicType : u32 { | ||
| 18 | DT_NULL = 0, | ||
| 19 | DT_PLTRELSZ = 2, | ||
| 20 | DT_STRTAB = 5, | ||
| 21 | DT_SYMTAB = 6, | ||
| 22 | DT_RELA = 7, | ||
| 23 | DT_RELASZ = 8, | ||
| 24 | DT_STRSZ = 10, | ||
| 25 | DT_JMPREL = 23, | ||
| 26 | }; | ||
| 27 | |||
| 28 | struct Elf64_Rela { | ||
| 29 | u64_le offset; | ||
| 30 | RelocationType type; | ||
| 31 | u32_le symbol; | ||
| 32 | s64_le addend; | ||
| 33 | }; | ||
| 34 | static_assert(sizeof(Elf64_Rela) == 0x18, "Elf64_Rela has incorrect size."); | ||
| 35 | |||
| 36 | struct Elf64_Dyn { | ||
| 37 | u64_le tag; | ||
| 38 | u64_le value; | ||
| 39 | }; | ||
| 40 | static_assert(sizeof(Elf64_Dyn) == 0x10, "Elf64_Dyn has incorrect size."); | ||
| 41 | |||
| 42 | struct Elf64_Sym { | ||
| 43 | u32_le name; | ||
| 44 | INSERT_PADDING_BYTES(0x2); | ||
| 45 | u16_le shndx; | ||
| 46 | u64_le value; | ||
| 47 | u64_le size; | ||
| 48 | }; | ||
| 49 | static_assert(sizeof(Elf64_Sym) == 0x18, "Elf64_Sym has incorrect size."); | ||
| 50 | |||
| 51 | void Linker::WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols, | ||
| 52 | u64 relocation_offset, u64 size, VAddr load_base) { | ||
| 53 | for (u64 i = 0; i < size; i += sizeof(Elf64_Rela)) { | ||
| 54 | Elf64_Rela rela; | ||
| 55 | std::memcpy(&rela, &program_image[relocation_offset + i], sizeof(Elf64_Rela)); | ||
| 56 | |||
| 57 | const Symbol& symbol = symbols[rela.symbol]; | ||
| 58 | switch (rela.type) { | ||
| 59 | case RelocationType::RELATIVE: { | ||
| 60 | const u64 value = load_base + rela.addend; | ||
| 61 | if (!symbol.name.empty()) { | ||
| 62 | exports[symbol.name] = value; | ||
| 63 | } | ||
| 64 | std::memcpy(&program_image[rela.offset], &value, sizeof(u64)); | ||
| 65 | break; | ||
| 66 | } | ||
| 67 | case RelocationType::JUMP_SLOT: | ||
| 68 | case RelocationType::GLOB_DAT: | ||
| 69 | if (!symbol.value) { | ||
| 70 | imports[symbol.name] = {rela.offset + load_base, 0}; | ||
| 71 | } else { | ||
| 72 | exports[symbol.name] = symbol.value; | ||
| 73 | std::memcpy(&program_image[rela.offset], &symbol.value, sizeof(u64)); | ||
| 74 | } | ||
| 75 | break; | ||
| 76 | case RelocationType::ABS64: | ||
| 77 | if (!symbol.value) { | ||
| 78 | imports[symbol.name] = {rela.offset + load_base, rela.addend}; | ||
| 79 | } else { | ||
| 80 | const u64 value = symbol.value + rela.addend; | ||
| 81 | exports[symbol.name] = value; | ||
| 82 | std::memcpy(&program_image[rela.offset], &value, sizeof(u64)); | ||
| 83 | } | ||
| 84 | break; | ||
| 85 | default: | ||
| 86 | LOG_CRITICAL(Loader, "Unknown relocation type: {}", static_cast<int>(rela.type)); | ||
| 87 | break; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | void Linker::Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base) { | ||
| 93 | std::map<u64, u64> dynamic; | ||
| 94 | while (dynamic_section_offset < program_image.size()) { | ||
| 95 | Elf64_Dyn dyn; | ||
| 96 | std::memcpy(&dyn, &program_image[dynamic_section_offset], sizeof(Elf64_Dyn)); | ||
| 97 | dynamic_section_offset += sizeof(Elf64_Dyn); | ||
| 98 | |||
| 99 | if (dyn.tag == DT_NULL) { | ||
| 100 | break; | ||
| 101 | } | ||
| 102 | dynamic[dyn.tag] = dyn.value; | ||
| 103 | } | ||
| 104 | |||
| 105 | u64 offset = dynamic[DT_SYMTAB]; | ||
| 106 | std::vector<Symbol> symbols; | ||
| 107 | while (offset < program_image.size()) { | ||
| 108 | Elf64_Sym sym; | ||
| 109 | std::memcpy(&sym, &program_image[offset], sizeof(Elf64_Sym)); | ||
| 110 | offset += sizeof(Elf64_Sym); | ||
| 111 | |||
| 112 | if (sym.name >= dynamic[DT_STRSZ]) { | ||
| 113 | break; | ||
| 114 | } | ||
| 115 | |||
| 116 | std::string name = reinterpret_cast<char*>(&program_image[dynamic[DT_STRTAB] + sym.name]); | ||
| 117 | if (sym.value) { | ||
| 118 | exports[name] = load_base + sym.value; | ||
| 119 | symbols.emplace_back(std::move(name), load_base + sym.value); | ||
| 120 | } else { | ||
| 121 | symbols.emplace_back(std::move(name), 0); | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | if (dynamic.find(DT_RELA) != dynamic.end()) { | ||
| 126 | WriteRelocations(program_image, symbols, dynamic[DT_RELA], dynamic[DT_RELASZ], load_base); | ||
| 127 | } | ||
| 128 | |||
| 129 | if (dynamic.find(DT_JMPREL) != dynamic.end()) { | ||
| 130 | WriteRelocations(program_image, symbols, dynamic[DT_JMPREL], dynamic[DT_PLTRELSZ], | ||
| 131 | load_base); | ||
| 132 | } | ||
| 133 | } | ||
| 134 | |||
| 135 | void Linker::ResolveImports() { | ||
| 136 | // Resolve imports | ||
| 137 | for (const auto& import : imports) { | ||
| 138 | const auto& search = exports.find(import.first); | ||
| 139 | if (search != exports.end()) { | ||
| 140 | Memory::Write64(import.second.ea, search->second + import.second.addend); | ||
| 141 | } else { | ||
| 142 | LOG_ERROR(Loader, "Unresolved import: {}", import.first); | ||
| 143 | } | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | } // namespace Loader | ||
diff --git a/src/core/loader/linker.h b/src/core/loader/linker.h deleted file mode 100644 index 107625837..000000000 --- a/src/core/loader/linker.h +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <map> | ||
| 8 | #include <string> | ||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | namespace Loader { | ||
| 12 | |||
| 13 | class Linker { | ||
| 14 | protected: | ||
| 15 | struct Symbol { | ||
| 16 | Symbol(std::string&& name, u64 value) : name(std::move(name)), value(value) {} | ||
| 17 | std::string name; | ||
| 18 | u64 value; | ||
| 19 | }; | ||
| 20 | |||
| 21 | struct Import { | ||
| 22 | VAddr ea; | ||
| 23 | s64 addend; | ||
| 24 | }; | ||
| 25 | |||
| 26 | void WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols, | ||
| 27 | u64 relocation_offset, u64 size, VAddr load_base); | ||
| 28 | void Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base); | ||
| 29 | |||
| 30 | void ResolveImports(); | ||
| 31 | |||
| 32 | std::map<std::string, Import> imports; | ||
| 33 | std::map<std::string, VAddr> exports; | ||
| 34 | }; | ||
| 35 | |||
| 36 | } // namespace Loader | ||
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 4fad0c0dd..5de02a94b 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include "core/file_sys/romfs_factory.h" | 14 | #include "core/file_sys/romfs_factory.h" |
| 15 | #include "core/file_sys/vfs_offset.h" | 15 | #include "core/file_sys/vfs_offset.h" |
| 16 | #include "core/gdbstub/gdbstub.h" | 16 | #include "core/gdbstub/gdbstub.h" |
| 17 | #include "core/hle/kernel/code_set.h" | ||
| 17 | #include "core/hle/kernel/process.h" | 18 | #include "core/hle/kernel/process.h" |
| 18 | #include "core/hle/kernel/vm_manager.h" | 19 | #include "core/hle/kernel/vm_manager.h" |
| 19 | #include "core/hle/service/filesystem/filesystem.h" | 20 | #include "core/hle/service/filesystem/filesystem.h" |
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h index 013d629c0..85b0ed644 100644 --- a/src/core/loader/nro.h +++ b/src/core/loader/nro.h | |||
| @@ -4,10 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <memory> | ||
| 7 | #include <string> | 8 | #include <string> |
| 8 | #include <vector> | 9 | #include <vector> |
| 9 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/loader/loader.h" | 11 | #include "core/loader/loader.h" |
| 12 | 12 | ||
| 13 | namespace FileSys { | 13 | namespace FileSys { |
| @@ -21,7 +21,7 @@ class Process; | |||
| 21 | namespace Loader { | 21 | namespace Loader { |
| 22 | 22 | ||
| 23 | /// Loads an NRO file | 23 | /// Loads an NRO file |
| 24 | class AppLoader_NRO final : public AppLoader, Linker { | 24 | class AppLoader_NRO final : public AppLoader { |
| 25 | public: | 25 | public: |
| 26 | explicit AppLoader_NRO(FileSys::VirtualFile file); | 26 | explicit AppLoader_NRO(FileSys::VirtualFile file); |
| 27 | ~AppLoader_NRO() override; | 27 | ~AppLoader_NRO() override; |
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 6ded0b707..714d85a59 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp | |||
| @@ -7,10 +7,13 @@ | |||
| 7 | #include <lz4.h> | 7 | #include <lz4.h> |
| 8 | #include "common/common_funcs.h" | 8 | #include "common/common_funcs.h" |
| 9 | #include "common/file_util.h" | 9 | #include "common/file_util.h" |
| 10 | #include "common/hex_util.h" | ||
| 10 | #include "common/logging/log.h" | 11 | #include "common/logging/log.h" |
| 11 | #include "common/swap.h" | 12 | #include "common/swap.h" |
| 13 | #include "core/core.h" | ||
| 12 | #include "core/file_sys/patch_manager.h" | 14 | #include "core/file_sys/patch_manager.h" |
| 13 | #include "core/gdbstub/gdbstub.h" | 15 | #include "core/gdbstub/gdbstub.h" |
| 16 | #include "core/hle/kernel/code_set.h" | ||
| 14 | #include "core/hle/kernel/process.h" | 17 | #include "core/hle/kernel/process.h" |
| 15 | #include "core/hle/kernel/vm_manager.h" | 18 | #include "core/hle/kernel/vm_manager.h" |
| 16 | #include "core/loader/nso.h" | 19 | #include "core/loader/nso.h" |
| @@ -18,36 +21,8 @@ | |||
| 18 | #include "core/settings.h" | 21 | #include "core/settings.h" |
| 19 | 22 | ||
| 20 | namespace Loader { | 23 | namespace Loader { |
| 21 | 24 | namespace { | |
| 22 | struct NsoSegmentHeader { | 25 | struct MODHeader { |
| 23 | u32_le offset; | ||
| 24 | u32_le location; | ||
| 25 | u32_le size; | ||
| 26 | union { | ||
| 27 | u32_le alignment; | ||
| 28 | u32_le bss_size; | ||
| 29 | }; | ||
| 30 | }; | ||
| 31 | static_assert(sizeof(NsoSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size."); | ||
| 32 | |||
| 33 | struct NsoHeader { | ||
| 34 | u32_le magic; | ||
| 35 | u32_le version; | ||
| 36 | INSERT_PADDING_WORDS(1); | ||
| 37 | u8 flags; | ||
| 38 | std::array<NsoSegmentHeader, 3> segments; // Text, RoData, Data (in that order) | ||
| 39 | std::array<u8, 0x20> build_id; | ||
| 40 | std::array<u32_le, 3> segments_compressed_size; | ||
| 41 | |||
| 42 | bool IsSegmentCompressed(size_t segment_num) const { | ||
| 43 | ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num); | ||
| 44 | return ((flags >> segment_num) & 1); | ||
| 45 | } | ||
| 46 | }; | ||
| 47 | static_assert(sizeof(NsoHeader) == 0x6c, "NsoHeader has incorrect size."); | ||
| 48 | static_assert(std::is_trivially_copyable_v<NsoHeader>, "NsoHeader isn't trivially copyable."); | ||
| 49 | |||
| 50 | struct ModHeader { | ||
| 51 | u32_le magic; | 26 | u32_le magic; |
| 52 | u32_le dynamic_offset; | 27 | u32_le dynamic_offset; |
| 53 | u32_le bss_start_offset; | 28 | u32_le bss_start_offset; |
| @@ -56,25 +31,10 @@ struct ModHeader { | |||
| 56 | u32_le eh_frame_hdr_end_offset; | 31 | u32_le eh_frame_hdr_end_offset; |
| 57 | u32_le module_offset; // Offset to runtime-generated module object. typically equal to .bss base | 32 | u32_le module_offset; // Offset to runtime-generated module object. typically equal to .bss base |
| 58 | }; | 33 | }; |
| 59 | static_assert(sizeof(ModHeader) == 0x1c, "ModHeader has incorrect size."); | 34 | static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size."); |
| 60 | |||
| 61 | AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {} | ||
| 62 | 35 | ||
| 63 | FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { | 36 | std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, |
| 64 | u32 magic = 0; | 37 | const NSOSegmentHeader& header) { |
| 65 | if (file->ReadObject(&magic) != sizeof(magic)) { | ||
| 66 | return FileType::Error; | ||
| 67 | } | ||
| 68 | |||
| 69 | if (Common::MakeMagic('N', 'S', 'O', '0') != magic) { | ||
| 70 | return FileType::Error; | ||
| 71 | } | ||
| 72 | |||
| 73 | return FileType::NSO; | ||
| 74 | } | ||
| 75 | |||
| 76 | static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, | ||
| 77 | const NsoSegmentHeader& header) { | ||
| 78 | std::vector<u8> uncompressed_data(header.size); | 38 | std::vector<u8> uncompressed_data(header.size); |
| 79 | const int bytes_uncompressed = | 39 | const int bytes_uncompressed = |
| 80 | LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()), | 40 | LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()), |
| @@ -88,23 +48,47 @@ static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, | |||
| 88 | return uncompressed_data; | 48 | return uncompressed_data; |
| 89 | } | 49 | } |
| 90 | 50 | ||
| 91 | static constexpr u32 PageAlignSize(u32 size) { | 51 | constexpr u32 PageAlignSize(u32 size) { |
| 92 | return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; | 52 | return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; |
| 93 | } | 53 | } |
| 54 | } // Anonymous namespace | ||
| 55 | |||
| 56 | bool NSOHeader::IsSegmentCompressed(size_t segment_num) const { | ||
| 57 | ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num); | ||
| 58 | return ((flags >> segment_num) & 1) != 0; | ||
| 59 | } | ||
| 60 | |||
| 61 | AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {} | ||
| 62 | |||
| 63 | FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { | ||
| 64 | u32 magic = 0; | ||
| 65 | if (file->ReadObject(&magic) != sizeof(magic)) { | ||
| 66 | return FileType::Error; | ||
| 67 | } | ||
| 68 | |||
| 69 | if (Common::MakeMagic('N', 'S', 'O', '0') != magic) { | ||
| 70 | return FileType::Error; | ||
| 71 | } | ||
| 72 | |||
| 73 | return FileType::NSO; | ||
| 74 | } | ||
| 94 | 75 | ||
| 95 | std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, | 76 | std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, |
| 96 | const FileSys::VfsFile& file, VAddr load_base, | 77 | const FileSys::VfsFile& file, VAddr load_base, |
| 97 | bool should_pass_arguments, | 78 | bool should_pass_arguments, |
| 98 | std::optional<FileSys::PatchManager> pm) { | 79 | std::optional<FileSys::PatchManager> pm) { |
| 99 | if (file.GetSize() < sizeof(NsoHeader)) | 80 | if (file.GetSize() < sizeof(NSOHeader)) { |
| 100 | return {}; | 81 | return {}; |
| 82 | } | ||
| 101 | 83 | ||
| 102 | NsoHeader nso_header{}; | 84 | NSOHeader nso_header{}; |
| 103 | if (sizeof(NsoHeader) != file.ReadObject(&nso_header)) | 85 | if (sizeof(NSOHeader) != file.ReadObject(&nso_header)) { |
| 104 | return {}; | 86 | return {}; |
| 87 | } | ||
| 105 | 88 | ||
| 106 | if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) | 89 | if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) { |
| 107 | return {}; | 90 | return {}; |
| 91 | } | ||
| 108 | 92 | ||
| 109 | // Build program image | 93 | // Build program image |
| 110 | Kernel::CodeSet codeset; | 94 | Kernel::CodeSet codeset; |
| @@ -140,10 +124,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, | |||
| 140 | std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); | 124 | std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); |
| 141 | 125 | ||
| 142 | // Read MOD header | 126 | // Read MOD header |
| 143 | ModHeader mod_header{}; | 127 | MODHeader mod_header{}; |
| 144 | // Default .bss to size in segment header if MOD0 section doesn't exist | 128 | // Default .bss to size in segment header if MOD0 section doesn't exist |
| 145 | u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; | 129 | u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; |
| 146 | std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(ModHeader)); | 130 | std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader)); |
| 147 | const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; | 131 | const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; |
| 148 | if (has_mod_header) { | 132 | if (has_mod_header) { |
| 149 | // Resize program image to include .bss section and page align each section | 133 | // Resize program image to include .bss section and page align each section |
| @@ -155,13 +139,25 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, | |||
| 155 | 139 | ||
| 156 | // Apply patches if necessary | 140 | // Apply patches if necessary |
| 157 | if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { | 141 | if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { |
| 158 | std::vector<u8> pi_header(program_image.size() + 0x100); | 142 | std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size()); |
| 159 | std::memcpy(pi_header.data(), &nso_header, sizeof(NsoHeader)); | 143 | pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header), |
| 160 | std::memcpy(pi_header.data() + 0x100, program_image.data(), program_image.size()); | 144 | reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader)); |
| 145 | pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(), | ||
| 146 | program_image.end()); | ||
| 161 | 147 | ||
| 162 | pi_header = pm->PatchNSO(pi_header); | 148 | pi_header = pm->PatchNSO(pi_header); |
| 163 | 149 | ||
| 164 | std::memcpy(program_image.data(), pi_header.data() + 0x100, program_image.size()); | 150 | std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin()); |
| 151 | } | ||
| 152 | |||
| 153 | // Apply cheats if they exist and the program has a valid title ID | ||
| 154 | if (pm) { | ||
| 155 | auto& system = Core::System::GetInstance(); | ||
| 156 | const auto cheats = pm->CreateCheatList(system, nso_header.build_id); | ||
| 157 | if (!cheats.empty()) { | ||
| 158 | system.RegisterCheatList(cheats, Common::HexArrayToString(nso_header.build_id), | ||
| 159 | load_base, load_base + program_image.size()); | ||
| 160 | } | ||
| 165 | } | 161 | } |
| 166 | 162 | ||
| 167 | // Load codeset for current process | 163 | // Load codeset for current process |
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h index 135b6ea5a..4674c3724 100644 --- a/src/core/loader/nso.h +++ b/src/core/loader/nso.h | |||
| @@ -4,10 +4,12 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | ||
| 7 | #include <optional> | 8 | #include <optional> |
| 9 | #include <type_traits> | ||
| 8 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "common/swap.h" | ||
| 9 | #include "core/file_sys/patch_manager.h" | 12 | #include "core/file_sys/patch_manager.h" |
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/loader/loader.h" | 13 | #include "core/loader/loader.h" |
| 12 | 14 | ||
| 13 | namespace Kernel { | 15 | namespace Kernel { |
| @@ -16,6 +18,43 @@ class Process; | |||
| 16 | 18 | ||
| 17 | namespace Loader { | 19 | namespace Loader { |
| 18 | 20 | ||
| 21 | struct NSOSegmentHeader { | ||
| 22 | u32_le offset; | ||
| 23 | u32_le location; | ||
| 24 | u32_le size; | ||
| 25 | union { | ||
| 26 | u32_le alignment; | ||
| 27 | u32_le bss_size; | ||
| 28 | }; | ||
| 29 | }; | ||
| 30 | static_assert(sizeof(NSOSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size."); | ||
| 31 | |||
| 32 | struct NSOHeader { | ||
| 33 | using SHA256Hash = std::array<u8, 0x20>; | ||
| 34 | |||
| 35 | struct RODataRelativeExtent { | ||
| 36 | u32_le data_offset; | ||
| 37 | u32_le size; | ||
| 38 | }; | ||
| 39 | |||
| 40 | u32_le magic; | ||
| 41 | u32_le version; | ||
| 42 | u32 reserved; | ||
| 43 | u32_le flags; | ||
| 44 | std::array<NSOSegmentHeader, 3> segments; // Text, RoData, Data (in that order) | ||
| 45 | std::array<u8, 0x20> build_id; | ||
| 46 | std::array<u32_le, 3> segments_compressed_size; | ||
| 47 | std::array<u8, 0x1C> padding; | ||
| 48 | RODataRelativeExtent api_info_extent; | ||
| 49 | RODataRelativeExtent dynstr_extent; | ||
| 50 | RODataRelativeExtent dynsyn_extent; | ||
| 51 | std::array<SHA256Hash, 3> segment_hashes; | ||
| 52 | |||
| 53 | bool IsSegmentCompressed(size_t segment_num) const; | ||
| 54 | }; | ||
| 55 | static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size."); | ||
| 56 | static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable."); | ||
| 57 | |||
| 19 | constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; | 58 | constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; |
| 20 | 59 | ||
| 21 | struct NSOArgumentHeader { | 60 | struct NSOArgumentHeader { |
| @@ -26,7 +65,7 @@ struct NSOArgumentHeader { | |||
| 26 | static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size."); | 65 | static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size."); |
| 27 | 66 | ||
| 28 | /// Loads an NSO file | 67 | /// Loads an NSO file |
| 29 | class AppLoader_NSO final : public AppLoader, Linker { | 68 | class AppLoader_NSO final : public AppLoader { |
| 30 | public: | 69 | public: |
| 31 | explicit AppLoader_NSO(FileSys::VirtualFile file); | 70 | explicit AppLoader_NSO(FileSys::VirtualFile file); |
| 32 | 71 | ||
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 6591c45d2..332c1037c 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/logging/log.h" | 12 | #include "common/logging/log.h" |
| 13 | #include "common/page_table.h" | ||
| 13 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 14 | #include "core/arm/arm_interface.h" | 15 | #include "core/arm/arm_interface.h" |
| 15 | #include "core/core.h" | 16 | #include "core/core.h" |
| @@ -18,13 +19,14 @@ | |||
| 18 | #include "core/hle/lock.h" | 19 | #include "core/hle/lock.h" |
| 19 | #include "core/memory.h" | 20 | #include "core/memory.h" |
| 20 | #include "core/memory_setup.h" | 21 | #include "core/memory_setup.h" |
| 22 | #include "video_core/gpu.h" | ||
| 21 | #include "video_core/renderer_base.h" | 23 | #include "video_core/renderer_base.h" |
| 22 | 24 | ||
| 23 | namespace Memory { | 25 | namespace Memory { |
| 24 | 26 | ||
| 25 | static PageTable* current_page_table = nullptr; | 27 | static Common::PageTable* current_page_table = nullptr; |
| 26 | 28 | ||
| 27 | void SetCurrentPageTable(PageTable* page_table) { | 29 | void SetCurrentPageTable(Common::PageTable* page_table) { |
| 28 | current_page_table = page_table; | 30 | current_page_table = page_table; |
| 29 | 31 | ||
| 30 | auto& system = Core::System::GetInstance(); | 32 | auto& system = Core::System::GetInstance(); |
| @@ -36,39 +38,20 @@ void SetCurrentPageTable(PageTable* page_table) { | |||
| 36 | } | 38 | } |
| 37 | } | 39 | } |
| 38 | 40 | ||
| 39 | PageTable* GetCurrentPageTable() { | 41 | Common::PageTable* GetCurrentPageTable() { |
| 40 | return current_page_table; | 42 | return current_page_table; |
| 41 | } | 43 | } |
| 42 | 44 | ||
| 43 | PageTable::PageTable() = default; | 45 | static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, |
| 44 | 46 | Common::PageType type) { | |
| 45 | PageTable::PageTable(std::size_t address_space_width_in_bits) { | ||
| 46 | Resize(address_space_width_in_bits); | ||
| 47 | } | ||
| 48 | |||
| 49 | PageTable::~PageTable() = default; | ||
| 50 | |||
| 51 | void PageTable::Resize(std::size_t address_space_width_in_bits) { | ||
| 52 | const std::size_t num_page_table_entries = 1ULL << (address_space_width_in_bits - PAGE_BITS); | ||
| 53 | |||
| 54 | pointers.resize(num_page_table_entries); | ||
| 55 | attributes.resize(num_page_table_entries); | ||
| 56 | |||
| 57 | // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the | ||
| 58 | // vector size is subsequently decreased (via resize), the vector might not automatically | ||
| 59 | // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for | ||
| 60 | // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use. | ||
| 61 | |||
| 62 | pointers.shrink_to_fit(); | ||
| 63 | attributes.shrink_to_fit(); | ||
| 64 | } | ||
| 65 | |||
| 66 | static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) { | ||
| 67 | LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, | 47 | LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, |
| 68 | (base + size) * PAGE_SIZE); | 48 | (base + size) * PAGE_SIZE); |
| 69 | 49 | ||
| 70 | RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, | 50 | // During boot, current_page_table might not be set yet, in which case we need not flush |
| 71 | FlushMode::FlushAndInvalidate); | 51 | if (Core::System::GetInstance().IsPoweredOn()) { |
| 52 | Core::System::GetInstance().GPU().FlushAndInvalidateRegion(base << PAGE_BITS, | ||
| 53 | size * PAGE_SIZE); | ||
| 54 | } | ||
| 72 | 55 | ||
| 73 | VAddr end = base + size; | 56 | VAddr end = base + size; |
| 74 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", | 57 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", |
| @@ -88,41 +71,47 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa | |||
| 88 | } | 71 | } |
| 89 | } | 72 | } |
| 90 | 73 | ||
| 91 | void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) { | 74 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { |
| 92 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 75 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 93 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 76 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 94 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); | 77 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); |
| 95 | } | 78 | } |
| 96 | 79 | ||
| 97 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) { | 80 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, |
| 81 | Common::MemoryHookPointer mmio_handler) { | ||
| 98 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 82 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 99 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 83 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 100 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); | 84 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special); |
| 101 | 85 | ||
| 102 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 86 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 103 | SpecialRegion region{SpecialRegion::Type::IODevice, std::move(mmio_handler)}; | 87 | Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)}; |
| 104 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | 88 | page_table.special_regions.add( |
| 89 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 105 | } | 90 | } |
| 106 | 91 | ||
| 107 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size) { | 92 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { |
| 108 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 93 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 109 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 94 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 110 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped); | 95 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped); |
| 111 | 96 | ||
| 112 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 97 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 113 | page_table.special_regions.erase(interval); | 98 | page_table.special_regions.erase(interval); |
| 114 | } | 99 | } |
| 115 | 100 | ||
| 116 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { | 101 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 102 | Common::MemoryHookPointer hook) { | ||
| 117 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 103 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 118 | SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; | 104 | Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; |
| 119 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | 105 | page_table.special_regions.add( |
| 106 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 120 | } | 107 | } |
| 121 | 108 | ||
| 122 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { | 109 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 110 | Common::MemoryHookPointer hook) { | ||
| 123 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 111 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 124 | SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; | 112 | Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; |
| 125 | page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region})); | 113 | page_table.special_regions.subtract( |
| 114 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 126 | } | 115 | } |
| 127 | 116 | ||
| 128 | /** | 117 | /** |
| @@ -171,19 +160,19 @@ T Read(const VAddr vaddr) { | |||
| 171 | return value; | 160 | return value; |
| 172 | } | 161 | } |
| 173 | 162 | ||
| 174 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | 163 | Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 175 | switch (type) { | 164 | switch (type) { |
| 176 | case PageType::Unmapped: | 165 | case Common::PageType::Unmapped: |
| 177 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); | 166 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); |
| 178 | return 0; | 167 | return 0; |
| 179 | case PageType::Memory: | 168 | case Common::PageType::Memory: |
| 180 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 169 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 181 | break; | 170 | break; |
| 182 | case PageType::RasterizerCachedMemory: { | 171 | case Common::PageType::RasterizerCachedMemory: { |
| 183 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush); | 172 | auto host_ptr{GetPointerFromVMA(vaddr)}; |
| 184 | 173 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); | |
| 185 | T value; | 174 | T value; |
| 186 | std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T)); | 175 | std::memcpy(&value, host_ptr, sizeof(T)); |
| 187 | return value; | 176 | return value; |
| 188 | } | 177 | } |
| 189 | default: | 178 | default: |
| @@ -201,18 +190,19 @@ void Write(const VAddr vaddr, const T data) { | |||
| 201 | return; | 190 | return; |
| 202 | } | 191 | } |
| 203 | 192 | ||
| 204 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | 193 | Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 205 | switch (type) { | 194 | switch (type) { |
| 206 | case PageType::Unmapped: | 195 | case Common::PageType::Unmapped: |
| 207 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | 196 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, |
| 208 | static_cast<u32>(data), vaddr); | 197 | static_cast<u32>(data), vaddr); |
| 209 | return; | 198 | return; |
| 210 | case PageType::Memory: | 199 | case Common::PageType::Memory: |
| 211 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 200 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 212 | break; | 201 | break; |
| 213 | case PageType::RasterizerCachedMemory: { | 202 | case Common::PageType::RasterizerCachedMemory: { |
| 214 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate); | 203 | auto host_ptr{GetPointerFromVMA(vaddr)}; |
| 215 | std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T)); | 204 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); |
| 205 | std::memcpy(host_ptr, &data, sizeof(T)); | ||
| 216 | break; | 206 | break; |
| 217 | } | 207 | } |
| 218 | default: | 208 | default: |
| @@ -227,10 +217,10 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { | |||
| 227 | if (page_pointer) | 217 | if (page_pointer) |
| 228 | return true; | 218 | return true; |
| 229 | 219 | ||
| 230 | if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) | 220 | if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) |
| 231 | return true; | 221 | return true; |
| 232 | 222 | ||
| 233 | if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special) | 223 | if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) |
| 234 | return false; | 224 | return false; |
| 235 | 225 | ||
| 236 | return false; | 226 | return false; |
| @@ -250,7 +240,8 @@ u8* GetPointer(const VAddr vaddr) { | |||
| 250 | return page_pointer + (vaddr & PAGE_MASK); | 240 | return page_pointer + (vaddr & PAGE_MASK); |
| 251 | } | 241 | } |
| 252 | 242 | ||
| 253 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) { | 243 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == |
| 244 | Common::PageType::RasterizerCachedMemory) { | ||
| 254 | return GetPointerFromVMA(vaddr); | 245 | return GetPointerFromVMA(vaddr); |
| 255 | } | 246 | } |
| 256 | 247 | ||
| @@ -284,20 +275,20 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 284 | 275 | ||
| 285 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; | 276 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; |
| 286 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { | 277 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { |
| 287 | PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; | 278 | Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 288 | 279 | ||
| 289 | if (cached) { | 280 | if (cached) { |
| 290 | // Switch page type to cached if now cached | 281 | // Switch page type to cached if now cached |
| 291 | switch (page_type) { | 282 | switch (page_type) { |
| 292 | case PageType::Unmapped: | 283 | case Common::PageType::Unmapped: |
| 293 | // It is not necessary for a process to have this region mapped into its address | 284 | // It is not necessary for a process to have this region mapped into its address |
| 294 | // space, for example, a system module need not have a VRAM mapping. | 285 | // space, for example, a system module need not have a VRAM mapping. |
| 295 | break; | 286 | break; |
| 296 | case PageType::Memory: | 287 | case Common::PageType::Memory: |
| 297 | page_type = PageType::RasterizerCachedMemory; | 288 | page_type = Common::PageType::RasterizerCachedMemory; |
| 298 | current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; | 289 | current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; |
| 299 | break; | 290 | break; |
| 300 | case PageType::RasterizerCachedMemory: | 291 | case Common::PageType::RasterizerCachedMemory: |
| 301 | // There can be more than one GPU region mapped per CPU region, so it's common that | 292 | // There can be more than one GPU region mapped per CPU region, so it's common that |
| 302 | // this area is already marked as cached. | 293 | // this area is already marked as cached. |
| 303 | break; | 294 | break; |
| @@ -307,23 +298,23 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 307 | } else { | 298 | } else { |
| 308 | // Switch page type to uncached if now uncached | 299 | // Switch page type to uncached if now uncached |
| 309 | switch (page_type) { | 300 | switch (page_type) { |
| 310 | case PageType::Unmapped: | 301 | case Common::PageType::Unmapped: |
| 311 | // It is not necessary for a process to have this region mapped into its address | 302 | // It is not necessary for a process to have this region mapped into its address |
| 312 | // space, for example, a system module need not have a VRAM mapping. | 303 | // space, for example, a system module need not have a VRAM mapping. |
| 313 | break; | 304 | break; |
| 314 | case PageType::Memory: | 305 | case Common::PageType::Memory: |
| 315 | // There can be more than one GPU region mapped per CPU region, so it's common that | 306 | // There can be more than one GPU region mapped per CPU region, so it's common that |
| 316 | // this area is already unmarked as cached. | 307 | // this area is already unmarked as cached. |
| 317 | break; | 308 | break; |
| 318 | case PageType::RasterizerCachedMemory: { | 309 | case Common::PageType::RasterizerCachedMemory: { |
| 319 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); | 310 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); |
| 320 | if (pointer == nullptr) { | 311 | if (pointer == nullptr) { |
| 321 | // It's possible that this function has been called while updating the pagetable | 312 | // It's possible that this function has been called while updating the pagetable |
| 322 | // after unmapping a VMA. In that case the underlying VMA will no longer exist, | 313 | // after unmapping a VMA. In that case the underlying VMA will no longer exist, |
| 323 | // and we should just leave the pagetable entry blank. | 314 | // and we should just leave the pagetable entry blank. |
| 324 | page_type = PageType::Unmapped; | 315 | page_type = Common::PageType::Unmapped; |
| 325 | } else { | 316 | } else { |
| 326 | page_type = PageType::Memory; | 317 | page_type = Common::PageType::Memory; |
| 327 | current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; | 318 | current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; |
| 328 | } | 319 | } |
| 329 | break; | 320 | break; |
| @@ -335,47 +326,6 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 335 | } | 326 | } |
| 336 | } | 327 | } |
| 337 | 328 | ||
| 338 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) { | ||
| 339 | auto& system_instance = Core::System::GetInstance(); | ||
| 340 | |||
| 341 | // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be | ||
| 342 | // null here | ||
| 343 | if (!system_instance.IsPoweredOn()) { | ||
| 344 | return; | ||
| 345 | } | ||
| 346 | |||
| 347 | const VAddr end = start + size; | ||
| 348 | |||
| 349 | const auto CheckRegion = [&](VAddr region_start, VAddr region_end) { | ||
| 350 | if (start >= region_end || end <= region_start) { | ||
| 351 | // No overlap with region | ||
| 352 | return; | ||
| 353 | } | ||
| 354 | |||
| 355 | const VAddr overlap_start = std::max(start, region_start); | ||
| 356 | const VAddr overlap_end = std::min(end, region_end); | ||
| 357 | const VAddr overlap_size = overlap_end - overlap_start; | ||
| 358 | |||
| 359 | auto& gpu = system_instance.GPU(); | ||
| 360 | switch (mode) { | ||
| 361 | case FlushMode::Flush: | ||
| 362 | gpu.FlushRegion(overlap_start, overlap_size); | ||
| 363 | break; | ||
| 364 | case FlushMode::Invalidate: | ||
| 365 | gpu.InvalidateRegion(overlap_start, overlap_size); | ||
| 366 | break; | ||
| 367 | case FlushMode::FlushAndInvalidate: | ||
| 368 | gpu.FlushAndInvalidateRegion(overlap_start, overlap_size); | ||
| 369 | break; | ||
| 370 | } | ||
| 371 | }; | ||
| 372 | |||
| 373 | const auto& vm_manager = Core::CurrentProcess()->VMManager(); | ||
| 374 | |||
| 375 | CheckRegion(vm_manager.GetCodeRegionBaseAddress(), vm_manager.GetCodeRegionEndAddress()); | ||
| 376 | CheckRegion(vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionEndAddress()); | ||
| 377 | } | ||
| 378 | |||
| 379 | u8 Read8(const VAddr addr) { | 329 | u8 Read8(const VAddr addr) { |
| 380 | return Read<u8>(addr); | 330 | return Read<u8>(addr); |
| 381 | } | 331 | } |
| @@ -406,24 +356,24 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_ | |||
| 406 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 356 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 407 | 357 | ||
| 408 | switch (page_table.attributes[page_index]) { | 358 | switch (page_table.attributes[page_index]) { |
| 409 | case PageType::Unmapped: { | 359 | case Common::PageType::Unmapped: { |
| 410 | LOG_ERROR(HW_Memory, | 360 | LOG_ERROR(HW_Memory, |
| 411 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 361 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 412 | current_vaddr, src_addr, size); | 362 | current_vaddr, src_addr, size); |
| 413 | std::memset(dest_buffer, 0, copy_amount); | 363 | std::memset(dest_buffer, 0, copy_amount); |
| 414 | break; | 364 | break; |
| 415 | } | 365 | } |
| 416 | case PageType::Memory: { | 366 | case Common::PageType::Memory: { |
| 417 | DEBUG_ASSERT(page_table.pointers[page_index]); | 367 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 418 | 368 | ||
| 419 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; | 369 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; |
| 420 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 370 | std::memcpy(dest_buffer, src_ptr, copy_amount); |
| 421 | break; | 371 | break; |
| 422 | } | 372 | } |
| 423 | case PageType::RasterizerCachedMemory: { | 373 | case Common::PageType::RasterizerCachedMemory: { |
| 424 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 374 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 425 | FlushMode::Flush); | 375 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); |
| 426 | std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount); | 376 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 427 | break; | 377 | break; |
| 428 | } | 378 | } |
| 429 | default: | 379 | default: |
| @@ -470,23 +420,23 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi | |||
| 470 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 420 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 471 | 421 | ||
| 472 | switch (page_table.attributes[page_index]) { | 422 | switch (page_table.attributes[page_index]) { |
| 473 | case PageType::Unmapped: { | 423 | case Common::PageType::Unmapped: { |
| 474 | LOG_ERROR(HW_Memory, | 424 | LOG_ERROR(HW_Memory, |
| 475 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 425 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 476 | current_vaddr, dest_addr, size); | 426 | current_vaddr, dest_addr, size); |
| 477 | break; | 427 | break; |
| 478 | } | 428 | } |
| 479 | case PageType::Memory: { | 429 | case Common::PageType::Memory: { |
| 480 | DEBUG_ASSERT(page_table.pointers[page_index]); | 430 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 481 | 431 | ||
| 482 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; | 432 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; |
| 483 | std::memcpy(dest_ptr, src_buffer, copy_amount); | 433 | std::memcpy(dest_ptr, src_buffer, copy_amount); |
| 484 | break; | 434 | break; |
| 485 | } | 435 | } |
| 486 | case PageType::RasterizerCachedMemory: { | 436 | case Common::PageType::RasterizerCachedMemory: { |
| 487 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 437 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 488 | FlushMode::Invalidate); | 438 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); |
| 489 | std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount); | 439 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 490 | break; | 440 | break; |
| 491 | } | 441 | } |
| 492 | default: | 442 | default: |
| @@ -516,23 +466,23 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std: | |||
| 516 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 466 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 517 | 467 | ||
| 518 | switch (page_table.attributes[page_index]) { | 468 | switch (page_table.attributes[page_index]) { |
| 519 | case PageType::Unmapped: { | 469 | case Common::PageType::Unmapped: { |
| 520 | LOG_ERROR(HW_Memory, | 470 | LOG_ERROR(HW_Memory, |
| 521 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 471 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 522 | current_vaddr, dest_addr, size); | 472 | current_vaddr, dest_addr, size); |
| 523 | break; | 473 | break; |
| 524 | } | 474 | } |
| 525 | case PageType::Memory: { | 475 | case Common::PageType::Memory: { |
| 526 | DEBUG_ASSERT(page_table.pointers[page_index]); | 476 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 527 | 477 | ||
| 528 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; | 478 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; |
| 529 | std::memset(dest_ptr, 0, copy_amount); | 479 | std::memset(dest_ptr, 0, copy_amount); |
| 530 | break; | 480 | break; |
| 531 | } | 481 | } |
| 532 | case PageType::RasterizerCachedMemory: { | 482 | case Common::PageType::RasterizerCachedMemory: { |
| 533 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 483 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 534 | FlushMode::Invalidate); | 484 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); |
| 535 | std::memset(GetPointerFromVMA(process, current_vaddr), 0, copy_amount); | 485 | std::memset(host_ptr, 0, copy_amount); |
| 536 | break; | 486 | break; |
| 537 | } | 487 | } |
| 538 | default: | 488 | default: |
| @@ -558,23 +508,23 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, | |||
| 558 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 508 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 559 | 509 | ||
| 560 | switch (page_table.attributes[page_index]) { | 510 | switch (page_table.attributes[page_index]) { |
| 561 | case PageType::Unmapped: { | 511 | case Common::PageType::Unmapped: { |
| 562 | LOG_ERROR(HW_Memory, | 512 | LOG_ERROR(HW_Memory, |
| 563 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 513 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 564 | current_vaddr, src_addr, size); | 514 | current_vaddr, src_addr, size); |
| 565 | ZeroBlock(process, dest_addr, copy_amount); | 515 | ZeroBlock(process, dest_addr, copy_amount); |
| 566 | break; | 516 | break; |
| 567 | } | 517 | } |
| 568 | case PageType::Memory: { | 518 | case Common::PageType::Memory: { |
| 569 | DEBUG_ASSERT(page_table.pointers[page_index]); | 519 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 570 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; | 520 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; |
| 571 | WriteBlock(process, dest_addr, src_ptr, copy_amount); | 521 | WriteBlock(process, dest_addr, src_ptr, copy_amount); |
| 572 | break; | 522 | break; |
| 573 | } | 523 | } |
| 574 | case PageType::RasterizerCachedMemory: { | 524 | case Common::PageType::RasterizerCachedMemory: { |
| 575 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 525 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 576 | FlushMode::Flush); | 526 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); |
| 577 | WriteBlock(process, dest_addr, GetPointerFromVMA(process, current_vaddr), copy_amount); | 527 | WriteBlock(process, dest_addr, host_ptr, copy_amount); |
| 578 | break; | 528 | break; |
| 579 | } | 529 | } |
| 580 | default: | 530 | default: |
diff --git a/src/core/memory.h b/src/core/memory.h index 1acf5ce8c..1d38cdca8 100644 --- a/src/core/memory.h +++ b/src/core/memory.h | |||
| @@ -6,11 +6,11 @@ | |||
| 6 | 6 | ||
| 7 | #include <cstddef> | 7 | #include <cstddef> |
| 8 | #include <string> | 8 | #include <string> |
| 9 | #include <tuple> | ||
| 10 | #include <vector> | ||
| 11 | #include <boost/icl/interval_map.hpp> | ||
| 12 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 13 | #include "core/memory_hook.h" | 10 | |
| 11 | namespace Common { | ||
| 12 | struct PageTable; | ||
| 13 | } | ||
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | class Process; | 16 | class Process; |
| @@ -26,71 +26,6 @@ constexpr std::size_t PAGE_BITS = 12; | |||
| 26 | constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS; | 26 | constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS; |
| 27 | constexpr u64 PAGE_MASK = PAGE_SIZE - 1; | 27 | constexpr u64 PAGE_MASK = PAGE_SIZE - 1; |
| 28 | 28 | ||
| 29 | enum class PageType : u8 { | ||
| 30 | /// Page is unmapped and should cause an access error. | ||
| 31 | Unmapped, | ||
| 32 | /// Page is mapped to regular memory. This is the only type you can get pointers to. | ||
| 33 | Memory, | ||
| 34 | /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and | ||
| 35 | /// invalidation | ||
| 36 | RasterizerCachedMemory, | ||
| 37 | /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions. | ||
| 38 | Special, | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct SpecialRegion { | ||
| 42 | enum class Type { | ||
| 43 | DebugHook, | ||
| 44 | IODevice, | ||
| 45 | } type; | ||
| 46 | |||
| 47 | MemoryHookPointer handler; | ||
| 48 | |||
| 49 | bool operator<(const SpecialRegion& other) const { | ||
| 50 | return std::tie(type, handler) < std::tie(other.type, other.handler); | ||
| 51 | } | ||
| 52 | |||
| 53 | bool operator==(const SpecialRegion& other) const { | ||
| 54 | return std::tie(type, handler) == std::tie(other.type, other.handler); | ||
| 55 | } | ||
| 56 | }; | ||
| 57 | |||
| 58 | /** | ||
| 59 | * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely | ||
| 60 | * mimics the way a real CPU page table works. | ||
| 61 | */ | ||
| 62 | struct PageTable { | ||
| 63 | explicit PageTable(); | ||
| 64 | explicit PageTable(std::size_t address_space_width_in_bits); | ||
| 65 | ~PageTable(); | ||
| 66 | |||
| 67 | /** | ||
| 68 | * Resizes the page table to be able to accomodate enough pages within | ||
| 69 | * a given address space. | ||
| 70 | * | ||
| 71 | * @param address_space_width_in_bits The address size width in bits. | ||
| 72 | */ | ||
| 73 | void Resize(std::size_t address_space_width_in_bits); | ||
| 74 | |||
| 75 | /** | ||
| 76 | * Vector of memory pointers backing each page. An entry can only be non-null if the | ||
| 77 | * corresponding entry in the `attributes` vector is of type `Memory`. | ||
| 78 | */ | ||
| 79 | std::vector<u8*> pointers; | ||
| 80 | |||
| 81 | /** | ||
| 82 | * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is | ||
| 83 | * of type `Special`. | ||
| 84 | */ | ||
| 85 | boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions; | ||
| 86 | |||
| 87 | /** | ||
| 88 | * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then | ||
| 89 | * the corresponding entry in `pointers` MUST be set to null. | ||
| 90 | */ | ||
| 91 | std::vector<PageType> attributes; | ||
| 92 | }; | ||
| 93 | |||
| 94 | /// Virtual user-space memory regions | 29 | /// Virtual user-space memory regions |
| 95 | enum : VAddr { | 30 | enum : VAddr { |
| 96 | /// Read-only page containing kernel and system configuration values. | 31 | /// Read-only page containing kernel and system configuration values. |
| @@ -116,8 +51,8 @@ enum : VAddr { | |||
| 116 | }; | 51 | }; |
| 117 | 52 | ||
| 118 | /// Currently active page table | 53 | /// Currently active page table |
| 119 | void SetCurrentPageTable(PageTable* page_table); | 54 | void SetCurrentPageTable(Common::PageTable* page_table); |
| 120 | PageTable* GetCurrentPageTable(); | 55 | Common::PageTable* GetCurrentPageTable(); |
| 121 | 56 | ||
| 122 | /// Determines if the given VAddr is valid for the specified process. | 57 | /// Determines if the given VAddr is valid for the specified process. |
| 123 | bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr); | 58 | bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr); |
| @@ -161,10 +96,4 @@ enum class FlushMode { | |||
| 161 | */ | 96 | */ |
| 162 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); | 97 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); |
| 163 | 98 | ||
| 164 | /** | ||
| 165 | * Flushes and invalidates any externally cached rasterizer resources touching the given virtual | ||
| 166 | * address region. | ||
| 167 | */ | ||
| 168 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode); | ||
| 169 | |||
| 170 | } // namespace Memory | 99 | } // namespace Memory |
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h index 9a1a4f4be..5225ee8e2 100644 --- a/src/core/memory_setup.h +++ b/src/core/memory_setup.h | |||
| @@ -5,7 +5,11 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/memory_hook.h" | 8 | #include "common/memory_hook.h" |
| 9 | |||
| 10 | namespace Common { | ||
| 11 | struct PageTable; | ||
| 12 | } | ||
| 9 | 13 | ||
| 10 | namespace Memory { | 14 | namespace Memory { |
| 11 | 15 | ||
| @@ -17,7 +21,7 @@ namespace Memory { | |||
| 17 | * @param size The amount of bytes to map. Must be page-aligned. | 21 | * @param size The amount of bytes to map. Must be page-aligned. |
| 18 | * @param target Buffer with the memory backing the mapping. Must be of length at least `size`. | 22 | * @param target Buffer with the memory backing the mapping. Must be of length at least `size`. |
| 19 | */ | 23 | */ |
| 20 | void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target); | 24 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); |
| 21 | 25 | ||
| 22 | /** | 26 | /** |
| 23 | * Maps a region of the emulated process address space as a IO region. | 27 | * Maps a region of the emulated process address space as a IO region. |
| @@ -26,11 +30,14 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target); | |||
| 26 | * @param size The amount of bytes to map. Must be page-aligned. | 30 | * @param size The amount of bytes to map. Must be page-aligned. |
| 27 | * @param mmio_handler The handler that backs the mapping. | 31 | * @param mmio_handler The handler that backs the mapping. |
| 28 | */ | 32 | */ |
| 29 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler); | 33 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, |
| 34 | Common::MemoryHookPointer mmio_handler); | ||
| 30 | 35 | ||
| 31 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size); | 36 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size); |
| 32 | 37 | ||
| 33 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | 38 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 34 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | 39 | Common::MemoryHookPointer hook); |
| 40 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, | ||
| 41 | Common::MemoryHookPointer hook); | ||
| 35 | 42 | ||
| 36 | } // namespace Memory | 43 | } // namespace Memory |
diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt index 1c7db28c0..5b4e032bd 100644 --- a/src/input_common/CMakeLists.txt +++ b/src/input_common/CMakeLists.txt | |||
| @@ -7,15 +7,18 @@ add_library(input_common STATIC | |||
| 7 | main.h | 7 | main.h |
| 8 | motion_emu.cpp | 8 | motion_emu.cpp |
| 9 | motion_emu.h | 9 | motion_emu.h |
| 10 | 10 | sdl/sdl.cpp | |
| 11 | $<$<BOOL:${SDL2_FOUND}>:sdl/sdl.cpp sdl/sdl.h> | 11 | sdl/sdl.h |
| 12 | ) | 12 | ) |
| 13 | 13 | ||
| 14 | create_target_directory_groups(input_common) | ||
| 15 | |||
| 16 | target_link_libraries(input_common PUBLIC core PRIVATE common) | ||
| 17 | |||
| 18 | if(SDL2_FOUND) | 14 | if(SDL2_FOUND) |
| 15 | target_sources(input_common PRIVATE | ||
| 16 | sdl/sdl_impl.cpp | ||
| 17 | sdl/sdl_impl.h | ||
| 18 | ) | ||
| 19 | target_link_libraries(input_common PRIVATE SDL2) | 19 | target_link_libraries(input_common PRIVATE SDL2) |
| 20 | target_compile_definitions(input_common PRIVATE HAVE_SDL2) | 20 | target_compile_definitions(input_common PRIVATE HAVE_SDL2) |
| 21 | endif() | 21 | endif() |
| 22 | |||
| 23 | create_target_directory_groups(input_common) | ||
| 24 | target_link_libraries(input_common PUBLIC core PRIVATE common) | ||
diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp index 37f572853..8e66c1b15 100644 --- a/src/input_common/main.cpp +++ b/src/input_common/main.cpp | |||
| @@ -17,10 +17,7 @@ namespace InputCommon { | |||
| 17 | 17 | ||
| 18 | static std::shared_ptr<Keyboard> keyboard; | 18 | static std::shared_ptr<Keyboard> keyboard; |
| 19 | static std::shared_ptr<MotionEmu> motion_emu; | 19 | static std::shared_ptr<MotionEmu> motion_emu; |
| 20 | 20 | static std::unique_ptr<SDL::State> sdl; | |
| 21 | #ifdef HAVE_SDL2 | ||
| 22 | static std::thread poll_thread; | ||
| 23 | #endif | ||
| 24 | 21 | ||
| 25 | void Init() { | 22 | void Init() { |
| 26 | keyboard = std::make_shared<Keyboard>(); | 23 | keyboard = std::make_shared<Keyboard>(); |
| @@ -30,15 +27,7 @@ void Init() { | |||
| 30 | motion_emu = std::make_shared<MotionEmu>(); | 27 | motion_emu = std::make_shared<MotionEmu>(); |
| 31 | Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu); | 28 | Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu); |
| 32 | 29 | ||
| 33 | #ifdef HAVE_SDL2 | 30 | sdl = SDL::Init(); |
| 34 | SDL::Init(); | ||
| 35 | #endif | ||
| 36 | } | ||
| 37 | |||
| 38 | void StartJoystickEventHandler() { | ||
| 39 | #ifdef HAVE_SDL2 | ||
| 40 | poll_thread = std::thread(SDL::PollLoop); | ||
| 41 | #endif | ||
| 42 | } | 31 | } |
| 43 | 32 | ||
| 44 | void Shutdown() { | 33 | void Shutdown() { |
| @@ -47,11 +36,7 @@ void Shutdown() { | |||
| 47 | Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button"); | 36 | Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button"); |
| 48 | Input::UnregisterFactory<Input::MotionDevice>("motion_emu"); | 37 | Input::UnregisterFactory<Input::MotionDevice>("motion_emu"); |
| 49 | motion_emu.reset(); | 38 | motion_emu.reset(); |
| 50 | 39 | sdl.reset(); | |
| 51 | #ifdef HAVE_SDL2 | ||
| 52 | SDL::Shutdown(); | ||
| 53 | poll_thread.join(); | ||
| 54 | #endif | ||
| 55 | } | 40 | } |
| 56 | 41 | ||
| 57 | Keyboard* GetKeyboard() { | 42 | Keyboard* GetKeyboard() { |
| @@ -88,7 +73,7 @@ namespace Polling { | |||
| 88 | 73 | ||
| 89 | std::vector<std::unique_ptr<DevicePoller>> GetPollers(DeviceType type) { | 74 | std::vector<std::unique_ptr<DevicePoller>> GetPollers(DeviceType type) { |
| 90 | #ifdef HAVE_SDL2 | 75 | #ifdef HAVE_SDL2 |
| 91 | return SDL::Polling::GetPollers(type); | 76 | return sdl->GetPollers(type); |
| 92 | #else | 77 | #else |
| 93 | return {}; | 78 | return {}; |
| 94 | #endif | 79 | #endif |
diff --git a/src/input_common/main.h b/src/input_common/main.h index 9eb13106e..77a0ce90b 100644 --- a/src/input_common/main.h +++ b/src/input_common/main.h | |||
| @@ -20,8 +20,6 @@ void Init(); | |||
| 20 | /// Deregisters all built-in input device factories and shuts them down. | 20 | /// Deregisters all built-in input device factories and shuts them down. |
| 21 | void Shutdown(); | 21 | void Shutdown(); |
| 22 | 22 | ||
| 23 | void StartJoystickEventHandler(); | ||
| 24 | |||
| 25 | class Keyboard; | 23 | class Keyboard; |
| 26 | 24 | ||
| 27 | /// Gets the keyboard button device factory. | 25 | /// Gets the keyboard button device factory. |
diff --git a/src/input_common/sdl/sdl.cpp b/src/input_common/sdl/sdl.cpp index faf3c1fa3..644db3448 100644 --- a/src/input_common/sdl/sdl.cpp +++ b/src/input_common/sdl/sdl.cpp | |||
| @@ -1,631 +1,19 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // Copyright 2018 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | ||
| 6 | #include <atomic> | ||
| 7 | #include <cmath> | ||
| 8 | #include <functional> | ||
| 9 | #include <iterator> | ||
| 10 | #include <mutex> | ||
| 11 | #include <string> | ||
| 12 | #include <thread> | ||
| 13 | #include <tuple> | ||
| 14 | #include <unordered_map> | ||
| 15 | #include <utility> | ||
| 16 | #include <vector> | ||
| 17 | #include <SDL.h> | ||
| 18 | #include "common/assert.h" | ||
| 19 | #include "common/logging/log.h" | ||
| 20 | #include "common/math_util.h" | ||
| 21 | #include "common/param_package.h" | ||
| 22 | #include "common/threadsafe_queue.h" | ||
| 23 | #include "input_common/main.h" | ||
| 24 | #include "input_common/sdl/sdl.h" | 5 | #include "input_common/sdl/sdl.h" |
| 6 | #ifdef HAVE_SDL2 | ||
| 7 | #include "input_common/sdl/sdl_impl.h" | ||
| 8 | #endif | ||
| 25 | 9 | ||
| 26 | namespace InputCommon { | 10 | namespace InputCommon::SDL { |
| 27 | 11 | ||
| 28 | namespace SDL { | 12 | std::unique_ptr<State> Init() { |
| 29 | 13 | #ifdef HAVE_SDL2 | |
| 30 | class SDLJoystick; | 14 | return std::make_unique<SDLState>(); |
| 31 | class SDLButtonFactory; | 15 | #else |
| 32 | class SDLAnalogFactory; | 16 | return std::make_unique<NullState>(); |
| 33 | 17 | #endif | |
| 34 | /// Map of GUID of a list of corresponding virtual Joysticks | ||
| 35 | static std::unordered_map<std::string, std::vector<std::shared_ptr<SDLJoystick>>> joystick_map; | ||
| 36 | static std::mutex joystick_map_mutex; | ||
| 37 | |||
| 38 | static std::shared_ptr<SDLButtonFactory> button_factory; | ||
| 39 | static std::shared_ptr<SDLAnalogFactory> analog_factory; | ||
| 40 | |||
| 41 | /// Used by the Pollers during config | ||
| 42 | static std::atomic<bool> polling; | ||
| 43 | static Common::SPSCQueue<SDL_Event> event_queue; | ||
| 44 | |||
| 45 | static std::atomic<bool> initialized = false; | ||
| 46 | |||
| 47 | static std::string GetGUID(SDL_Joystick* joystick) { | ||
| 48 | SDL_JoystickGUID guid = SDL_JoystickGetGUID(joystick); | ||
| 49 | char guid_str[33]; | ||
| 50 | SDL_JoystickGetGUIDString(guid, guid_str, sizeof(guid_str)); | ||
| 51 | return guid_str; | ||
| 52 | } | ||
| 53 | |||
| 54 | class SDLJoystick { | ||
| 55 | public: | ||
| 56 | SDLJoystick(std::string guid_, int port_, SDL_Joystick* joystick, | ||
| 57 | decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) | ||
| 58 | : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} | ||
| 59 | |||
| 60 | void SetButton(int button, bool value) { | ||
| 61 | std::lock_guard<std::mutex> lock(mutex); | ||
| 62 | state.buttons[button] = value; | ||
| 63 | } | ||
| 64 | |||
| 65 | bool GetButton(int button) const { | ||
| 66 | std::lock_guard<std::mutex> lock(mutex); | ||
| 67 | return state.buttons.at(button); | ||
| 68 | } | ||
| 69 | |||
| 70 | void SetAxis(int axis, Sint16 value) { | ||
| 71 | std::lock_guard<std::mutex> lock(mutex); | ||
| 72 | state.axes[axis] = value; | ||
| 73 | } | ||
| 74 | |||
| 75 | float GetAxis(int axis) const { | ||
| 76 | std::lock_guard<std::mutex> lock(mutex); | ||
| 77 | return state.axes.at(axis) / 32767.0f; | ||
| 78 | } | ||
| 79 | |||
| 80 | std::tuple<float, float> GetAnalog(int axis_x, int axis_y) const { | ||
| 81 | float x = GetAxis(axis_x); | ||
| 82 | float y = GetAxis(axis_y); | ||
| 83 | y = -y; // 3DS uses an y-axis inverse from SDL | ||
| 84 | |||
| 85 | // Make sure the coordinates are in the unit circle, | ||
| 86 | // otherwise normalize it. | ||
| 87 | float r = x * x + y * y; | ||
| 88 | if (r > 1.0f) { | ||
| 89 | r = std::sqrt(r); | ||
| 90 | x /= r; | ||
| 91 | y /= r; | ||
| 92 | } | ||
| 93 | |||
| 94 | return std::make_tuple(x, y); | ||
| 95 | } | ||
| 96 | |||
| 97 | void SetHat(int hat, Uint8 direction) { | ||
| 98 | std::lock_guard<std::mutex> lock(mutex); | ||
| 99 | state.hats[hat] = direction; | ||
| 100 | } | ||
| 101 | |||
| 102 | bool GetHatDirection(int hat, Uint8 direction) const { | ||
| 103 | std::lock_guard<std::mutex> lock(mutex); | ||
| 104 | return (state.hats.at(hat) & direction) != 0; | ||
| 105 | } | ||
| 106 | /** | ||
| 107 | * The guid of the joystick | ||
| 108 | */ | ||
| 109 | const std::string& GetGUID() const { | ||
| 110 | return guid; | ||
| 111 | } | ||
| 112 | |||
| 113 | /** | ||
| 114 | * The number of joystick from the same type that were connected before this joystick | ||
| 115 | */ | ||
| 116 | int GetPort() const { | ||
| 117 | return port; | ||
| 118 | } | ||
| 119 | |||
| 120 | SDL_Joystick* GetSDLJoystick() const { | ||
| 121 | return sdl_joystick.get(); | ||
| 122 | } | ||
| 123 | |||
| 124 | void SetSDLJoystick(SDL_Joystick* joystick, | ||
| 125 | decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) { | ||
| 126 | sdl_joystick = | ||
| 127 | std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)>(joystick, deleter); | ||
| 128 | } | ||
| 129 | |||
| 130 | private: | ||
| 131 | struct State { | ||
| 132 | std::unordered_map<int, bool> buttons; | ||
| 133 | std::unordered_map<int, Sint16> axes; | ||
| 134 | std::unordered_map<int, Uint8> hats; | ||
| 135 | } state; | ||
| 136 | std::string guid; | ||
| 137 | int port; | ||
| 138 | std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)> sdl_joystick; | ||
| 139 | mutable std::mutex mutex; | ||
| 140 | }; | ||
| 141 | |||
| 142 | /** | ||
| 143 | * Get the nth joystick with the corresponding GUID | ||
| 144 | */ | ||
| 145 | static std::shared_ptr<SDLJoystick> GetSDLJoystickByGUID(const std::string& guid, int port) { | ||
| 146 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 147 | const auto it = joystick_map.find(guid); | ||
| 148 | if (it != joystick_map.end()) { | ||
| 149 | while (it->second.size() <= port) { | ||
| 150 | auto joystick = std::make_shared<SDLJoystick>(guid, it->second.size(), nullptr, | ||
| 151 | [](SDL_Joystick*) {}); | ||
| 152 | it->second.emplace_back(std::move(joystick)); | ||
| 153 | } | ||
| 154 | return it->second[port]; | ||
| 155 | } | ||
| 156 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, nullptr, [](SDL_Joystick*) {}); | ||
| 157 | return joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 158 | } | ||
| 159 | |||
| 160 | /** | ||
| 161 | * Check how many identical joysticks (by guid) were connected before the one with sdl_id and so tie | ||
| 162 | * it to a SDLJoystick with the same guid and that port | ||
| 163 | */ | ||
| 164 | static std::shared_ptr<SDLJoystick> GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { | ||
| 165 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 166 | auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); | ||
| 167 | const std::string guid = GetGUID(sdl_joystick); | ||
| 168 | auto map_it = joystick_map.find(guid); | ||
| 169 | if (map_it != joystick_map.end()) { | ||
| 170 | auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), | ||
| 171 | [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 172 | return sdl_joystick == joystick->GetSDLJoystick(); | ||
| 173 | }); | ||
| 174 | if (vec_it != map_it->second.end()) { | ||
| 175 | // This is the common case: There is already an existing SDL_Joystick maped to a | ||
| 176 | // SDLJoystick. return the SDLJoystick | ||
| 177 | return *vec_it; | ||
| 178 | } | ||
| 179 | // Search for a SDLJoystick without a mapped SDL_Joystick... | ||
| 180 | auto nullptr_it = std::find_if(map_it->second.begin(), map_it->second.end(), | ||
| 181 | [](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 182 | return !joystick->GetSDLJoystick(); | ||
| 183 | }); | ||
| 184 | if (nullptr_it != map_it->second.end()) { | ||
| 185 | // ... and map it | ||
| 186 | (*nullptr_it)->SetSDLJoystick(sdl_joystick); | ||
| 187 | return *nullptr_it; | ||
| 188 | } | ||
| 189 | // There is no SDLJoystick without a mapped SDL_Joystick | ||
| 190 | // Create a new SDLJoystick | ||
| 191 | auto joystick = std::make_shared<SDLJoystick>(guid, map_it->second.size(), sdl_joystick); | ||
| 192 | return map_it->second.emplace_back(std::move(joystick)); | ||
| 193 | } | ||
| 194 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); | ||
| 195 | return joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 196 | } | ||
| 197 | |||
| 198 | void InitJoystick(int joystick_index) { | ||
| 199 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 200 | SDL_Joystick* sdl_joystick = SDL_JoystickOpen(joystick_index); | ||
| 201 | if (!sdl_joystick) { | ||
| 202 | LOG_ERROR(Input, "failed to open joystick {}", joystick_index); | ||
| 203 | return; | ||
| 204 | } | ||
| 205 | std::string guid = GetGUID(sdl_joystick); | ||
| 206 | if (joystick_map.find(guid) == joystick_map.end()) { | ||
| 207 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); | ||
| 208 | joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 209 | return; | ||
| 210 | } | ||
| 211 | auto& joystick_guid_list = joystick_map[guid]; | ||
| 212 | const auto it = std::find_if( | ||
| 213 | joystick_guid_list.begin(), joystick_guid_list.end(), | ||
| 214 | [](const std::shared_ptr<SDLJoystick>& joystick) { return !joystick->GetSDLJoystick(); }); | ||
| 215 | if (it != joystick_guid_list.end()) { | ||
| 216 | (*it)->SetSDLJoystick(sdl_joystick); | ||
| 217 | return; | ||
| 218 | } | ||
| 219 | auto joystick = std::make_shared<SDLJoystick>(guid, joystick_guid_list.size(), sdl_joystick); | ||
| 220 | joystick_guid_list.emplace_back(std::move(joystick)); | ||
| 221 | } | ||
| 222 | |||
| 223 | void CloseJoystick(SDL_Joystick* sdl_joystick) { | ||
| 224 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 225 | std::string guid = GetGUID(sdl_joystick); | ||
| 226 | // This call to guid is save since the joystick is guranteed to be in that map | ||
| 227 | auto& joystick_guid_list = joystick_map[guid]; | ||
| 228 | const auto joystick_it = | ||
| 229 | std::find_if(joystick_guid_list.begin(), joystick_guid_list.end(), | ||
| 230 | [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 231 | return joystick->GetSDLJoystick() == sdl_joystick; | ||
| 232 | }); | ||
| 233 | (*joystick_it)->SetSDLJoystick(nullptr, [](SDL_Joystick*) {}); | ||
| 234 | } | ||
| 235 | |||
| 236 | void HandleGameControllerEvent(const SDL_Event& event) { | ||
| 237 | switch (event.type) { | ||
| 238 | case SDL_JOYBUTTONUP: { | ||
| 239 | auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); | ||
| 240 | if (joystick) { | ||
| 241 | joystick->SetButton(event.jbutton.button, false); | ||
| 242 | } | ||
| 243 | break; | ||
| 244 | } | ||
| 245 | case SDL_JOYBUTTONDOWN: { | ||
| 246 | auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); | ||
| 247 | if (joystick) { | ||
| 248 | joystick->SetButton(event.jbutton.button, true); | ||
| 249 | } | ||
| 250 | break; | ||
| 251 | } | ||
| 252 | case SDL_JOYHATMOTION: { | ||
| 253 | auto joystick = GetSDLJoystickBySDLID(event.jhat.which); | ||
| 254 | if (joystick) { | ||
| 255 | joystick->SetHat(event.jhat.hat, event.jhat.value); | ||
| 256 | } | ||
| 257 | break; | ||
| 258 | } | ||
| 259 | case SDL_JOYAXISMOTION: { | ||
| 260 | auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); | ||
| 261 | if (joystick) { | ||
| 262 | joystick->SetAxis(event.jaxis.axis, event.jaxis.value); | ||
| 263 | } | ||
| 264 | break; | ||
| 265 | } | ||
| 266 | case SDL_JOYDEVICEREMOVED: | ||
| 267 | LOG_DEBUG(Input, "Controller removed with Instance_ID {}", event.jdevice.which); | ||
| 268 | CloseJoystick(SDL_JoystickFromInstanceID(event.jdevice.which)); | ||
| 269 | break; | ||
| 270 | case SDL_JOYDEVICEADDED: | ||
| 271 | LOG_DEBUG(Input, "Controller connected with device index {}", event.jdevice.which); | ||
| 272 | InitJoystick(event.jdevice.which); | ||
| 273 | break; | ||
| 274 | } | ||
| 275 | } | ||
| 276 | |||
| 277 | void CloseSDLJoysticks() { | ||
| 278 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 279 | joystick_map.clear(); | ||
| 280 | } | ||
| 281 | |||
| 282 | void PollLoop() { | ||
| 283 | if (SDL_Init(SDL_INIT_JOYSTICK) < 0) { | ||
| 284 | LOG_CRITICAL(Input, "SDL_Init(SDL_INIT_JOYSTICK) failed with: {}", SDL_GetError()); | ||
| 285 | return; | ||
| 286 | } | ||
| 287 | |||
| 288 | SDL_Event event; | ||
| 289 | while (initialized) { | ||
| 290 | // Wait for 10 ms or until an event happens | ||
| 291 | if (SDL_WaitEventTimeout(&event, 10)) { | ||
| 292 | // Don't handle the event if we are configuring | ||
| 293 | if (polling) { | ||
| 294 | event_queue.Push(event); | ||
| 295 | } else { | ||
| 296 | HandleGameControllerEvent(event); | ||
| 297 | } | ||
| 298 | } | ||
| 299 | } | ||
| 300 | CloseSDLJoysticks(); | ||
| 301 | SDL_QuitSubSystem(SDL_INIT_JOYSTICK); | ||
| 302 | } | ||
| 303 | |||
| 304 | class SDLButton final : public Input::ButtonDevice { | ||
| 305 | public: | ||
| 306 | explicit SDLButton(std::shared_ptr<SDLJoystick> joystick_, int button_) | ||
| 307 | : joystick(std::move(joystick_)), button(button_) {} | ||
| 308 | |||
| 309 | bool GetStatus() const override { | ||
| 310 | return joystick->GetButton(button); | ||
| 311 | } | ||
| 312 | |||
| 313 | private: | ||
| 314 | std::shared_ptr<SDLJoystick> joystick; | ||
| 315 | int button; | ||
| 316 | }; | ||
| 317 | |||
| 318 | class SDLDirectionButton final : public Input::ButtonDevice { | ||
| 319 | public: | ||
| 320 | explicit SDLDirectionButton(std::shared_ptr<SDLJoystick> joystick_, int hat_, Uint8 direction_) | ||
| 321 | : joystick(std::move(joystick_)), hat(hat_), direction(direction_) {} | ||
| 322 | |||
| 323 | bool GetStatus() const override { | ||
| 324 | return joystick->GetHatDirection(hat, direction); | ||
| 325 | } | ||
| 326 | |||
| 327 | private: | ||
| 328 | std::shared_ptr<SDLJoystick> joystick; | ||
| 329 | int hat; | ||
| 330 | Uint8 direction; | ||
| 331 | }; | ||
| 332 | |||
| 333 | class SDLAxisButton final : public Input::ButtonDevice { | ||
| 334 | public: | ||
| 335 | explicit SDLAxisButton(std::shared_ptr<SDLJoystick> joystick_, int axis_, float threshold_, | ||
| 336 | bool trigger_if_greater_) | ||
| 337 | : joystick(std::move(joystick_)), axis(axis_), threshold(threshold_), | ||
| 338 | trigger_if_greater(trigger_if_greater_) {} | ||
| 339 | |||
| 340 | bool GetStatus() const override { | ||
| 341 | float axis_value = joystick->GetAxis(axis); | ||
| 342 | if (trigger_if_greater) | ||
| 343 | return axis_value > threshold; | ||
| 344 | return axis_value < threshold; | ||
| 345 | } | ||
| 346 | |||
| 347 | private: | ||
| 348 | std::shared_ptr<SDLJoystick> joystick; | ||
| 349 | int axis; | ||
| 350 | float threshold; | ||
| 351 | bool trigger_if_greater; | ||
| 352 | }; | ||
| 353 | |||
| 354 | class SDLAnalog final : public Input::AnalogDevice { | ||
| 355 | public: | ||
| 356 | SDLAnalog(std::shared_ptr<SDLJoystick> joystick_, int axis_x_, int axis_y_) | ||
| 357 | : joystick(std::move(joystick_)), axis_x(axis_x_), axis_y(axis_y_) {} | ||
| 358 | |||
| 359 | std::tuple<float, float> GetStatus() const override { | ||
| 360 | return joystick->GetAnalog(axis_x, axis_y); | ||
| 361 | } | ||
| 362 | |||
| 363 | private: | ||
| 364 | std::shared_ptr<SDLJoystick> joystick; | ||
| 365 | int axis_x; | ||
| 366 | int axis_y; | ||
| 367 | }; | ||
| 368 | |||
| 369 | /// A button device factory that creates button devices from SDL joystick | ||
| 370 | class SDLButtonFactory final : public Input::Factory<Input::ButtonDevice> { | ||
| 371 | public: | ||
| 372 | /** | ||
| 373 | * Creates a button device from a joystick button | ||
| 374 | * @param params contains parameters for creating the device: | ||
| 375 | * - "guid": the guid of the joystick to bind | ||
| 376 | * - "port": the nth joystick of the same type to bind | ||
| 377 | * - "button"(optional): the index of the button to bind | ||
| 378 | * - "hat"(optional): the index of the hat to bind as direction buttons | ||
| 379 | * - "axis"(optional): the index of the axis to bind | ||
| 380 | * - "direction"(only used for hat): the direction name of the hat to bind. Can be "up", | ||
| 381 | * "down", "left" or "right" | ||
| 382 | * - "threshold"(only used for axis): a float value in (-1.0, 1.0) which the button is | ||
| 383 | * triggered if the axis value crosses | ||
| 384 | * - "direction"(only used for axis): "+" means the button is triggered when the axis | ||
| 385 | * value is greater than the threshold; "-" means the button is triggered when the axis | ||
| 386 | * value is smaller than the threshold | ||
| 387 | */ | ||
| 388 | std::unique_ptr<Input::ButtonDevice> Create(const Common::ParamPackage& params) override { | ||
| 389 | const std::string guid = params.Get("guid", "0"); | ||
| 390 | const int port = params.Get("port", 0); | ||
| 391 | |||
| 392 | auto joystick = GetSDLJoystickByGUID(guid, port); | ||
| 393 | |||
| 394 | if (params.Has("hat")) { | ||
| 395 | const int hat = params.Get("hat", 0); | ||
| 396 | const std::string direction_name = params.Get("direction", ""); | ||
| 397 | Uint8 direction; | ||
| 398 | if (direction_name == "up") { | ||
| 399 | direction = SDL_HAT_UP; | ||
| 400 | } else if (direction_name == "down") { | ||
| 401 | direction = SDL_HAT_DOWN; | ||
| 402 | } else if (direction_name == "left") { | ||
| 403 | direction = SDL_HAT_LEFT; | ||
| 404 | } else if (direction_name == "right") { | ||
| 405 | direction = SDL_HAT_RIGHT; | ||
| 406 | } else { | ||
| 407 | direction = 0; | ||
| 408 | } | ||
| 409 | // This is necessary so accessing GetHat with hat won't crash | ||
| 410 | joystick->SetHat(hat, SDL_HAT_CENTERED); | ||
| 411 | return std::make_unique<SDLDirectionButton>(joystick, hat, direction); | ||
| 412 | } | ||
| 413 | |||
| 414 | if (params.Has("axis")) { | ||
| 415 | const int axis = params.Get("axis", 0); | ||
| 416 | const float threshold = params.Get("threshold", 0.5f); | ||
| 417 | const std::string direction_name = params.Get("direction", ""); | ||
| 418 | bool trigger_if_greater; | ||
| 419 | if (direction_name == "+") { | ||
| 420 | trigger_if_greater = true; | ||
| 421 | } else if (direction_name == "-") { | ||
| 422 | trigger_if_greater = false; | ||
| 423 | } else { | ||
| 424 | trigger_if_greater = true; | ||
| 425 | LOG_ERROR(Input, "Unknown direction '{}'", direction_name); | ||
| 426 | } | ||
| 427 | // This is necessary so accessing GetAxis with axis won't crash | ||
| 428 | joystick->SetAxis(axis, 0); | ||
| 429 | return std::make_unique<SDLAxisButton>(joystick, axis, threshold, trigger_if_greater); | ||
| 430 | } | ||
| 431 | |||
| 432 | const int button = params.Get("button", 0); | ||
| 433 | // This is necessary so accessing GetButton with button won't crash | ||
| 434 | joystick->SetButton(button, false); | ||
| 435 | return std::make_unique<SDLButton>(joystick, button); | ||
| 436 | } | ||
| 437 | }; | ||
| 438 | |||
| 439 | /// An analog device factory that creates analog devices from SDL joystick | ||
| 440 | class SDLAnalogFactory final : public Input::Factory<Input::AnalogDevice> { | ||
| 441 | public: | ||
| 442 | /** | ||
| 443 | * Creates analog device from joystick axes | ||
| 444 | * @param params contains parameters for creating the device: | ||
| 445 | * - "guid": the guid of the joystick to bind | ||
| 446 | * - "port": the nth joystick of the same type | ||
| 447 | * - "axis_x": the index of the axis to be bind as x-axis | ||
| 448 | * - "axis_y": the index of the axis to be bind as y-axis | ||
| 449 | */ | ||
| 450 | std::unique_ptr<Input::AnalogDevice> Create(const Common::ParamPackage& params) override { | ||
| 451 | const std::string guid = params.Get("guid", "0"); | ||
| 452 | const int port = params.Get("port", 0); | ||
| 453 | const int axis_x = params.Get("axis_x", 0); | ||
| 454 | const int axis_y = params.Get("axis_y", 1); | ||
| 455 | |||
| 456 | auto joystick = GetSDLJoystickByGUID(guid, port); | ||
| 457 | |||
| 458 | // This is necessary so accessing GetAxis with axis_x and axis_y won't crash | ||
| 459 | joystick->SetAxis(axis_x, 0); | ||
| 460 | joystick->SetAxis(axis_y, 0); | ||
| 461 | return std::make_unique<SDLAnalog>(joystick, axis_x, axis_y); | ||
| 462 | } | ||
| 463 | }; | ||
| 464 | |||
| 465 | void Init() { | ||
| 466 | using namespace Input; | ||
| 467 | RegisterFactory<ButtonDevice>("sdl", std::make_shared<SDLButtonFactory>()); | ||
| 468 | RegisterFactory<AnalogDevice>("sdl", std::make_shared<SDLAnalogFactory>()); | ||
| 469 | polling = false; | ||
| 470 | initialized = true; | ||
| 471 | } | ||
| 472 | |||
| 473 | void Shutdown() { | ||
| 474 | if (initialized) { | ||
| 475 | using namespace Input; | ||
| 476 | UnregisterFactory<ButtonDevice>("sdl"); | ||
| 477 | UnregisterFactory<AnalogDevice>("sdl"); | ||
| 478 | initialized = false; | ||
| 479 | } | ||
| 480 | } | ||
| 481 | |||
| 482 | Common::ParamPackage SDLEventToButtonParamPackage(const SDL_Event& event) { | ||
| 483 | Common::ParamPackage params({{"engine", "sdl"}}); | ||
| 484 | switch (event.type) { | ||
| 485 | case SDL_JOYAXISMOTION: { | ||
| 486 | auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); | ||
| 487 | params.Set("port", joystick->GetPort()); | ||
| 488 | params.Set("guid", joystick->GetGUID()); | ||
| 489 | params.Set("axis", event.jaxis.axis); | ||
| 490 | if (event.jaxis.value > 0) { | ||
| 491 | params.Set("direction", "+"); | ||
| 492 | params.Set("threshold", "0.5"); | ||
| 493 | } else { | ||
| 494 | params.Set("direction", "-"); | ||
| 495 | params.Set("threshold", "-0.5"); | ||
| 496 | } | ||
| 497 | break; | ||
| 498 | } | ||
| 499 | case SDL_JOYBUTTONUP: { | ||
| 500 | auto joystick = GetSDLJoystickBySDLID(event.jbutton.which); | ||
| 501 | params.Set("port", joystick->GetPort()); | ||
| 502 | params.Set("guid", joystick->GetGUID()); | ||
| 503 | params.Set("button", event.jbutton.button); | ||
| 504 | break; | ||
| 505 | } | ||
| 506 | case SDL_JOYHATMOTION: { | ||
| 507 | auto joystick = GetSDLJoystickBySDLID(event.jhat.which); | ||
| 508 | params.Set("port", joystick->GetPort()); | ||
| 509 | params.Set("guid", joystick->GetGUID()); | ||
| 510 | params.Set("hat", event.jhat.hat); | ||
| 511 | switch (event.jhat.value) { | ||
| 512 | case SDL_HAT_UP: | ||
| 513 | params.Set("direction", "up"); | ||
| 514 | break; | ||
| 515 | case SDL_HAT_DOWN: | ||
| 516 | params.Set("direction", "down"); | ||
| 517 | break; | ||
| 518 | case SDL_HAT_LEFT: | ||
| 519 | params.Set("direction", "left"); | ||
| 520 | break; | ||
| 521 | case SDL_HAT_RIGHT: | ||
| 522 | params.Set("direction", "right"); | ||
| 523 | break; | ||
| 524 | default: | ||
| 525 | return {}; | ||
| 526 | } | ||
| 527 | break; | ||
| 528 | } | ||
| 529 | } | ||
| 530 | return params; | ||
| 531 | } | ||
| 532 | |||
| 533 | namespace Polling { | ||
| 534 | |||
| 535 | class SDLPoller : public InputCommon::Polling::DevicePoller { | ||
| 536 | public: | ||
| 537 | void Start() override { | ||
| 538 | event_queue.Clear(); | ||
| 539 | polling = true; | ||
| 540 | } | ||
| 541 | |||
| 542 | void Stop() override { | ||
| 543 | polling = false; | ||
| 544 | } | ||
| 545 | }; | ||
| 546 | |||
| 547 | class SDLButtonPoller final : public SDLPoller { | ||
| 548 | public: | ||
| 549 | Common::ParamPackage GetNextInput() override { | ||
| 550 | SDL_Event event; | ||
| 551 | while (event_queue.Pop(event)) { | ||
| 552 | switch (event.type) { | ||
| 553 | case SDL_JOYAXISMOTION: | ||
| 554 | if (std::abs(event.jaxis.value / 32767.0) < 0.5) { | ||
| 555 | break; | ||
| 556 | } | ||
| 557 | case SDL_JOYBUTTONUP: | ||
| 558 | case SDL_JOYHATMOTION: | ||
| 559 | return SDLEventToButtonParamPackage(event); | ||
| 560 | } | ||
| 561 | } | ||
| 562 | return {}; | ||
| 563 | } | ||
| 564 | }; | ||
| 565 | |||
| 566 | class SDLAnalogPoller final : public SDLPoller { | ||
| 567 | public: | ||
| 568 | void Start() override { | ||
| 569 | SDLPoller::Start(); | ||
| 570 | |||
| 571 | // Reset stored axes | ||
| 572 | analog_xaxis = -1; | ||
| 573 | analog_yaxis = -1; | ||
| 574 | analog_axes_joystick = -1; | ||
| 575 | } | ||
| 576 | |||
| 577 | Common::ParamPackage GetNextInput() override { | ||
| 578 | SDL_Event event; | ||
| 579 | while (event_queue.Pop(event)) { | ||
| 580 | if (event.type != SDL_JOYAXISMOTION || std::abs(event.jaxis.value / 32767.0) < 0.5) { | ||
| 581 | continue; | ||
| 582 | } | ||
| 583 | // An analog device needs two axes, so we need to store the axis for later and wait for | ||
| 584 | // a second SDL event. The axes also must be from the same joystick. | ||
| 585 | int axis = event.jaxis.axis; | ||
| 586 | if (analog_xaxis == -1) { | ||
| 587 | analog_xaxis = axis; | ||
| 588 | analog_axes_joystick = event.jaxis.which; | ||
| 589 | } else if (analog_yaxis == -1 && analog_xaxis != axis && | ||
| 590 | analog_axes_joystick == event.jaxis.which) { | ||
| 591 | analog_yaxis = axis; | ||
| 592 | } | ||
| 593 | } | ||
| 594 | Common::ParamPackage params; | ||
| 595 | if (analog_xaxis != -1 && analog_yaxis != -1) { | ||
| 596 | auto joystick = GetSDLJoystickBySDLID(event.jaxis.which); | ||
| 597 | params.Set("engine", "sdl"); | ||
| 598 | params.Set("port", joystick->GetPort()); | ||
| 599 | params.Set("guid", joystick->GetGUID()); | ||
| 600 | params.Set("axis_x", analog_xaxis); | ||
| 601 | params.Set("axis_y", analog_yaxis); | ||
| 602 | analog_xaxis = -1; | ||
| 603 | analog_yaxis = -1; | ||
| 604 | analog_axes_joystick = -1; | ||
| 605 | return params; | ||
| 606 | } | ||
| 607 | return params; | ||
| 608 | } | ||
| 609 | |||
| 610 | private: | ||
| 611 | int analog_xaxis = -1; | ||
| 612 | int analog_yaxis = -1; | ||
| 613 | SDL_JoystickID analog_axes_joystick = -1; | ||
| 614 | }; | ||
| 615 | |||
| 616 | std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( | ||
| 617 | InputCommon::Polling::DeviceType type) { | ||
| 618 | std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> pollers; | ||
| 619 | switch (type) { | ||
| 620 | case InputCommon::Polling::DeviceType::Analog: | ||
| 621 | pollers.push_back(std::make_unique<SDLAnalogPoller>()); | ||
| 622 | break; | ||
| 623 | case InputCommon::Polling::DeviceType::Button: | ||
| 624 | pollers.push_back(std::make_unique<SDLButtonPoller>()); | ||
| 625 | break; | ||
| 626 | } | ||
| 627 | return pollers; | ||
| 628 | } | 18 | } |
| 629 | } // namespace Polling | 19 | } // namespace InputCommon::SDL |
| 630 | } // namespace SDL | ||
| 631 | } // namespace InputCommon | ||
diff --git a/src/input_common/sdl/sdl.h b/src/input_common/sdl/sdl.h index 0206860d3..d7f24c68a 100644 --- a/src/input_common/sdl/sdl.h +++ b/src/input_common/sdl/sdl.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // Copyright 2018 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| @@ -7,45 +7,38 @@ | |||
| 7 | #include <memory> | 7 | #include <memory> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | #include "core/frontend/input.h" | 9 | #include "core/frontend/input.h" |
| 10 | #include "input_common/main.h" | ||
| 10 | 11 | ||
| 11 | union SDL_Event; | 12 | union SDL_Event; |
| 13 | |||
| 12 | namespace Common { | 14 | namespace Common { |
| 13 | class ParamPackage; | 15 | class ParamPackage; |
| 14 | } | 16 | } // namespace Common |
| 15 | namespace InputCommon { | 17 | |
| 16 | namespace Polling { | 18 | namespace InputCommon::Polling { |
| 17 | class DevicePoller; | 19 | class DevicePoller; |
| 18 | enum class DeviceType; | 20 | enum class DeviceType; |
| 19 | } // namespace Polling | 21 | } // namespace InputCommon::Polling |
| 20 | } // namespace InputCommon | ||
| 21 | |||
| 22 | namespace InputCommon { | ||
| 23 | namespace SDL { | ||
| 24 | |||
| 25 | /// Initializes and registers SDL device factories | ||
| 26 | void Init(); | ||
| 27 | |||
| 28 | /// Unresisters SDL device factories and shut them down. | ||
| 29 | void Shutdown(); | ||
| 30 | 22 | ||
| 31 | /// Needs to be called before SDL_QuitSubSystem. | 23 | namespace InputCommon::SDL { |
| 32 | void CloseSDLJoysticks(); | ||
| 33 | 24 | ||
| 34 | /// Handle SDL_Events for joysticks from SDL_PollEvent | 25 | class State { |
| 35 | void HandleGameControllerEvent(const SDL_Event& event); | 26 | public: |
| 27 | using Pollers = std::vector<std::unique_ptr<Polling::DevicePoller>>; | ||
| 36 | 28 | ||
| 37 | /// A Loop that calls HandleGameControllerEvent until Shutdown is called | 29 | /// Unregisters SDL device factories and shut them down. |
| 38 | void PollLoop(); | 30 | virtual ~State() = default; |
| 39 | 31 | ||
| 40 | /// Creates a ParamPackage from an SDL_Event that can directly be used to create a ButtonDevice | 32 | virtual Pollers GetPollers(Polling::DeviceType type) = 0; |
| 41 | Common::ParamPackage SDLEventToButtonParamPackage(const SDL_Event& event); | 33 | }; |
| 42 | 34 | ||
| 43 | namespace Polling { | 35 | class NullState : public State { |
| 36 | public: | ||
| 37 | Pollers GetPollers(Polling::DeviceType type) override { | ||
| 38 | return {}; | ||
| 39 | } | ||
| 40 | }; | ||
| 44 | 41 | ||
| 45 | /// Get all DevicePoller that use the SDL backend for a specific device type | 42 | std::unique_ptr<State> Init(); |
| 46 | std::vector<std::unique_ptr<InputCommon::Polling::DevicePoller>> GetPollers( | ||
| 47 | InputCommon::Polling::DeviceType type); | ||
| 48 | 43 | ||
| 49 | } // namespace Polling | 44 | } // namespace InputCommon::SDL |
| 50 | } // namespace SDL | ||
| 51 | } // namespace InputCommon | ||
diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp new file mode 100644 index 000000000..b132d77f5 --- /dev/null +++ b/src/input_common/sdl/sdl_impl.cpp | |||
| @@ -0,0 +1,669 @@ | |||
| 1 | // Copyright 2018 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | #include <atomic> | ||
| 7 | #include <cmath> | ||
| 8 | #include <functional> | ||
| 9 | #include <iterator> | ||
| 10 | #include <mutex> | ||
| 11 | #include <string> | ||
| 12 | #include <thread> | ||
| 13 | #include <tuple> | ||
| 14 | #include <unordered_map> | ||
| 15 | #include <utility> | ||
| 16 | #include <vector> | ||
| 17 | #include <SDL.h> | ||
| 18 | #include "common/assert.h" | ||
| 19 | #include "common/logging/log.h" | ||
| 20 | #include "common/math_util.h" | ||
| 21 | #include "common/param_package.h" | ||
| 22 | #include "common/threadsafe_queue.h" | ||
| 23 | #include "core/frontend/input.h" | ||
| 24 | #include "input_common/sdl/sdl_impl.h" | ||
| 25 | |||
| 26 | namespace InputCommon { | ||
| 27 | |||
| 28 | namespace SDL { | ||
| 29 | |||
| 30 | static std::string GetGUID(SDL_Joystick* joystick) { | ||
| 31 | SDL_JoystickGUID guid = SDL_JoystickGetGUID(joystick); | ||
| 32 | char guid_str[33]; | ||
| 33 | SDL_JoystickGetGUIDString(guid, guid_str, sizeof(guid_str)); | ||
| 34 | return guid_str; | ||
| 35 | } | ||
| 36 | |||
| 37 | /// Creates a ParamPackage from an SDL_Event that can directly be used to create a ButtonDevice | ||
| 38 | static Common::ParamPackage SDLEventToButtonParamPackage(SDLState& state, const SDL_Event& event); | ||
| 39 | |||
| 40 | static int SDLEventWatcher(void* userdata, SDL_Event* event) { | ||
| 41 | SDLState* sdl_state = reinterpret_cast<SDLState*>(userdata); | ||
| 42 | // Don't handle the event if we are configuring | ||
| 43 | if (sdl_state->polling) { | ||
| 44 | sdl_state->event_queue.Push(*event); | ||
| 45 | } else { | ||
| 46 | sdl_state->HandleGameControllerEvent(*event); | ||
| 47 | } | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | class SDLJoystick { | ||
| 52 | public: | ||
| 53 | SDLJoystick(std::string guid_, int port_, SDL_Joystick* joystick, | ||
| 54 | decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) | ||
| 55 | : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} | ||
| 56 | |||
| 57 | void SetButton(int button, bool value) { | ||
| 58 | std::lock_guard<std::mutex> lock(mutex); | ||
| 59 | state.buttons[button] = value; | ||
| 60 | } | ||
| 61 | |||
| 62 | bool GetButton(int button) const { | ||
| 63 | std::lock_guard<std::mutex> lock(mutex); | ||
| 64 | return state.buttons.at(button); | ||
| 65 | } | ||
| 66 | |||
| 67 | void SetAxis(int axis, Sint16 value) { | ||
| 68 | std::lock_guard<std::mutex> lock(mutex); | ||
| 69 | state.axes[axis] = value; | ||
| 70 | } | ||
| 71 | |||
| 72 | float GetAxis(int axis) const { | ||
| 73 | std::lock_guard<std::mutex> lock(mutex); | ||
| 74 | return state.axes.at(axis) / 32767.0f; | ||
| 75 | } | ||
| 76 | |||
| 77 | std::tuple<float, float> GetAnalog(int axis_x, int axis_y) const { | ||
| 78 | float x = GetAxis(axis_x); | ||
| 79 | float y = GetAxis(axis_y); | ||
| 80 | y = -y; // 3DS uses an y-axis inverse from SDL | ||
| 81 | |||
| 82 | // Make sure the coordinates are in the unit circle, | ||
| 83 | // otherwise normalize it. | ||
| 84 | float r = x * x + y * y; | ||
| 85 | if (r > 1.0f) { | ||
| 86 | r = std::sqrt(r); | ||
| 87 | x /= r; | ||
| 88 | y /= r; | ||
| 89 | } | ||
| 90 | |||
| 91 | return std::make_tuple(x, y); | ||
| 92 | } | ||
| 93 | |||
| 94 | void SetHat(int hat, Uint8 direction) { | ||
| 95 | std::lock_guard<std::mutex> lock(mutex); | ||
| 96 | state.hats[hat] = direction; | ||
| 97 | } | ||
| 98 | |||
| 99 | bool GetHatDirection(int hat, Uint8 direction) const { | ||
| 100 | std::lock_guard<std::mutex> lock(mutex); | ||
| 101 | return (state.hats.at(hat) & direction) != 0; | ||
| 102 | } | ||
| 103 | /** | ||
| 104 | * The guid of the joystick | ||
| 105 | */ | ||
| 106 | const std::string& GetGUID() const { | ||
| 107 | return guid; | ||
| 108 | } | ||
| 109 | |||
| 110 | /** | ||
| 111 | * The number of joystick from the same type that were connected before this joystick | ||
| 112 | */ | ||
| 113 | int GetPort() const { | ||
| 114 | return port; | ||
| 115 | } | ||
| 116 | |||
| 117 | SDL_Joystick* GetSDLJoystick() const { | ||
| 118 | return sdl_joystick.get(); | ||
| 119 | } | ||
| 120 | |||
| 121 | void SetSDLJoystick(SDL_Joystick* joystick, | ||
| 122 | decltype(&SDL_JoystickClose) deleter = &SDL_JoystickClose) { | ||
| 123 | sdl_joystick = | ||
| 124 | std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)>(joystick, deleter); | ||
| 125 | } | ||
| 126 | |||
| 127 | private: | ||
| 128 | struct State { | ||
| 129 | std::unordered_map<int, bool> buttons; | ||
| 130 | std::unordered_map<int, Sint16> axes; | ||
| 131 | std::unordered_map<int, Uint8> hats; | ||
| 132 | } state; | ||
| 133 | std::string guid; | ||
| 134 | int port; | ||
| 135 | std::unique_ptr<SDL_Joystick, decltype(&SDL_JoystickClose)> sdl_joystick; | ||
| 136 | mutable std::mutex mutex; | ||
| 137 | }; | ||
| 138 | |||
| 139 | /** | ||
| 140 | * Get the nth joystick with the corresponding GUID | ||
| 141 | */ | ||
| 142 | std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) { | ||
| 143 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 144 | const auto it = joystick_map.find(guid); | ||
| 145 | if (it != joystick_map.end()) { | ||
| 146 | while (it->second.size() <= port) { | ||
| 147 | auto joystick = std::make_shared<SDLJoystick>(guid, it->second.size(), nullptr, | ||
| 148 | [](SDL_Joystick*) {}); | ||
| 149 | it->second.emplace_back(std::move(joystick)); | ||
| 150 | } | ||
| 151 | return it->second[port]; | ||
| 152 | } | ||
| 153 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, nullptr, [](SDL_Joystick*) {}); | ||
| 154 | return joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 155 | } | ||
| 156 | |||
| 157 | /** | ||
| 158 | * Check how many identical joysticks (by guid) were connected before the one with sdl_id and so tie | ||
| 159 | * it to a SDLJoystick with the same guid and that port | ||
| 160 | */ | ||
| 161 | std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { | ||
| 162 | auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); | ||
| 163 | const std::string guid = GetGUID(sdl_joystick); | ||
| 164 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 165 | auto map_it = joystick_map.find(guid); | ||
| 166 | if (map_it != joystick_map.end()) { | ||
| 167 | auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), | ||
| 168 | [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 169 | return sdl_joystick == joystick->GetSDLJoystick(); | ||
| 170 | }); | ||
| 171 | if (vec_it != map_it->second.end()) { | ||
| 172 | // This is the common case: There is already an existing SDL_Joystick maped to a | ||
| 173 | // SDLJoystick. return the SDLJoystick | ||
| 174 | return *vec_it; | ||
| 175 | } | ||
| 176 | // Search for a SDLJoystick without a mapped SDL_Joystick... | ||
| 177 | auto nullptr_it = std::find_if(map_it->second.begin(), map_it->second.end(), | ||
| 178 | [](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 179 | return !joystick->GetSDLJoystick(); | ||
| 180 | }); | ||
| 181 | if (nullptr_it != map_it->second.end()) { | ||
| 182 | // ... and map it | ||
| 183 | (*nullptr_it)->SetSDLJoystick(sdl_joystick); | ||
| 184 | return *nullptr_it; | ||
| 185 | } | ||
| 186 | // There is no SDLJoystick without a mapped SDL_Joystick | ||
| 187 | // Create a new SDLJoystick | ||
| 188 | auto joystick = std::make_shared<SDLJoystick>(guid, map_it->second.size(), sdl_joystick); | ||
| 189 | return map_it->second.emplace_back(std::move(joystick)); | ||
| 190 | } | ||
| 191 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); | ||
| 192 | return joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 193 | } | ||
| 194 | |||
| 195 | void SDLState::InitJoystick(int joystick_index) { | ||
| 196 | SDL_Joystick* sdl_joystick = SDL_JoystickOpen(joystick_index); | ||
| 197 | if (!sdl_joystick) { | ||
| 198 | LOG_ERROR(Input, "failed to open joystick {}", joystick_index); | ||
| 199 | return; | ||
| 200 | } | ||
| 201 | std::string guid = GetGUID(sdl_joystick); | ||
| 202 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 203 | if (joystick_map.find(guid) == joystick_map.end()) { | ||
| 204 | auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); | ||
| 205 | joystick_map[guid].emplace_back(std::move(joystick)); | ||
| 206 | return; | ||
| 207 | } | ||
| 208 | auto& joystick_guid_list = joystick_map[guid]; | ||
| 209 | const auto it = std::find_if( | ||
| 210 | joystick_guid_list.begin(), joystick_guid_list.end(), | ||
| 211 | [](const std::shared_ptr<SDLJoystick>& joystick) { return !joystick->GetSDLJoystick(); }); | ||
| 212 | if (it != joystick_guid_list.end()) { | ||
| 213 | (*it)->SetSDLJoystick(sdl_joystick); | ||
| 214 | return; | ||
| 215 | } | ||
| 216 | auto joystick = std::make_shared<SDLJoystick>(guid, joystick_guid_list.size(), sdl_joystick); | ||
| 217 | joystick_guid_list.emplace_back(std::move(joystick)); | ||
| 218 | } | ||
| 219 | |||
| 220 | void SDLState::CloseJoystick(SDL_Joystick* sdl_joystick) { | ||
| 221 | std::string guid = GetGUID(sdl_joystick); | ||
| 222 | std::shared_ptr<SDLJoystick> joystick; | ||
| 223 | { | ||
| 224 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 225 | // This call to guid is safe since the joystick is guaranteed to be in the map | ||
| 226 | auto& joystick_guid_list = joystick_map[guid]; | ||
| 227 | const auto joystick_it = | ||
| 228 | std::find_if(joystick_guid_list.begin(), joystick_guid_list.end(), | ||
| 229 | [&sdl_joystick](const std::shared_ptr<SDLJoystick>& joystick) { | ||
| 230 | return joystick->GetSDLJoystick() == sdl_joystick; | ||
| 231 | }); | ||
| 232 | joystick = *joystick_it; | ||
| 233 | } | ||
| 234 | // Destruct SDL_Joystick outside the lock guard because SDL can internally call event calback | ||
| 235 | // which locks the mutex again | ||
| 236 | joystick->SetSDLJoystick(nullptr, [](SDL_Joystick*) {}); | ||
| 237 | } | ||
| 238 | |||
| 239 | void SDLState::HandleGameControllerEvent(const SDL_Event& event) { | ||
| 240 | switch (event.type) { | ||
| 241 | case SDL_JOYBUTTONUP: { | ||
| 242 | if (auto joystick = GetSDLJoystickBySDLID(event.jbutton.which)) { | ||
| 243 | joystick->SetButton(event.jbutton.button, false); | ||
| 244 | } | ||
| 245 | break; | ||
| 246 | } | ||
| 247 | case SDL_JOYBUTTONDOWN: { | ||
| 248 | if (auto joystick = GetSDLJoystickBySDLID(event.jbutton.which)) { | ||
| 249 | joystick->SetButton(event.jbutton.button, true); | ||
| 250 | } | ||
| 251 | break; | ||
| 252 | } | ||
| 253 | case SDL_JOYHATMOTION: { | ||
| 254 | if (auto joystick = GetSDLJoystickBySDLID(event.jhat.which)) { | ||
| 255 | joystick->SetHat(event.jhat.hat, event.jhat.value); | ||
| 256 | } | ||
| 257 | break; | ||
| 258 | } | ||
| 259 | case SDL_JOYAXISMOTION: { | ||
| 260 | if (auto joystick = GetSDLJoystickBySDLID(event.jaxis.which)) { | ||
| 261 | joystick->SetAxis(event.jaxis.axis, event.jaxis.value); | ||
| 262 | } | ||
| 263 | break; | ||
| 264 | } | ||
| 265 | case SDL_JOYDEVICEREMOVED: | ||
| 266 | LOG_DEBUG(Input, "Controller removed with Instance_ID {}", event.jdevice.which); | ||
| 267 | CloseJoystick(SDL_JoystickFromInstanceID(event.jdevice.which)); | ||
| 268 | break; | ||
| 269 | case SDL_JOYDEVICEADDED: | ||
| 270 | LOG_DEBUG(Input, "Controller connected with device index {}", event.jdevice.which); | ||
| 271 | InitJoystick(event.jdevice.which); | ||
| 272 | break; | ||
| 273 | } | ||
| 274 | } | ||
| 275 | |||
| 276 | void SDLState::CloseJoysticks() { | ||
| 277 | std::lock_guard<std::mutex> lock(joystick_map_mutex); | ||
| 278 | joystick_map.clear(); | ||
| 279 | } | ||
| 280 | |||
| 281 | class SDLButton final : public Input::ButtonDevice { | ||
| 282 | public: | ||
| 283 | explicit SDLButton(std::shared_ptr<SDLJoystick> joystick_, int button_) | ||
| 284 | : joystick(std::move(joystick_)), button(button_) {} | ||
| 285 | |||
| 286 | bool GetStatus() const override { | ||
| 287 | return joystick->GetButton(button); | ||
| 288 | } | ||
| 289 | |||
| 290 | private: | ||
| 291 | std::shared_ptr<SDLJoystick> joystick; | ||
| 292 | int button; | ||
| 293 | }; | ||
| 294 | |||
| 295 | class SDLDirectionButton final : public Input::ButtonDevice { | ||
| 296 | public: | ||
| 297 | explicit SDLDirectionButton(std::shared_ptr<SDLJoystick> joystick_, int hat_, Uint8 direction_) | ||
| 298 | : joystick(std::move(joystick_)), hat(hat_), direction(direction_) {} | ||
| 299 | |||
| 300 | bool GetStatus() const override { | ||
| 301 | return joystick->GetHatDirection(hat, direction); | ||
| 302 | } | ||
| 303 | |||
| 304 | private: | ||
| 305 | std::shared_ptr<SDLJoystick> joystick; | ||
| 306 | int hat; | ||
| 307 | Uint8 direction; | ||
| 308 | }; | ||
| 309 | |||
| 310 | class SDLAxisButton final : public Input::ButtonDevice { | ||
| 311 | public: | ||
| 312 | explicit SDLAxisButton(std::shared_ptr<SDLJoystick> joystick_, int axis_, float threshold_, | ||
| 313 | bool trigger_if_greater_) | ||
| 314 | : joystick(std::move(joystick_)), axis(axis_), threshold(threshold_), | ||
| 315 | trigger_if_greater(trigger_if_greater_) {} | ||
| 316 | |||
| 317 | bool GetStatus() const override { | ||
| 318 | float axis_value = joystick->GetAxis(axis); | ||
| 319 | if (trigger_if_greater) | ||
| 320 | return axis_value > threshold; | ||
| 321 | return axis_value < threshold; | ||
| 322 | } | ||
| 323 | |||
| 324 | private: | ||
| 325 | std::shared_ptr<SDLJoystick> joystick; | ||
| 326 | int axis; | ||
| 327 | float threshold; | ||
| 328 | bool trigger_if_greater; | ||
| 329 | }; | ||
| 330 | |||
| 331 | class SDLAnalog final : public Input::AnalogDevice { | ||
| 332 | public: | ||
| 333 | SDLAnalog(std::shared_ptr<SDLJoystick> joystick_, int axis_x_, int axis_y_, float deadzone_) | ||
| 334 | : joystick(std::move(joystick_)), axis_x(axis_x_), axis_y(axis_y_), deadzone(deadzone_) {} | ||
| 335 | |||
| 336 | std::tuple<float, float> GetStatus() const override { | ||
| 337 | const auto [x, y] = joystick->GetAnalog(axis_x, axis_y); | ||
| 338 | const float r = std::sqrt((x * x) + (y * y)); | ||
| 339 | if (r > deadzone) { | ||
| 340 | return std::make_tuple(x / r * (r - deadzone) / (1 - deadzone), | ||
| 341 | y / r * (r - deadzone) / (1 - deadzone)); | ||
| 342 | } | ||
| 343 | return std::make_tuple<float, float>(0.0f, 0.0f); | ||
| 344 | } | ||
| 345 | |||
| 346 | private: | ||
| 347 | std::shared_ptr<SDLJoystick> joystick; | ||
| 348 | const int axis_x; | ||
| 349 | const int axis_y; | ||
| 350 | const float deadzone; | ||
| 351 | }; | ||
| 352 | |||
| 353 | /// A button device factory that creates button devices from SDL joystick | ||
| 354 | class SDLButtonFactory final : public Input::Factory<Input::ButtonDevice> { | ||
| 355 | public: | ||
| 356 | explicit SDLButtonFactory(SDLState& state_) : state(state_) {} | ||
| 357 | |||
| 358 | /** | ||
| 359 | * Creates a button device from a joystick button | ||
| 360 | * @param params contains parameters for creating the device: | ||
| 361 | * - "guid": the guid of the joystick to bind | ||
| 362 | * - "port": the nth joystick of the same type to bind | ||
| 363 | * - "button"(optional): the index of the button to bind | ||
| 364 | * - "hat"(optional): the index of the hat to bind as direction buttons | ||
| 365 | * - "axis"(optional): the index of the axis to bind | ||
| 366 | * - "direction"(only used for hat): the direction name of the hat to bind. Can be "up", | ||
| 367 | * "down", "left" or "right" | ||
| 368 | * - "threshold"(only used for axis): a float value in (-1.0, 1.0) which the button is | ||
| 369 | * triggered if the axis value crosses | ||
| 370 | * - "direction"(only used for axis): "+" means the button is triggered when the axis | ||
| 371 | * value is greater than the threshold; "-" means the button is triggered when the axis | ||
| 372 | * value is smaller than the threshold | ||
| 373 | */ | ||
| 374 | std::unique_ptr<Input::ButtonDevice> Create(const Common::ParamPackage& params) override { | ||
| 375 | const std::string guid = params.Get("guid", "0"); | ||
| 376 | const int port = params.Get("port", 0); | ||
| 377 | |||
| 378 | auto joystick = state.GetSDLJoystickByGUID(guid, port); | ||
| 379 | |||
| 380 | if (params.Has("hat")) { | ||
| 381 | const int hat = params.Get("hat", 0); | ||
| 382 | const std::string direction_name = params.Get("direction", ""); | ||
| 383 | Uint8 direction; | ||
| 384 | if (direction_name == "up") { | ||
| 385 | direction = SDL_HAT_UP; | ||
| 386 | } else if (direction_name == "down") { | ||
| 387 | direction = SDL_HAT_DOWN; | ||
| 388 | } else if (direction_name == "left") { | ||
| 389 | direction = SDL_HAT_LEFT; | ||
| 390 | } else if (direction_name == "right") { | ||
| 391 | direction = SDL_HAT_RIGHT; | ||
| 392 | } else { | ||
| 393 | direction = 0; | ||
| 394 | } | ||
| 395 | // This is necessary so accessing GetHat with hat won't crash | ||
| 396 | joystick->SetHat(hat, SDL_HAT_CENTERED); | ||
| 397 | return std::make_unique<SDLDirectionButton>(joystick, hat, direction); | ||
| 398 | } | ||
| 399 | |||
| 400 | if (params.Has("axis")) { | ||
| 401 | const int axis = params.Get("axis", 0); | ||
| 402 | const float threshold = params.Get("threshold", 0.5f); | ||
| 403 | const std::string direction_name = params.Get("direction", ""); | ||
| 404 | bool trigger_if_greater; | ||
| 405 | if (direction_name == "+") { | ||
| 406 | trigger_if_greater = true; | ||
| 407 | } else if (direction_name == "-") { | ||
| 408 | trigger_if_greater = false; | ||
| 409 | } else { | ||
| 410 | trigger_if_greater = true; | ||
| 411 | LOG_ERROR(Input, "Unknown direction {}", direction_name); | ||
| 412 | } | ||
| 413 | // This is necessary so accessing GetAxis with axis won't crash | ||
| 414 | joystick->SetAxis(axis, 0); | ||
| 415 | return std::make_unique<SDLAxisButton>(joystick, axis, threshold, trigger_if_greater); | ||
| 416 | } | ||
| 417 | |||
| 418 | const int button = params.Get("button", 0); | ||
| 419 | // This is necessary so accessing GetButton with button won't crash | ||
| 420 | joystick->SetButton(button, false); | ||
| 421 | return std::make_unique<SDLButton>(joystick, button); | ||
| 422 | } | ||
| 423 | |||
| 424 | private: | ||
| 425 | SDLState& state; | ||
| 426 | }; | ||
| 427 | |||
| 428 | /// An analog device factory that creates analog devices from SDL joystick | ||
| 429 | class SDLAnalogFactory final : public Input::Factory<Input::AnalogDevice> { | ||
| 430 | public: | ||
| 431 | explicit SDLAnalogFactory(SDLState& state_) : state(state_) {} | ||
| 432 | /** | ||
| 433 | * Creates analog device from joystick axes | ||
| 434 | * @param params contains parameters for creating the device: | ||
| 435 | * - "guid": the guid of the joystick to bind | ||
| 436 | * - "port": the nth joystick of the same type | ||
| 437 | * - "axis_x": the index of the axis to be bind as x-axis | ||
| 438 | * - "axis_y": the index of the axis to be bind as y-axis | ||
| 439 | */ | ||
| 440 | std::unique_ptr<Input::AnalogDevice> Create(const Common::ParamPackage& params) override { | ||
| 441 | const std::string guid = params.Get("guid", "0"); | ||
| 442 | const int port = params.Get("port", 0); | ||
| 443 | const int axis_x = params.Get("axis_x", 0); | ||
| 444 | const int axis_y = params.Get("axis_y", 1); | ||
| 445 | float deadzone = std::clamp(params.Get("deadzone", 0.0f), 0.0f, .99f); | ||
| 446 | |||
| 447 | auto joystick = state.GetSDLJoystickByGUID(guid, port); | ||
| 448 | |||
| 449 | // This is necessary so accessing GetAxis with axis_x and axis_y won't crash | ||
| 450 | joystick->SetAxis(axis_x, 0); | ||
| 451 | joystick->SetAxis(axis_y, 0); | ||
| 452 | return std::make_unique<SDLAnalog>(joystick, axis_x, axis_y, deadzone); | ||
| 453 | } | ||
| 454 | |||
| 455 | private: | ||
| 456 | SDLState& state; | ||
| 457 | }; | ||
| 458 | |||
| 459 | SDLState::SDLState() { | ||
| 460 | using namespace Input; | ||
| 461 | RegisterFactory<ButtonDevice>("sdl", std::make_shared<SDLButtonFactory>(*this)); | ||
| 462 | RegisterFactory<AnalogDevice>("sdl", std::make_shared<SDLAnalogFactory>(*this)); | ||
| 463 | |||
| 464 | // If the frontend is going to manage the event loop, then we dont start one here | ||
| 465 | start_thread = !SDL_WasInit(SDL_INIT_JOYSTICK); | ||
| 466 | if (start_thread && SDL_Init(SDL_INIT_JOYSTICK) < 0) { | ||
| 467 | LOG_CRITICAL(Input, "SDL_Init(SDL_INIT_JOYSTICK) failed with: {}", SDL_GetError()); | ||
| 468 | return; | ||
| 469 | } | ||
| 470 | if (SDL_SetHint(SDL_HINT_JOYSTICK_ALLOW_BACKGROUND_EVENTS, "1") == SDL_FALSE) { | ||
| 471 | LOG_ERROR(Input, "Failed to set Hint for background events", SDL_GetError()); | ||
| 472 | } | ||
| 473 | |||
| 474 | SDL_AddEventWatch(&SDLEventWatcher, this); | ||
| 475 | |||
| 476 | initialized = true; | ||
| 477 | if (start_thread) { | ||
| 478 | poll_thread = std::thread([this] { | ||
| 479 | using namespace std::chrono_literals; | ||
| 480 | while (initialized) { | ||
| 481 | SDL_PumpEvents(); | ||
| 482 | std::this_thread::sleep_for(10ms); | ||
| 483 | } | ||
| 484 | }); | ||
| 485 | } | ||
| 486 | // Because the events for joystick connection happens before we have our event watcher added, we | ||
| 487 | // can just open all the joysticks right here | ||
| 488 | for (int i = 0; i < SDL_NumJoysticks(); ++i) { | ||
| 489 | InitJoystick(i); | ||
| 490 | } | ||
| 491 | } | ||
| 492 | |||
SDLState::~SDLState() {
    using namespace Input;
    // Unregister the factories first so no new SDL devices can be handed out
    // while the backend is being torn down.
    UnregisterFactory<ButtonDevice>("sdl");
    UnregisterFactory<AnalogDevice>("sdl");

    // Per the header contract, joysticks must be closed before SDL_QuitSubSystem.
    CloseJoysticks();
    SDL_DelEventWatch(&SDLEventWatcher, this);

    // Ordering matters: clearing `initialized` is what makes the poll thread's
    // loop exit, so it must happen before join(); SDL may only be shut down
    // once that thread is gone, and only if we were the ones who started it.
    initialized = false;
    if (start_thread) {
        poll_thread.join();
        SDL_QuitSubSystem(SDL_INIT_JOYSTICK);
    }
}
| 507 | |||
| 508 | Common::ParamPackage SDLEventToButtonParamPackage(SDLState& state, const SDL_Event& event) { | ||
| 509 | Common::ParamPackage params({{"engine", "sdl"}}); | ||
| 510 | |||
| 511 | switch (event.type) { | ||
| 512 | case SDL_JOYAXISMOTION: { | ||
| 513 | auto joystick = state.GetSDLJoystickBySDLID(event.jaxis.which); | ||
| 514 | params.Set("port", joystick->GetPort()); | ||
| 515 | params.Set("guid", joystick->GetGUID()); | ||
| 516 | params.Set("axis", event.jaxis.axis); | ||
| 517 | if (event.jaxis.value > 0) { | ||
| 518 | params.Set("direction", "+"); | ||
| 519 | params.Set("threshold", "0.5"); | ||
| 520 | } else { | ||
| 521 | params.Set("direction", "-"); | ||
| 522 | params.Set("threshold", "-0.5"); | ||
| 523 | } | ||
| 524 | break; | ||
| 525 | } | ||
| 526 | case SDL_JOYBUTTONUP: { | ||
| 527 | auto joystick = state.GetSDLJoystickBySDLID(event.jbutton.which); | ||
| 528 | params.Set("port", joystick->GetPort()); | ||
| 529 | params.Set("guid", joystick->GetGUID()); | ||
| 530 | params.Set("button", event.jbutton.button); | ||
| 531 | break; | ||
| 532 | } | ||
| 533 | case SDL_JOYHATMOTION: { | ||
| 534 | auto joystick = state.GetSDLJoystickBySDLID(event.jhat.which); | ||
| 535 | params.Set("port", joystick->GetPort()); | ||
| 536 | params.Set("guid", joystick->GetGUID()); | ||
| 537 | params.Set("hat", event.jhat.hat); | ||
| 538 | switch (event.jhat.value) { | ||
| 539 | case SDL_HAT_UP: | ||
| 540 | params.Set("direction", "up"); | ||
| 541 | break; | ||
| 542 | case SDL_HAT_DOWN: | ||
| 543 | params.Set("direction", "down"); | ||
| 544 | break; | ||
| 545 | case SDL_HAT_LEFT: | ||
| 546 | params.Set("direction", "left"); | ||
| 547 | break; | ||
| 548 | case SDL_HAT_RIGHT: | ||
| 549 | params.Set("direction", "right"); | ||
| 550 | break; | ||
| 551 | default: | ||
| 552 | return {}; | ||
| 553 | } | ||
| 554 | break; | ||
| 555 | } | ||
| 556 | } | ||
| 557 | return params; | ||
| 558 | } | ||
| 559 | |||
| 560 | namespace Polling { | ||
| 561 | |||
/// Common base for SDL device pollers: toggles the backend's polling mode so
/// the event watcher forwards joystick events into the shared queue.
class SDLPoller : public InputCommon::Polling::DevicePoller {
public:
    explicit SDLPoller(SDLState& state_) : state(state_) {}

    void Start() override {
        // Drop stale events BEFORE enabling polling so a poll session only ever
        // sees events that occurred after Start().
        state.event_queue.Clear();
        state.polling = true;
    }

    void Stop() override {
        state.polling = false;
    }

protected:
    SDLState& state; ///< Backend that owns the event queue and polling flag
};
| 578 | |||
| 579 | class SDLButtonPoller final : public SDLPoller { | ||
| 580 | public: | ||
| 581 | explicit SDLButtonPoller(SDLState& state_) : SDLPoller(state_) {} | ||
| 582 | |||
| 583 | Common::ParamPackage GetNextInput() override { | ||
| 584 | SDL_Event event; | ||
| 585 | while (state.event_queue.Pop(event)) { | ||
| 586 | switch (event.type) { | ||
| 587 | case SDL_JOYAXISMOTION: | ||
| 588 | if (std::abs(event.jaxis.value / 32767.0) < 0.5) { | ||
| 589 | break; | ||
| 590 | } | ||
| 591 | case SDL_JOYBUTTONUP: | ||
| 592 | case SDL_JOYHATMOTION: | ||
| 593 | return SDLEventToButtonParamPackage(state, event); | ||
| 594 | } | ||
| 595 | } | ||
| 596 | return {}; | ||
| 597 | } | ||
| 598 | }; | ||
| 599 | |||
| 600 | class SDLAnalogPoller final : public SDLPoller { | ||
| 601 | public: | ||
| 602 | explicit SDLAnalogPoller(SDLState& state_) : SDLPoller(state_) {} | ||
| 603 | |||
| 604 | void Start() override { | ||
| 605 | SDLPoller::Start(); | ||
| 606 | |||
| 607 | // Reset stored axes | ||
| 608 | analog_xaxis = -1; | ||
| 609 | analog_yaxis = -1; | ||
| 610 | analog_axes_joystick = -1; | ||
| 611 | } | ||
| 612 | |||
| 613 | Common::ParamPackage GetNextInput() override { | ||
| 614 | SDL_Event event; | ||
| 615 | while (state.event_queue.Pop(event)) { | ||
| 616 | if (event.type != SDL_JOYAXISMOTION || std::abs(event.jaxis.value / 32767.0) < 0.5) { | ||
| 617 | continue; | ||
| 618 | } | ||
| 619 | // An analog device needs two axes, so we need to store the axis for later and wait for | ||
| 620 | // a second SDL event. The axes also must be from the same joystick. | ||
| 621 | int axis = event.jaxis.axis; | ||
| 622 | if (analog_xaxis == -1) { | ||
| 623 | analog_xaxis = axis; | ||
| 624 | analog_axes_joystick = event.jaxis.which; | ||
| 625 | } else if (analog_yaxis == -1 && analog_xaxis != axis && | ||
| 626 | analog_axes_joystick == event.jaxis.which) { | ||
| 627 | analog_yaxis = axis; | ||
| 628 | } | ||
| 629 | } | ||
| 630 | Common::ParamPackage params; | ||
| 631 | if (analog_xaxis != -1 && analog_yaxis != -1) { | ||
| 632 | auto joystick = state.GetSDLJoystickBySDLID(event.jaxis.which); | ||
| 633 | params.Set("engine", "sdl"); | ||
| 634 | params.Set("port", joystick->GetPort()); | ||
| 635 | params.Set("guid", joystick->GetGUID()); | ||
| 636 | params.Set("axis_x", analog_xaxis); | ||
| 637 | params.Set("axis_y", analog_yaxis); | ||
| 638 | analog_xaxis = -1; | ||
| 639 | analog_yaxis = -1; | ||
| 640 | analog_axes_joystick = -1; | ||
| 641 | return params; | ||
| 642 | } | ||
| 643 | return params; | ||
| 644 | } | ||
| 645 | |||
| 646 | private: | ||
| 647 | int analog_xaxis = -1; | ||
| 648 | int analog_yaxis = -1; | ||
| 649 | SDL_JoystickID analog_axes_joystick = -1; | ||
| 650 | }; | ||
| 651 | } // namespace Polling | ||
| 652 | |||
| 653 | SDLState::Pollers SDLState::GetPollers(InputCommon::Polling::DeviceType type) { | ||
| 654 | Pollers pollers; | ||
| 655 | |||
| 656 | switch (type) { | ||
| 657 | case InputCommon::Polling::DeviceType::Analog: | ||
| 658 | pollers.emplace_back(std::make_unique<Polling::SDLAnalogPoller>(*this)); | ||
| 659 | break; | ||
| 660 | case InputCommon::Polling::DeviceType::Button: | ||
| 661 | pollers.emplace_back(std::make_unique<Polling::SDLButtonPoller>(*this)); | ||
| 662 | break; | ||
| 663 | } | ||
| 664 | |||
| 665 | return pollers; | ||
| 666 | } | ||
| 667 | |||
| 668 | } // namespace SDL | ||
| 669 | } // namespace InputCommon | ||
diff --git a/src/input_common/sdl/sdl_impl.h b/src/input_common/sdl/sdl_impl.h new file mode 100644 index 000000000..2579741d6 --- /dev/null +++ b/src/input_common/sdl/sdl_impl.h | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | // Copyright 2018 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <atomic> | ||
| 8 | #include <memory> | ||
| 9 | #include <thread> | ||
| 10 | #include "common/threadsafe_queue.h" | ||
| 11 | #include "input_common/sdl/sdl.h" | ||
| 12 | |||
| 13 | union SDL_Event; | ||
| 14 | using SDL_Joystick = struct _SDL_Joystick; | ||
| 15 | using SDL_JoystickID = s32; | ||
| 16 | |||
| 17 | namespace InputCommon::SDL { | ||
| 18 | |||
| 19 | class SDLJoystick; | ||
| 20 | class SDLButtonFactory; | ||
| 21 | class SDLAnalogFactory; | ||
| 22 | |||
/// SDL joystick input backend: owns joystick handles, the (optional) SDL event
/// pump thread, and the event queue consumed by the configuration pollers.
class SDLState : public State {
public:
    /// Initializes and registers SDL device factories
    SDLState();

    /// Unregisters SDL device factories and shut them down.
    ~SDLState() override;

    /// Handle SDL_Events for joysticks from SDL_PollEvent
    void HandleGameControllerEvent(const SDL_Event& event);

    /// Looks up a joystick by the id SDL assigned when it was opened.
    std::shared_ptr<SDLJoystick> GetSDLJoystickBySDLID(SDL_JoystickID sdl_id);
    /// Looks up the `port`-th virtual joystick sharing the given GUID.
    std::shared_ptr<SDLJoystick> GetSDLJoystickByGUID(const std::string& guid, int port);

    /// Get all DevicePoller that use the SDL backend for a specific device type
    Pollers GetPollers(Polling::DeviceType type) override;

    /// Used by the Pollers during config
    std::atomic<bool> polling = false;
    /// Events forwarded by the event watcher for the pollers to consume.
    Common::SPSCQueue<SDL_Event> event_queue;

private:
    /// Opens the joystick at the given SDL device index and tracks it.
    void InitJoystick(int joystick_index);
    void CloseJoystick(SDL_Joystick* sdl_joystick);

    /// Needs to be called before SDL_QuitSubSystem.
    void CloseJoysticks();

    /// Map of GUID of a list of corresponding virtual Joysticks
    std::unordered_map<std::string, std::vector<std::shared_ptr<SDLJoystick>>> joystick_map;
    // Presumably guards joystick_map (named accordingly); locking is in the .cpp.
    // NOTE(review): <mutex>, <unordered_map>, <vector> and <string> are used here but
    // not included directly — relies on transitive includes; consider adding them.
    std::mutex joystick_map_mutex;

    std::shared_ptr<SDLButtonFactory> button_factory;
    std::shared_ptr<SDLAnalogFactory> analog_factory;

    // True when this instance started SDL's joystick subsystem and event thread.
    bool start_thread = false;
    // Cleared in the destructor to stop the poll thread's loop.
    std::atomic<bool> initialized = false;

    std::thread poll_thread;
};
| 63 | } // namespace InputCommon::SDL | ||
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 37f09ce5f..d0284bdf4 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | add_executable(tests | 1 | add_executable(tests |
| 2 | common/bit_field.cpp | ||
| 2 | common/param_package.cpp | 3 | common/param_package.cpp |
| 3 | common/ring_buffer.cpp | 4 | common/ring_buffer.cpp |
| 4 | core/arm/arm_test_common.cpp | 5 | core/arm/arm_test_common.cpp |
diff --git a/src/tests/common/bit_field.cpp b/src/tests/common/bit_field.cpp new file mode 100644 index 000000000..8ca1889f9 --- /dev/null +++ b/src/tests/common/bit_field.cpp | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | // Copyright 2019 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <array> | ||
| 6 | #include <cstring> | ||
| 7 | #include <type_traits> | ||
| 8 | #include <catch2/catch.hpp> | ||
| 9 | #include "common/bit_field.h" | ||
| 10 | |||
// Round-trips a known 32-bit pattern through little- and big-endian BitField
// unions, checking both extraction (unsigned, signed, enum fields) and Assign.
TEST_CASE("BitField", "[common]") {
    enum class TestEnum : u32 {
        A = 0b10111101,
        B = 0b10101110,
        C = 0b00001111,
    };

    // Fields a/b/c/d partition bits [0,6)/[6,10)/[10,18)/[18,32) of `raw`.
    union LEBitField {
        u32_le raw;
        BitField<0, 6, u32> a;
        BitField<6, 4, s32> b;
        BitField<10, 8, TestEnum> c;
        BitField<18, 14, u32> d;
    } le_bitfield;

    union BEBitField {
        u32_be raw;
        BitFieldBE<0, 6, u32> a;
        BitFieldBE<6, 4, s32> b;
        BitFieldBE<10, 8, TestEnum> c;
        BitFieldBE<18, 14, u32> d;
    } be_bitfield;

    static_assert(sizeof(LEBitField) == sizeof(u32));
    static_assert(sizeof(BEBitField) == sizeof(u32));
    static_assert(std::is_trivially_copyable_v<LEBitField>);
    static_assert(std::is_trivially_copyable_v<BEBitField>);

    // Same byte sequence is interpreted both as LE and as BE below.
    std::array<u8, 4> raw{{
        0b01101100,
        0b11110110,
        0b10111010,
        0b11101100,
    }};

    std::memcpy(&le_bitfield, &raw, sizeof(raw));
    std::memcpy(&be_bitfield, &raw, sizeof(raw));

    // bit fields: 11101100101110'10111101'1001'101100
    REQUIRE(le_bitfield.raw == 0b11101100'10111010'11110110'01101100);
    REQUIRE(le_bitfield.a == 0b101100);
    REQUIRE(le_bitfield.b == -7); // 1001 as two's complement
    REQUIRE(le_bitfield.c == TestEnum::A);
    REQUIRE(le_bitfield.d == 0b11101100101110);

    // Writing through each field must leave the sibling fields untouched.
    le_bitfield.a.Assign(0b000111);
    le_bitfield.b.Assign(-1);
    le_bitfield.c.Assign(TestEnum::C);
    le_bitfield.d.Assign(0b01010101010101);
    std::memcpy(&raw, &le_bitfield, sizeof(raw));
    // bit fields: 01010101010101'00001111'1111'000111
    REQUIRE(le_bitfield.raw == 0b01010101'01010100'00111111'11000111);
    REQUIRE(raw == std::array<u8, 4>{{
                       0b11000111,
                       0b00111111,
                       0b01010100,
                       0b01010101,
                   }});

    // bit fields: 01101100111101'10101110'1011'101100
    REQUIRE(be_bitfield.raw == 0b01101100'11110110'10111010'11101100);
    REQUIRE(be_bitfield.a == 0b101100);
    REQUIRE(be_bitfield.b == -5); // 1011 as two's complement
    REQUIRE(be_bitfield.c == TestEnum::B);
    REQUIRE(be_bitfield.d == 0b01101100111101);

    be_bitfield.a.Assign(0b000111);
    be_bitfield.b.Assign(-1);
    be_bitfield.c.Assign(TestEnum::C);
    be_bitfield.d.Assign(0b01010101010101);
    std::memcpy(&raw, &be_bitfield, sizeof(raw));
    // bit fields: 01010101010101'00001111'1111'000111
    REQUIRE(be_bitfield.raw == 0b01010101'01010100'00111111'11000111);
    REQUIRE(raw == std::array<u8, 4>{{
                       0b01010101,
                       0b01010100,
                       0b00111111,
                       0b11000111,
                   }});
}
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index ea27ef90d..3e1a735c3 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | 6 | ||
| 7 | #include "common/page_table.h" | ||
| 7 | #include "core/core.h" | 8 | #include "core/core.h" |
| 8 | #include "core/hle/kernel/process.h" | 9 | #include "core/hle/kernel/process.h" |
| 9 | #include "core/memory.h" | 10 | #include "core/memory.h" |
| @@ -15,14 +16,14 @@ namespace ArmTests { | |||
| 15 | TestEnvironment::TestEnvironment(bool mutable_memory_) | 16 | TestEnvironment::TestEnvironment(bool mutable_memory_) |
| 16 | : mutable_memory(mutable_memory_), | 17 | : mutable_memory(mutable_memory_), |
| 17 | test_memory(std::make_shared<TestMemory>(this)), kernel{Core::System::GetInstance()} { | 18 | test_memory(std::make_shared<TestMemory>(this)), kernel{Core::System::GetInstance()} { |
| 18 | auto process = Kernel::Process::Create(kernel, ""); | 19 | auto process = Kernel::Process::Create(Core::System::GetInstance(), ""); |
| 19 | kernel.MakeCurrentProcess(process.get()); | 20 | kernel.MakeCurrentProcess(process.get()); |
| 20 | page_table = &process->VMManager().page_table; | 21 | page_table = &process->VMManager().page_table; |
| 21 | 22 | ||
| 22 | std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); | 23 | std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); |
| 23 | page_table->special_regions.clear(); | 24 | page_table->special_regions.clear(); |
| 24 | std::fill(page_table->attributes.begin(), page_table->attributes.end(), | 25 | std::fill(page_table->attributes.begin(), page_table->attributes.end(), |
| 25 | Memory::PageType::Unmapped); | 26 | Common::PageType::Unmapped); |
| 26 | 27 | ||
| 27 | Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); | 28 | Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); |
| 28 | Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); | 29 | Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); |
diff --git a/src/tests/core/arm/arm_test_common.h b/src/tests/core/arm/arm_test_common.h index 0b7539601..d145dbfcc 100644 --- a/src/tests/core/arm/arm_test_common.h +++ b/src/tests/core/arm/arm_test_common.h | |||
| @@ -9,10 +9,10 @@ | |||
| 9 | #include <vector> | 9 | #include <vector> |
| 10 | 10 | ||
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/memory_hook.h" | ||
| 12 | #include "core/hle/kernel/kernel.h" | 13 | #include "core/hle/kernel/kernel.h" |
| 13 | #include "core/memory_hook.h" | ||
| 14 | 14 | ||
| 15 | namespace Memory { | 15 | namespace Common { |
| 16 | struct PageTable; | 16 | struct PageTable; |
| 17 | } | 17 | } |
| 18 | 18 | ||
| @@ -58,7 +58,7 @@ public: | |||
| 58 | 58 | ||
| 59 | private: | 59 | private: |
| 60 | friend struct TestMemory; | 60 | friend struct TestMemory; |
| 61 | struct TestMemory final : Memory::MemoryHook { | 61 | struct TestMemory final : Common::MemoryHook { |
| 62 | explicit TestMemory(TestEnvironment* env_) : env(env_) {} | 62 | explicit TestMemory(TestEnvironment* env_) : env(env_) {} |
| 63 | TestEnvironment* env; | 63 | TestEnvironment* env; |
| 64 | 64 | ||
| @@ -86,7 +86,7 @@ private: | |||
| 86 | bool mutable_memory; | 86 | bool mutable_memory; |
| 87 | std::shared_ptr<TestMemory> test_memory; | 87 | std::shared_ptr<TestMemory> test_memory; |
| 88 | std::vector<WriteRecord> write_records; | 88 | std::vector<WriteRecord> write_records; |
| 89 | Memory::PageTable* page_table = nullptr; | 89 | Common::PageTable* page_table = nullptr; |
| 90 | Kernel::KernelCore kernel; | 90 | Kernel::KernelCore kernel; |
| 91 | }; | 91 | }; |
| 92 | 92 | ||
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index dac992d44..14b76680f 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt | |||
| @@ -80,6 +80,7 @@ add_library(video_core STATIC | |||
| 80 | shader/decode/hfma2.cpp | 80 | shader/decode/hfma2.cpp |
| 81 | shader/decode/conversion.cpp | 81 | shader/decode/conversion.cpp |
| 82 | shader/decode/memory.cpp | 82 | shader/decode/memory.cpp |
| 83 | shader/decode/texture.cpp | ||
| 83 | shader/decode/float_set_predicate.cpp | 84 | shader/decode/float_set_predicate.cpp |
| 84 | shader/decode/integer_set_predicate.cpp | 85 | shader/decode/integer_set_predicate.cpp |
| 85 | shader/decode/half_set_predicate.cpp | 86 | shader/decode/half_set_predicate.cpp |
| @@ -122,6 +123,8 @@ if (ENABLE_VULKAN) | |||
| 122 | renderer_vulkan/vk_memory_manager.h | 123 | renderer_vulkan/vk_memory_manager.h |
| 123 | renderer_vulkan/vk_resource_manager.cpp | 124 | renderer_vulkan/vk_resource_manager.cpp |
| 124 | renderer_vulkan/vk_resource_manager.h | 125 | renderer_vulkan/vk_resource_manager.h |
| 126 | renderer_vulkan/vk_sampler_cache.cpp | ||
| 127 | renderer_vulkan/vk_sampler_cache.h | ||
| 125 | renderer_vulkan/vk_scheduler.cpp | 128 | renderer_vulkan/vk_scheduler.cpp |
| 126 | renderer_vulkan/vk_scheduler.h | 129 | renderer_vulkan/vk_scheduler.h |
| 127 | renderer_vulkan/vk_stream_buffer.cpp | 130 | renderer_vulkan/vk_stream_buffer.cpp |
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index bff1a37ff..8b1bea1ae 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp | |||
| @@ -55,12 +55,9 @@ bool DmaPusher::Step() { | |||
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | // Push buffer non-empty, read a word | 57 | // Push buffer non-empty, read a word |
| 58 | const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get); | ||
| 59 | ASSERT_MSG(address, "Invalid GPU address"); | ||
| 60 | |||
| 61 | command_headers.resize(command_list_header.size); | 58 | command_headers.resize(command_list_header.size); |
| 62 | 59 | gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(), | |
| 63 | Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32)); | 60 | command_list_header.size * sizeof(u32)); |
| 64 | 61 | ||
| 65 | for (const CommandHeader& command_header : command_headers) { | 62 | for (const CommandHeader& command_header : command_headers) { |
| 66 | 63 | ||
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 27a36348c..6ab06518f 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | 9 | ||
| 10 | #include "common/bit_field.h" | 10 | #include "common/bit_field.h" |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "video_core/memory_manager.h" | ||
| 13 | 12 | ||
| 14 | namespace Tegra { | 13 | namespace Tegra { |
| 15 | 14 | ||
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index aae2a4019..e259bf46b 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "video_core/engines/kepler_memory.h" | 9 | #include "video_core/engines/kepler_memory.h" |
| 10 | #include "video_core/engines/maxwell_3d.h" | 10 | #include "video_core/engines/maxwell_3d.h" |
| 11 | #include "video_core/rasterizer_interface.h" | 11 | #include "video_core/rasterizer_interface.h" |
| 12 | #include "video_core/renderer_base.h" | ||
| 12 | 13 | ||
| 13 | namespace Tegra::Engines { | 14 | namespace Tegra::Engines { |
| 14 | 15 | ||
| @@ -40,17 +41,13 @@ void KeplerMemory::ProcessData(u32 data) { | |||
| 40 | ASSERT_MSG(regs.exec.linear, "Non-linear uploads are not supported"); | 41 | ASSERT_MSG(regs.exec.linear, "Non-linear uploads are not supported"); |
| 41 | ASSERT(regs.dest.x == 0 && regs.dest.y == 0 && regs.dest.z == 0); | 42 | ASSERT(regs.dest.x == 0 && regs.dest.y == 0 && regs.dest.z == 0); |
| 42 | 43 | ||
| 43 | const GPUVAddr address = regs.dest.Address(); | ||
| 44 | const auto dest_address = | ||
| 45 | memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32)); | ||
| 46 | ASSERT_MSG(dest_address, "Invalid GPU address"); | ||
| 47 | |||
| 48 | // We have to invalidate the destination region to evict any outdated surfaces from the cache. | 44 | // We have to invalidate the destination region to evict any outdated surfaces from the cache. |
| 49 | // We do this before actually writing the new data because the destination address might contain | 45 | // We do this before actually writing the new data because the destination address might |
| 50 | // a dirty surface that will have to be written back to memory. | 46 | // contain a dirty surface that will have to be written back to memory. |
| 51 | Core::System::GetInstance().GPU().InvalidateRegion(*dest_address, sizeof(u32)); | 47 | const GPUVAddr address{regs.dest.Address() + state.write_offset * sizeof(u32)}; |
| 48 | rasterizer.InvalidateRegion(ToCacheAddr(memory_manager.GetPointer(address)), sizeof(u32)); | ||
| 49 | memory_manager.Write<u32>(address, data); | ||
| 52 | 50 | ||
| 53 | Memory::Write32(*dest_address, data); | ||
| 54 | system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); | 51 | system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); |
| 55 | 52 | ||
| 56 | state.write_offset++; | 53 | state.write_offset++; |
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 144e7fa82..defcfbd3f 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp | |||
| @@ -270,11 +270,9 @@ void Maxwell3D::ProcessMacroBind(u32 data) { | |||
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | void Maxwell3D::ProcessQueryGet() { | 272 | void Maxwell3D::ProcessQueryGet() { |
| 273 | GPUVAddr sequence_address = regs.query.QueryAddress(); | 273 | const GPUVAddr sequence_address{regs.query.QueryAddress()}; |
| 274 | // Since the sequence address is given as a GPU VAddr, we have to convert it to an application | 274 | // Since the sequence address is given as a GPU VAddr, we have to convert it to an application |
| 275 | // VAddr before writing. | 275 | // VAddr before writing. |
| 276 | const auto address = memory_manager.GpuToCpuAddress(sequence_address); | ||
| 277 | ASSERT_MSG(address, "Invalid GPU address"); | ||
| 278 | 276 | ||
| 279 | // TODO(Subv): Support the other query units. | 277 | // TODO(Subv): Support the other query units. |
| 280 | ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop, | 278 | ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop, |
| @@ -309,7 +307,7 @@ void Maxwell3D::ProcessQueryGet() { | |||
| 309 | // Write the current query sequence to the sequence address. | 307 | // Write the current query sequence to the sequence address. |
| 310 | // TODO(Subv): Find out what happens if you use a long query type but mark it as a short | 308 | // TODO(Subv): Find out what happens if you use a long query type but mark it as a short |
| 311 | // query. | 309 | // query. |
| 312 | Memory::Write32(*address, sequence); | 310 | memory_manager.Write<u32>(sequence_address, sequence); |
| 313 | } else { | 311 | } else { |
| 314 | // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast | 312 | // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast |
| 315 | // GPU, this command may actually take a while to complete in real hardware due to GPU | 313 | // GPU, this command may actually take a while to complete in real hardware due to GPU |
| @@ -318,7 +316,7 @@ void Maxwell3D::ProcessQueryGet() { | |||
| 318 | query_result.value = result; | 316 | query_result.value = result; |
| 319 | // TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming | 317 | // TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming |
| 320 | query_result.timestamp = system.CoreTiming().GetTicks(); | 318 | query_result.timestamp = system.CoreTiming().GetTicks(); |
| 321 | Memory::WriteBlock(*address, &query_result, sizeof(query_result)); | 319 | memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); |
| 322 | } | 320 | } |
| 323 | dirty_flags.OnMemoryWrite(); | 321 | dirty_flags.OnMemoryWrite(); |
| 324 | break; | 322 | break; |
| @@ -393,10 +391,12 @@ void Maxwell3D::ProcessCBData(u32 value) { | |||
| 393 | // Don't allow writing past the end of the buffer. | 391 | // Don't allow writing past the end of the buffer. |
| 394 | ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size); | 392 | ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size); |
| 395 | 393 | ||
| 396 | const auto address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos); | 394 | const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos}; |
| 397 | ASSERT_MSG(address, "Invalid GPU address"); | 395 | |
| 396 | u8* ptr{memory_manager.GetPointer(address)}; | ||
| 397 | rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32)); | ||
| 398 | memory_manager.Write<u32>(address, value); | ||
| 398 | 399 | ||
| 399 | Memory::Write32(*address, value); | ||
| 400 | dirty_flags.OnMemoryWrite(); | 400 | dirty_flags.OnMemoryWrite(); |
| 401 | 401 | ||
| 402 | // Increment the current buffer position. | 402 | // Increment the current buffer position. |
| @@ -404,14 +404,10 @@ void Maxwell3D::ProcessCBData(u32 value) { | |||
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { | 406 | Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { |
| 407 | const GPUVAddr tic_base_address = regs.tic.TICAddress(); | 407 | const GPUVAddr tic_address_gpu{regs.tic.TICAddress() + tic_index * sizeof(Texture::TICEntry)}; |
| 408 | |||
| 409 | const GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry); | ||
| 410 | const auto tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu); | ||
| 411 | ASSERT_MSG(tic_address_cpu, "Invalid GPU address"); | ||
| 412 | 408 | ||
| 413 | Texture::TICEntry tic_entry; | 409 | Texture::TICEntry tic_entry; |
| 414 | Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry)); | 410 | memory_manager.ReadBlock(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry)); |
| 415 | 411 | ||
| 416 | ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear || | 412 | ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear || |
| 417 | tic_entry.header_version == Texture::TICHeaderVersion::Pitch, | 413 | tic_entry.header_version == Texture::TICHeaderVersion::Pitch, |
| @@ -429,14 +425,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { | |||
| 429 | } | 425 | } |
| 430 | 426 | ||
| 431 | Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const { | 427 | Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const { |
| 432 | const GPUVAddr tsc_base_address = regs.tsc.TSCAddress(); | 428 | const GPUVAddr tsc_address_gpu{regs.tsc.TSCAddress() + tsc_index * sizeof(Texture::TSCEntry)}; |
| 433 | |||
| 434 | const GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry); | ||
| 435 | const auto tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu); | ||
| 436 | ASSERT_MSG(tsc_address_cpu, "Invalid GPU address"); | ||
| 437 | 429 | ||
| 438 | Texture::TSCEntry tsc_entry; | 430 | Texture::TSCEntry tsc_entry; |
| 439 | Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry)); | 431 | memory_manager.ReadBlock(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry)); |
| 440 | return tsc_entry; | 432 | return tsc_entry; |
| 441 | } | 433 | } |
| 442 | 434 | ||
| @@ -455,10 +447,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt | |||
| 455 | for (GPUVAddr current_texture = tex_info_buffer.address + TextureInfoOffset; | 447 | for (GPUVAddr current_texture = tex_info_buffer.address + TextureInfoOffset; |
| 456 | current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) { | 448 | current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) { |
| 457 | 449 | ||
| 458 | const auto address = memory_manager.GpuToCpuAddress(current_texture); | 450 | const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(current_texture)}; |
| 459 | ASSERT_MSG(address, "Invalid GPU address"); | ||
| 460 | |||
| 461 | const Texture::TextureHandle tex_handle{Memory::Read32(*address)}; | ||
| 462 | 451 | ||
| 463 | Texture::FullTextureInfo tex_info{}; | 452 | Texture::FullTextureInfo tex_info{}; |
| 464 | // TODO(Subv): Use the shader to determine which textures are actually accessed. | 453 | // TODO(Subv): Use the shader to determine which textures are actually accessed. |
| @@ -493,10 +482,7 @@ Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage, | |||
| 493 | 482 | ||
| 494 | ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size); | 483 | ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size); |
| 495 | 484 | ||
| 496 | const auto tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address); | 485 | const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)}; |
| 497 | ASSERT_MSG(tex_address_cpu, "Invalid GPU address"); | ||
| 498 | |||
| 499 | const Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)}; | ||
| 500 | 486 | ||
| 501 | Texture::FullTextureInfo tex_info{}; | 487 | Texture::FullTextureInfo tex_info{}; |
| 502 | tex_info.index = static_cast<u32>(offset); | 488 | tex_info.index = static_cast<u32>(offset); |
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 9dfea5999..5cca5c29a 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "video_core/engines/maxwell_3d.h" | 9 | #include "video_core/engines/maxwell_3d.h" |
| 10 | #include "video_core/engines/maxwell_dma.h" | 10 | #include "video_core/engines/maxwell_dma.h" |
| 11 | #include "video_core/rasterizer_interface.h" | 11 | #include "video_core/rasterizer_interface.h" |
| 12 | #include "video_core/renderer_base.h" | ||
| 12 | #include "video_core/textures/decoders.h" | 13 | #include "video_core/textures/decoders.h" |
| 13 | 14 | ||
| 14 | namespace Tegra::Engines { | 15 | namespace Tegra::Engines { |
| @@ -42,11 +43,6 @@ void MaxwellDMA::HandleCopy() { | |||
| 42 | const GPUVAddr source = regs.src_address.Address(); | 43 | const GPUVAddr source = regs.src_address.Address(); |
| 43 | const GPUVAddr dest = regs.dst_address.Address(); | 44 | const GPUVAddr dest = regs.dst_address.Address(); |
| 44 | 45 | ||
| 45 | const auto source_cpu = memory_manager.GpuToCpuAddress(source); | ||
| 46 | const auto dest_cpu = memory_manager.GpuToCpuAddress(dest); | ||
| 47 | ASSERT_MSG(source_cpu, "Invalid source GPU address"); | ||
| 48 | ASSERT_MSG(dest_cpu, "Invalid destination GPU address"); | ||
| 49 | |||
| 50 | // TODO(Subv): Perform more research and implement all features of this engine. | 46 | // TODO(Subv): Perform more research and implement all features of this engine. |
| 51 | ASSERT(regs.exec.enable_swizzle == 0); | 47 | ASSERT(regs.exec.enable_swizzle == 0); |
| 52 | ASSERT(regs.exec.query_mode == Regs::QueryMode::None); | 48 | ASSERT(regs.exec.query_mode == Regs::QueryMode::None); |
| @@ -69,7 +65,7 @@ void MaxwellDMA::HandleCopy() { | |||
| 69 | // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count, | 65 | // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count, |
| 70 | // y_count). | 66 | // y_count). |
| 71 | if (!regs.exec.enable_2d) { | 67 | if (!regs.exec.enable_2d) { |
| 72 | Memory::CopyBlock(*dest_cpu, *source_cpu, regs.x_count); | 68 | memory_manager.CopyBlock(dest, source, regs.x_count); |
| 73 | return; | 69 | return; |
| 74 | } | 70 | } |
| 75 | 71 | ||
| @@ -78,9 +74,9 @@ void MaxwellDMA::HandleCopy() { | |||
| 78 | // rectangle. There is no need to manually flush/invalidate the regions because | 74 | // rectangle. There is no need to manually flush/invalidate the regions because |
| 79 | // CopyBlock does that for us. | 75 | // CopyBlock does that for us. |
| 80 | for (u32 line = 0; line < regs.y_count; ++line) { | 76 | for (u32 line = 0; line < regs.y_count; ++line) { |
| 81 | const VAddr source_line = *source_cpu + line * regs.src_pitch; | 77 | const GPUVAddr source_line = source + line * regs.src_pitch; |
| 82 | const VAddr dest_line = *dest_cpu + line * regs.dst_pitch; | 78 | const GPUVAddr dest_line = dest + line * regs.dst_pitch; |
| 83 | Memory::CopyBlock(dest_line, source_line, regs.x_count); | 79 | memory_manager.CopyBlock(dest_line, source_line, regs.x_count); |
| 84 | } | 80 | } |
| 85 | return; | 81 | return; |
| 86 | } | 82 | } |
| @@ -89,15 +85,28 @@ void MaxwellDMA::HandleCopy() { | |||
| 89 | 85 | ||
| 90 | const std::size_t copy_size = regs.x_count * regs.y_count; | 86 | const std::size_t copy_size = regs.x_count * regs.y_count; |
| 91 | 87 | ||
| 88 | auto source_ptr{memory_manager.GetPointer(source)}; | ||
| 89 | auto dst_ptr{memory_manager.GetPointer(dest)}; | ||
| 90 | |||
| 91 | if (!source_ptr) { | ||
| 92 | LOG_ERROR(HW_GPU, "source_ptr is invalid"); | ||
| 93 | return; | ||
| 94 | } | ||
| 95 | |||
| 96 | if (!dst_ptr) { | ||
| 97 | LOG_ERROR(HW_GPU, "dst_ptr is invalid"); | ||
| 98 | return; | ||
| 99 | } | ||
| 100 | |||
| 92 | const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) { | 101 | const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) { |
| 93 | // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated | 102 | // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated |
| 94 | // copying. | 103 | // copying. |
| 95 | Core::System::GetInstance().GPU().FlushRegion(*source_cpu, src_size); | 104 | rasterizer.FlushRegion(ToCacheAddr(source_ptr), src_size); |
| 96 | 105 | ||
| 97 | // We have to invalidate the destination region to evict any outdated surfaces from the | 106 | // We have to invalidate the destination region to evict any outdated surfaces from the |
| 98 | // cache. We do this before actually writing the new data because the destination address | 107 | // cache. We do this before actually writing the new data because the destination address |
| 99 | // might contain a dirty surface that will have to be written back to memory. | 108 | // might contain a dirty surface that will have to be written back to memory. |
| 100 | Core::System::GetInstance().GPU().InvalidateRegion(*dest_cpu, dst_size); | 109 | rasterizer.InvalidateRegion(ToCacheAddr(dst_ptr), dst_size); |
| 101 | }; | 110 | }; |
| 102 | 111 | ||
| 103 | if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) { | 112 | if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) { |
| @@ -110,8 +119,8 @@ void MaxwellDMA::HandleCopy() { | |||
| 110 | copy_size * src_bytes_per_pixel); | 119 | copy_size * src_bytes_per_pixel); |
| 111 | 120 | ||
| 112 | Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch, | 121 | Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch, |
| 113 | regs.src_params.size_x, src_bytes_per_pixel, *source_cpu, | 122 | regs.src_params.size_x, src_bytes_per_pixel, source_ptr, dst_ptr, |
| 114 | *dest_cpu, regs.src_params.BlockHeight(), regs.src_params.pos_x, | 123 | regs.src_params.BlockHeight(), regs.src_params.pos_x, |
| 115 | regs.src_params.pos_y); | 124 | regs.src_params.pos_y); |
| 116 | } else { | 125 | } else { |
| 117 | ASSERT(regs.dst_params.size_z == 1); | 126 | ASSERT(regs.dst_params.size_z == 1); |
| @@ -124,7 +133,7 @@ void MaxwellDMA::HandleCopy() { | |||
| 124 | 133 | ||
| 125 | // If the input is linear and the output is tiled, swizzle the input and copy it over. | 134 | // If the input is linear and the output is tiled, swizzle the input and copy it over. |
| 126 | Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x, | 135 | Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x, |
| 127 | src_bpp, *dest_cpu, *source_cpu, regs.dst_params.BlockHeight()); | 136 | src_bpp, dst_ptr, source_ptr, regs.dst_params.BlockHeight()); |
| 128 | } | 137 | } |
| 129 | } | 138 | } |
| 130 | 139 | ||
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index c7eb15b6a..7f613370b 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h | |||
| @@ -324,11 +324,11 @@ enum class TextureQueryType : u64 { | |||
| 324 | 324 | ||
| 325 | enum class TextureProcessMode : u64 { | 325 | enum class TextureProcessMode : u64 { |
| 326 | None = 0, | 326 | None = 0, |
| 327 | LZ = 1, // Unknown, appears to be the same as none. | 327 | LZ = 1, // Load LOD of zero. |
| 328 | LB = 2, // Load Bias. | 328 | LB = 2, // Load Bias. |
| 329 | LL = 3, // Load LOD (LevelOfDetail) | 329 | LL = 3, // Load LOD. |
| 330 | LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB | 330 | LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB. |
| 331 | LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL | 331 | LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL. |
| 332 | }; | 332 | }; |
| 333 | 333 | ||
| 334 | enum class TextureMiscMode : u64 { | 334 | enum class TextureMiscMode : u64 { |
| @@ -1445,6 +1445,7 @@ public: | |||
| 1445 | Flow, | 1445 | Flow, |
| 1446 | Synch, | 1446 | Synch, |
| 1447 | Memory, | 1447 | Memory, |
| 1448 | Texture, | ||
| 1448 | FloatSet, | 1449 | FloatSet, |
| 1449 | FloatSetPredicate, | 1450 | FloatSetPredicate, |
| 1450 | IntegerSet, | 1451 | IntegerSet, |
| @@ -1575,14 +1576,14 @@ private: | |||
| 1575 | INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"), | 1576 | INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"), |
| 1576 | INST("1110111011010---", Id::LDG, Type::Memory, "LDG"), | 1577 | INST("1110111011010---", Id::LDG, Type::Memory, "LDG"), |
| 1577 | INST("1110111011011---", Id::STG, Type::Memory, "STG"), | 1578 | INST("1110111011011---", Id::STG, Type::Memory, "STG"), |
| 1578 | INST("110000----111---", Id::TEX, Type::Memory, "TEX"), | 1579 | INST("110000----111---", Id::TEX, Type::Texture, "TEX"), |
| 1579 | INST("1101111101001---", Id::TXQ, Type::Memory, "TXQ"), | 1580 | INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"), |
| 1580 | INST("1101-00---------", Id::TEXS, Type::Memory, "TEXS"), | 1581 | INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"), |
| 1581 | INST("1101101---------", Id::TLDS, Type::Memory, "TLDS"), | 1582 | INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"), |
| 1582 | INST("110010----111---", Id::TLD4, Type::Memory, "TLD4"), | 1583 | INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"), |
| 1583 | INST("1101111100------", Id::TLD4S, Type::Memory, "TLD4S"), | 1584 | INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"), |
| 1584 | INST("110111110110----", Id::TMML_B, Type::Memory, "TMML_B"), | 1585 | INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"), |
| 1585 | INST("1101111101011---", Id::TMML, Type::Memory, "TMML"), | 1586 | INST("1101111101011---", Id::TMML, Type::Texture, "TMML"), |
| 1586 | INST("111000110000----", Id::EXIT, Type::Trivial, "EXIT"), | 1587 | INST("111000110000----", Id::EXIT, Type::Trivial, "EXIT"), |
| 1587 | INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), | 1588 | INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), |
| 1588 | INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), | 1589 | INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), |
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 08abf8ac9..267a03f2d 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "video_core/engines/maxwell_3d.h" | 12 | #include "video_core/engines/maxwell_3d.h" |
| 13 | #include "video_core/engines/maxwell_dma.h" | 13 | #include "video_core/engines/maxwell_dma.h" |
| 14 | #include "video_core/gpu.h" | 14 | #include "video_core/gpu.h" |
| 15 | #include "video_core/memory_manager.h" | ||
| 15 | #include "video_core/renderer_base.h" | 16 | #include "video_core/renderer_base.h" |
| 16 | 17 | ||
| 17 | namespace Tegra { | 18 | namespace Tegra { |
| @@ -274,7 +275,6 @@ void GPU::ProcessSemaphoreTriggerMethod() { | |||
| 274 | const auto op = | 275 | const auto op = |
| 275 | static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); | 276 | static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask); |
| 276 | if (op == GpuSemaphoreOperation::WriteLong) { | 277 | if (op == GpuSemaphoreOperation::WriteLong) { |
| 277 | auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); | ||
| 278 | struct Block { | 278 | struct Block { |
| 279 | u32 sequence; | 279 | u32 sequence; |
| 280 | u32 zeros = 0; | 280 | u32 zeros = 0; |
| @@ -286,11 +286,9 @@ void GPU::ProcessSemaphoreTriggerMethod() { | |||
| 286 | // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of | 286 | // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of |
| 287 | // CoreTiming | 287 | // CoreTiming |
| 288 | block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); | 288 | block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); |
| 289 | Memory::WriteBlock(*address, &block, sizeof(block)); | 289 | memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block)); |
| 290 | } else { | 290 | } else { |
| 291 | const auto address = | 291 | const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())}; |
| 292 | memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); | ||
| 293 | const u32 word = Memory::Read32(*address); | ||
| 294 | if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || | 292 | if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || |
| 295 | (op == GpuSemaphoreOperation::AcquireGequal && | 293 | (op == GpuSemaphoreOperation::AcquireGequal && |
| 296 | static_cast<s32>(word - regs.semaphore_sequence) > 0) || | 294 | static_cast<s32>(word - regs.semaphore_sequence) > 0) || |
| @@ -317,13 +315,11 @@ void GPU::ProcessSemaphoreTriggerMethod() { | |||
| 317 | } | 315 | } |
| 318 | 316 | ||
| 319 | void GPU::ProcessSemaphoreRelease() { | 317 | void GPU::ProcessSemaphoreRelease() { |
| 320 | const auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); | 318 | memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release); |
| 321 | Memory::Write32(*address, regs.semaphore_release); | ||
| 322 | } | 319 | } |
| 323 | 320 | ||
| 324 | void GPU::ProcessSemaphoreAcquire() { | 321 | void GPU::ProcessSemaphoreAcquire() { |
| 325 | const auto address = memory_manager->GpuToCpuAddress(regs.smaphore_address.SmaphoreAddress()); | 322 | const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress()); |
| 326 | const u32 word = Memory::Read32(*address); | ||
| 327 | const auto value = regs.semaphore_acquire; | 323 | const auto value = regs.semaphore_acquire; |
| 328 | if (word != value) { | 324 | if (word != value) { |
| 329 | regs.acquire_active = true; | 325 | regs.acquire_active = true; |
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 56a203275..c1830ac8d 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h | |||
| @@ -9,7 +9,11 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/service/nvflinger/buffer_queue.h" | 10 | #include "core/hle/service/nvflinger/buffer_queue.h" |
| 11 | #include "video_core/dma_pusher.h" | 11 | #include "video_core/dma_pusher.h" |
| 12 | #include "video_core/memory_manager.h" | 12 | |
| 13 | using CacheAddr = std::uintptr_t; | ||
| 14 | inline CacheAddr ToCacheAddr(const void* host_ptr) { | ||
| 15 | return reinterpret_cast<CacheAddr>(host_ptr); | ||
| 16 | } | ||
| 13 | 17 | ||
| 14 | namespace Core { | 18 | namespace Core { |
| 15 | class System; | 19 | class System; |
| @@ -119,6 +123,8 @@ enum class EngineID { | |||
| 119 | MAXWELL_DMA_COPY_A = 0xB0B5, | 123 | MAXWELL_DMA_COPY_A = 0xB0B5, |
| 120 | }; | 124 | }; |
| 121 | 125 | ||
| 126 | class MemoryManager; | ||
| 127 | |||
| 122 | class GPU { | 128 | class GPU { |
| 123 | public: | 129 | public: |
| 124 | explicit GPU(Core::System& system, VideoCore::RendererBase& renderer); | 130 | explicit GPU(Core::System& system, VideoCore::RendererBase& renderer); |
| @@ -209,13 +215,13 @@ public: | |||
| 209 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0; | 215 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0; |
| 210 | 216 | ||
| 211 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 217 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 212 | virtual void FlushRegion(VAddr addr, u64 size) = 0; | 218 | virtual void FlushRegion(CacheAddr addr, u64 size) = 0; |
| 213 | 219 | ||
| 214 | /// Notify rasterizer that any caches of the specified region should be invalidated | 220 | /// Notify rasterizer that any caches of the specified region should be invalidated |
| 215 | virtual void InvalidateRegion(VAddr addr, u64 size) = 0; | 221 | virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0; |
| 216 | 222 | ||
| 217 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated | 223 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated |
| 218 | virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; | 224 | virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; |
| 219 | 225 | ||
| 220 | private: | 226 | private: |
| 221 | void ProcessBindMethod(const MethodCall& method_call); | 227 | void ProcessBindMethod(const MethodCall& method_call); |
| @@ -239,9 +245,8 @@ protected: | |||
| 239 | private: | 245 | private: |
| 240 | std::unique_ptr<Tegra::MemoryManager> memory_manager; | 246 | std::unique_ptr<Tegra::MemoryManager> memory_manager; |
| 241 | 247 | ||
| 242 | /// Mapping of command subchannels to their bound engine ids. | 248 | /// Mapping of command subchannels to their bound engine ids |
| 243 | std::array<EngineID, 8> bound_engines = {}; | 249 | std::array<EngineID, 8> bound_engines = {}; |
| 244 | |||
| 245 | /// 3D engine | 250 | /// 3D engine |
| 246 | std::unique_ptr<Engines::Maxwell3D> maxwell_3d; | 251 | std::unique_ptr<Engines::Maxwell3D> maxwell_3d; |
| 247 | /// 2D engine | 252 | /// 2D engine |
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp index ad0a747e3..8b355cf7b 100644 --- a/src/video_core/gpu_asynch.cpp +++ b/src/video_core/gpu_asynch.cpp | |||
| @@ -22,15 +22,15 @@ void GPUAsynch::SwapBuffers( | |||
| 22 | gpu_thread.SwapBuffers(std::move(framebuffer)); | 22 | gpu_thread.SwapBuffers(std::move(framebuffer)); |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | void GPUAsynch::FlushRegion(VAddr addr, u64 size) { | 25 | void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) { |
| 26 | gpu_thread.FlushRegion(addr, size); | 26 | gpu_thread.FlushRegion(addr, size); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | void GPUAsynch::InvalidateRegion(VAddr addr, u64 size) { | 29 | void GPUAsynch::InvalidateRegion(CacheAddr addr, u64 size) { |
| 30 | gpu_thread.InvalidateRegion(addr, size); | 30 | gpu_thread.InvalidateRegion(addr, size); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | void GPUAsynch::FlushAndInvalidateRegion(VAddr addr, u64 size) { | 33 | void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { |
| 34 | gpu_thread.FlushAndInvalidateRegion(addr, size); | 34 | gpu_thread.FlushAndInvalidateRegion(addr, size); |
| 35 | } | 35 | } |
| 36 | 36 | ||
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h index e6a807aba..1dcc61a6c 100644 --- a/src/video_core/gpu_asynch.h +++ b/src/video_core/gpu_asynch.h | |||
| @@ -26,9 +26,9 @@ public: | |||
| 26 | void PushGPUEntries(Tegra::CommandList&& entries) override; | 26 | void PushGPUEntries(Tegra::CommandList&& entries) override; |
| 27 | void SwapBuffers( | 27 | void SwapBuffers( |
| 28 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; | 28 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; |
| 29 | void FlushRegion(VAddr addr, u64 size) override; | 29 | void FlushRegion(CacheAddr addr, u64 size) override; |
| 30 | void InvalidateRegion(VAddr addr, u64 size) override; | 30 | void InvalidateRegion(CacheAddr addr, u64 size) override; |
| 31 | void FlushAndInvalidateRegion(VAddr addr, u64 size) override; | 31 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; |
| 32 | 32 | ||
| 33 | private: | 33 | private: |
| 34 | GPUThread::ThreadManager gpu_thread; | 34 | GPUThread::ThreadManager gpu_thread; |
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp index 4c00b96c7..2cfc900ed 100644 --- a/src/video_core/gpu_synch.cpp +++ b/src/video_core/gpu_synch.cpp | |||
| @@ -22,15 +22,15 @@ void GPUSynch::SwapBuffers( | |||
| 22 | renderer.SwapBuffers(std::move(framebuffer)); | 22 | renderer.SwapBuffers(std::move(framebuffer)); |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | void GPUSynch::FlushRegion(VAddr addr, u64 size) { | 25 | void GPUSynch::FlushRegion(CacheAddr addr, u64 size) { |
| 26 | renderer.Rasterizer().FlushRegion(addr, size); | 26 | renderer.Rasterizer().FlushRegion(addr, size); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | void GPUSynch::InvalidateRegion(VAddr addr, u64 size) { | 29 | void GPUSynch::InvalidateRegion(CacheAddr addr, u64 size) { |
| 30 | renderer.Rasterizer().InvalidateRegion(addr, size); | 30 | renderer.Rasterizer().InvalidateRegion(addr, size); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | void GPUSynch::FlushAndInvalidateRegion(VAddr addr, u64 size) { | 33 | void GPUSynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { |
| 34 | renderer.Rasterizer().FlushAndInvalidateRegion(addr, size); | 34 | renderer.Rasterizer().FlushAndInvalidateRegion(addr, size); |
| 35 | } | 35 | } |
| 36 | 36 | ||
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h index 7d5a241ff..766b5631c 100644 --- a/src/video_core/gpu_synch.h +++ b/src/video_core/gpu_synch.h | |||
| @@ -21,9 +21,9 @@ public: | |||
| 21 | void PushGPUEntries(Tegra::CommandList&& entries) override; | 21 | void PushGPUEntries(Tegra::CommandList&& entries) override; |
| 22 | void SwapBuffers( | 22 | void SwapBuffers( |
| 23 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; | 23 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; |
| 24 | void FlushRegion(VAddr addr, u64 size) override; | 24 | void FlushRegion(CacheAddr addr, u64 size) override; |
| 25 | void InvalidateRegion(VAddr addr, u64 size) override; | 25 | void InvalidateRegion(CacheAddr addr, u64 size) override; |
| 26 | void FlushAndInvalidateRegion(VAddr addr, u64 size) override; | 26 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | } // namespace VideoCommon | 29 | } // namespace VideoCommon |
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index c5bdd2a17..086b2f625 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/microprofile.h" | 6 | #include "common/microprofile.h" |
| 7 | #include "core/frontend/scope_acquire_window_context.h" | 7 | #include "core/frontend/scope_acquire_window_context.h" |
| 8 | #include "core/settings.h" | ||
| 9 | #include "video_core/dma_pusher.h" | 8 | #include "video_core/dma_pusher.h" |
| 10 | #include "video_core/gpu.h" | 9 | #include "video_core/gpu.h" |
| 11 | #include "video_core/gpu_thread.h" | 10 | #include "video_core/gpu_thread.h" |
| @@ -13,38 +12,13 @@ | |||
| 13 | 12 | ||
| 14 | namespace VideoCommon::GPUThread { | 13 | namespace VideoCommon::GPUThread { |
| 15 | 14 | ||
| 16 | /// Executes a single GPU thread command | ||
| 17 | static void ExecuteCommand(CommandData* command, VideoCore::RendererBase& renderer, | ||
| 18 | Tegra::DmaPusher& dma_pusher) { | ||
| 19 | if (const auto submit_list = std::get_if<SubmitListCommand>(command)) { | ||
| 20 | dma_pusher.Push(std::move(submit_list->entries)); | ||
| 21 | dma_pusher.DispatchCalls(); | ||
| 22 | } else if (const auto data = std::get_if<SwapBuffersCommand>(command)) { | ||
| 23 | renderer.SwapBuffers(data->framebuffer); | ||
| 24 | } else if (const auto data = std::get_if<FlushRegionCommand>(command)) { | ||
| 25 | renderer.Rasterizer().FlushRegion(data->addr, data->size); | ||
| 26 | } else if (const auto data = std::get_if<InvalidateRegionCommand>(command)) { | ||
| 27 | renderer.Rasterizer().InvalidateRegion(data->addr, data->size); | ||
| 28 | } else if (const auto data = std::get_if<FlushAndInvalidateRegionCommand>(command)) { | ||
| 29 | renderer.Rasterizer().FlushAndInvalidateRegion(data->addr, data->size); | ||
| 30 | } else { | ||
| 31 | UNREACHABLE(); | ||
| 32 | } | ||
| 33 | } | ||
| 34 | |||
| 35 | /// Runs the GPU thread | 15 | /// Runs the GPU thread |
| 36 | static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher, | 16 | static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher, |
| 37 | SynchState& state) { | 17 | SynchState& state) { |
| 38 | |||
| 39 | MicroProfileOnThreadCreate("GpuThread"); | 18 | MicroProfileOnThreadCreate("GpuThread"); |
| 40 | 19 | ||
| 41 | auto WaitForWakeup = [&]() { | ||
| 42 | std::unique_lock<std::mutex> lock{state.signal_mutex}; | ||
| 43 | state.signal_condition.wait(lock, [&] { return !state.is_idle || !state.is_running; }); | ||
| 44 | }; | ||
| 45 | |||
| 46 | // Wait for first GPU command before acquiring the window context | 20 | // Wait for first GPU command before acquiring the window context |
| 47 | WaitForWakeup(); | 21 | state.WaitForCommands(); |
| 48 | 22 | ||
| 49 | // If emulation was stopped during disk shader loading, abort before trying to acquire context | 23 | // If emulation was stopped during disk shader loading, abort before trying to acquire context |
| 50 | if (!state.is_running) { | 24 | if (!state.is_running) { |
| @@ -53,100 +27,72 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p | |||
| 53 | 27 | ||
| 54 | Core::Frontend::ScopeAcquireWindowContext acquire_context{renderer.GetRenderWindow()}; | 28 | Core::Frontend::ScopeAcquireWindowContext acquire_context{renderer.GetRenderWindow()}; |
| 55 | 29 | ||
| 30 | CommandDataContainer next; | ||
| 56 | while (state.is_running) { | 31 | while (state.is_running) { |
| 57 | if (!state.is_running) { | 32 | state.WaitForCommands(); |
| 58 | return; | 33 | while (!state.queue.Empty()) { |
| 59 | } | 34 | state.queue.Pop(next); |
| 60 | 35 | if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) { | |
| 61 | { | 36 | dma_pusher.Push(std::move(submit_list->entries)); |
| 62 | // Thread has been woken up, so make the previous write queue the next read queue | 37 | dma_pusher.DispatchCalls(); |
| 63 | std::lock_guard<std::mutex> lock{state.signal_mutex}; | 38 | } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { |
| 64 | std::swap(state.push_queue, state.pop_queue); | 39 | state.DecrementFramesCounter(); |
| 65 | } | 40 | renderer.SwapBuffers(std::move(data->framebuffer)); |
| 66 | 41 | } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { | |
| 67 | // Execute all of the GPU commands | 42 | renderer.Rasterizer().FlushRegion(data->addr, data->size); |
| 68 | while (!state.pop_queue->empty()) { | 43 | } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { |
| 69 | ExecuteCommand(&state.pop_queue->front(), renderer, dma_pusher); | 44 | renderer.Rasterizer().InvalidateRegion(data->addr, data->size); |
| 70 | state.pop_queue->pop(); | 45 | } else if (const auto data = std::get_if<EndProcessingCommand>(&next.data)) { |
| 46 | return; | ||
| 47 | } else { | ||
| 48 | UNREACHABLE(); | ||
| 49 | } | ||
| 71 | } | 50 | } |
| 72 | |||
| 73 | state.UpdateIdleState(); | ||
| 74 | |||
| 75 | // Signal that the GPU thread has finished processing commands | ||
| 76 | if (state.is_idle) { | ||
| 77 | state.idle_condition.notify_one(); | ||
| 78 | } | ||
| 79 | |||
| 80 | // Wait for CPU thread to send more GPU commands | ||
| 81 | WaitForWakeup(); | ||
| 82 | } | 51 | } |
| 83 | } | 52 | } |
| 84 | 53 | ||
| 85 | ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) | 54 | ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) |
| 86 | : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), | 55 | : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), |
| 87 | std::ref(dma_pusher), std::ref(state)}, | 56 | std::ref(dma_pusher), std::ref(state)} {} |
| 88 | thread_id{thread.get_id()} {} | ||
| 89 | 57 | ||
| 90 | ThreadManager::~ThreadManager() { | 58 | ThreadManager::~ThreadManager() { |
| 91 | { | 59 | // Notify GPU thread that a shutdown is pending |
| 92 | // Notify GPU thread that a shutdown is pending | 60 | PushCommand(EndProcessingCommand()); |
| 93 | std::lock_guard<std::mutex> lock{state.signal_mutex}; | ||
| 94 | state.is_running = false; | ||
| 95 | } | ||
| 96 | |||
| 97 | state.signal_condition.notify_one(); | ||
| 98 | thread.join(); | 61 | thread.join(); |
| 99 | } | 62 | } |
| 100 | 63 | ||
| 101 | void ThreadManager::SubmitList(Tegra::CommandList&& entries) { | 64 | void ThreadManager::SubmitList(Tegra::CommandList&& entries) { |
| 102 | if (entries.empty()) { | 65 | PushCommand(SubmitListCommand(std::move(entries))); |
| 103 | return; | ||
| 104 | } | ||
| 105 | |||
| 106 | PushCommand(SubmitListCommand(std::move(entries)), false, false); | ||
| 107 | } | 66 | } |
| 108 | 67 | ||
| 109 | void ThreadManager::SwapBuffers( | 68 | void ThreadManager::SwapBuffers( |
| 110 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { | 69 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { |
| 111 | PushCommand(SwapBuffersCommand(std::move(framebuffer)), true, false); | 70 | state.IncrementFramesCounter(); |
| 71 | PushCommand(SwapBuffersCommand(std::move(framebuffer))); | ||
| 72 | state.WaitForFrames(); | ||
| 112 | } | 73 | } |
| 113 | 74 | ||
| 114 | void ThreadManager::FlushRegion(VAddr addr, u64 size) { | 75 | void ThreadManager::FlushRegion(CacheAddr addr, u64 size) { |
| 115 | // Block the CPU when using accurate emulation | 76 | PushCommand(FlushRegionCommand(addr, size)); |
| 116 | PushCommand(FlushRegionCommand(addr, size), Settings::values.use_accurate_gpu_emulation, false); | ||
| 117 | } | 77 | } |
| 118 | 78 | ||
| 119 | void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { | 79 | void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) { |
| 120 | PushCommand(InvalidateRegionCommand(addr, size), true, true); | 80 | if (state.queue.Empty()) { |
| 81 | // It's quicker to invalidate a single region on the CPU if the queue is already empty | ||
| 82 | renderer.Rasterizer().InvalidateRegion(addr, size); | ||
| 83 | } else { | ||
| 84 | PushCommand(InvalidateRegionCommand(addr, size)); | ||
| 85 | } | ||
| 121 | } | 86 | } |
| 122 | 87 | ||
| 123 | void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) { | 88 | void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { |
| 89 | // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important | ||
| 124 | InvalidateRegion(addr, size); | 90 | InvalidateRegion(addr, size); |
| 125 | } | 91 | } |
| 126 | 92 | ||
| 127 | void ThreadManager::PushCommand(CommandData&& command_data, bool wait_for_idle, bool allow_on_cpu) { | 93 | void ThreadManager::PushCommand(CommandData&& command_data) { |
| 128 | { | 94 | state.queue.Push(CommandDataContainer(std::move(command_data))); |
| 129 | std::lock_guard<std::mutex> lock{state.signal_mutex}; | 95 | state.SignalCommands(); |
| 130 | |||
| 131 | if ((allow_on_cpu && state.is_idle) || IsGpuThread()) { | ||
| 132 | // Execute the command synchronously on the current thread | ||
| 133 | ExecuteCommand(&command_data, renderer, dma_pusher); | ||
| 134 | return; | ||
| 135 | } | ||
| 136 | |||
| 137 | // Push the command to the GPU thread | ||
| 138 | state.UpdateIdleState(); | ||
| 139 | state.push_queue->emplace(command_data); | ||
| 140 | } | ||
| 141 | |||
| 142 | // Signal the GPU thread that commands are pending | ||
| 143 | state.signal_condition.notify_one(); | ||
| 144 | |||
| 145 | if (wait_for_idle) { | ||
| 146 | // Wait for the GPU to be idle (all commands to be executed) | ||
| 147 | std::unique_lock<std::mutex> lock{state.idle_mutex}; | ||
| 148 | state.idle_condition.wait(lock, [this] { return static_cast<bool>(state.is_idle); }); | ||
| 149 | } | ||
| 150 | } | 96 | } |
| 151 | 97 | ||
| 152 | } // namespace VideoCommon::GPUThread | 98 | } // namespace VideoCommon::GPUThread |
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index edb148b14..8cd7db1c6 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h | |||
| @@ -13,6 +13,9 @@ | |||
| 13 | #include <thread> | 13 | #include <thread> |
| 14 | #include <variant> | 14 | #include <variant> |
| 15 | 15 | ||
| 16 | #include "common/threadsafe_queue.h" | ||
| 17 | #include "video_core/gpu.h" | ||
| 18 | |||
| 16 | namespace Tegra { | 19 | namespace Tegra { |
| 17 | struct FramebufferConfig; | 20 | struct FramebufferConfig; |
| 18 | class DmaPusher; | 21 | class DmaPusher; |
| @@ -24,6 +27,9 @@ class RendererBase; | |||
| 24 | 27 | ||
| 25 | namespace VideoCommon::GPUThread { | 28 | namespace VideoCommon::GPUThread { |
| 26 | 29 | ||
| 30 | /// Command to signal to the GPU thread that processing has ended | ||
| 31 | struct EndProcessingCommand final {}; | ||
| 32 | |||
| 27 | /// Command to signal to the GPU thread that a command list is ready for processing | 33 | /// Command to signal to the GPU thread that a command list is ready for processing |
| 28 | struct SubmitListCommand final { | 34 | struct SubmitListCommand final { |
| 29 | explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {} | 35 | explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {} |
| @@ -36,59 +42,110 @@ struct SwapBuffersCommand final { | |||
| 36 | explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer) | 42 | explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer) |
| 37 | : framebuffer{std::move(framebuffer)} {} | 43 | : framebuffer{std::move(framebuffer)} {} |
| 38 | 44 | ||
| 39 | std::optional<const Tegra::FramebufferConfig> framebuffer; | 45 | std::optional<Tegra::FramebufferConfig> framebuffer; |
| 40 | }; | 46 | }; |
| 41 | 47 | ||
| 42 | /// Command to signal to the GPU thread to flush a region | 48 | /// Command to signal to the GPU thread to flush a region |
| 43 | struct FlushRegionCommand final { | 49 | struct FlushRegionCommand final { |
| 44 | explicit constexpr FlushRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {} | 50 | explicit constexpr FlushRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {} |
| 45 | 51 | ||
| 46 | const VAddr addr; | 52 | CacheAddr addr; |
| 47 | const u64 size; | 53 | u64 size; |
| 48 | }; | 54 | }; |
| 49 | 55 | ||
| 50 | /// Command to signal to the GPU thread to invalidate a region | 56 | /// Command to signal to the GPU thread to invalidate a region |
| 51 | struct InvalidateRegionCommand final { | 57 | struct InvalidateRegionCommand final { |
| 52 | explicit constexpr InvalidateRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {} | 58 | explicit constexpr InvalidateRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {} |
| 53 | 59 | ||
| 54 | const VAddr addr; | 60 | CacheAddr addr; |
| 55 | const u64 size; | 61 | u64 size; |
| 56 | }; | 62 | }; |
| 57 | 63 | ||
| 58 | /// Command to signal to the GPU thread to flush and invalidate a region | 64 | /// Command to signal to the GPU thread to flush and invalidate a region |
| 59 | struct FlushAndInvalidateRegionCommand final { | 65 | struct FlushAndInvalidateRegionCommand final { |
| 60 | explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr, u64 size) | 66 | explicit constexpr FlushAndInvalidateRegionCommand(CacheAddr addr, u64 size) |
| 61 | : addr{addr}, size{size} {} | 67 | : addr{addr}, size{size} {} |
| 62 | 68 | ||
| 63 | const VAddr addr; | 69 | CacheAddr addr; |
| 64 | const u64 size; | 70 | u64 size; |
| 65 | }; | 71 | }; |
| 66 | 72 | ||
| 67 | using CommandData = std::variant<SubmitListCommand, SwapBuffersCommand, FlushRegionCommand, | 73 | using CommandData = |
| 68 | InvalidateRegionCommand, FlushAndInvalidateRegionCommand>; | 74 | std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand, |
| 75 | InvalidateRegionCommand, FlushAndInvalidateRegionCommand>; | ||
| 76 | |||
| 77 | struct CommandDataContainer { | ||
| 78 | CommandDataContainer() = default; | ||
| 79 | |||
| 80 | CommandDataContainer(CommandData&& data) : data{std::move(data)} {} | ||
| 81 | |||
| 82 | CommandDataContainer& operator=(const CommandDataContainer& t) { | ||
| 83 | data = std::move(t.data); | ||
| 84 | return *this; | ||
| 85 | } | ||
| 86 | |||
| 87 | CommandData data; | ||
| 88 | }; | ||
| 69 | 89 | ||
| 70 | /// Struct used to synchronize the GPU thread | 90 | /// Struct used to synchronize the GPU thread |
| 71 | struct SynchState final { | 91 | struct SynchState final { |
| 72 | std::atomic<bool> is_running{true}; | 92 | std::atomic_bool is_running{true}; |
| 73 | std::atomic<bool> is_idle{true}; | 93 | std::atomic_int queued_frame_count{}; |
| 74 | std::condition_variable signal_condition; | 94 | std::mutex frames_mutex; |
| 75 | std::mutex signal_mutex; | 95 | std::mutex commands_mutex; |
| 76 | std::condition_variable idle_condition; | 96 | std::condition_variable commands_condition; |
| 77 | std::mutex idle_mutex; | 97 | std::condition_variable frames_condition; |
| 78 | 98 | ||
| 79 | // We use two queues for sending commands to the GPU thread, one for writing (push_queue) to and | 99 | void IncrementFramesCounter() { |
| 80 | // one for reading from (pop_queue). These are swapped whenever the current pop_queue becomes | 100 | std::lock_guard<std::mutex> lock{frames_mutex}; |
| 81 | // empty. This allows for efficient thread-safe access, as it does not require any copies. | 101 | ++queued_frame_count; |
| 82 | 102 | } | |
| 83 | using CommandQueue = std::queue<CommandData>; | 103 | |
| 84 | std::array<CommandQueue, 2> command_queues; | 104 | void DecrementFramesCounter() { |
| 85 | CommandQueue* push_queue{&command_queues[0]}; | 105 | { |
| 86 | CommandQueue* pop_queue{&command_queues[1]}; | 106 | std::lock_guard<std::mutex> lock{frames_mutex}; |
| 87 | 107 | --queued_frame_count; | |
| 88 | void UpdateIdleState() { | 108 | |
| 89 | std::lock_guard<std::mutex> lock{idle_mutex}; | 109 | if (queued_frame_count) { |
| 90 | is_idle = command_queues[0].empty() && command_queues[1].empty(); | 110 | return; |
| 111 | } | ||
| 112 | } | ||
| 113 | frames_condition.notify_one(); | ||
| 91 | } | 114 | } |
| 115 | |||
| 116 | void WaitForFrames() { | ||
| 117 | { | ||
| 118 | std::lock_guard<std::mutex> lock{frames_mutex}; | ||
| 119 | if (!queued_frame_count) { | ||
| 120 | return; | ||
| 121 | } | ||
| 122 | } | ||
| 123 | |||
| 124 | // Wait for the GPU to be idle (all commands to be executed) | ||
| 125 | { | ||
| 126 | std::unique_lock<std::mutex> lock{frames_mutex}; | ||
| 127 | frames_condition.wait(lock, [this] { return !queued_frame_count; }); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | void SignalCommands() { | ||
| 132 | { | ||
| 133 | std::unique_lock<std::mutex> lock{commands_mutex}; | ||
| 134 | if (queue.Empty()) { | ||
| 135 | return; | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | commands_condition.notify_one(); | ||
| 140 | } | ||
| 141 | |||
| 142 | void WaitForCommands() { | ||
| 143 | std::unique_lock<std::mutex> lock{commands_mutex}; | ||
| 144 | commands_condition.wait(lock, [this] { return !queue.Empty(); }); | ||
| 145 | } | ||
| 146 | |||
| 147 | using CommandQueue = Common::SPSCQueue<CommandDataContainer>; | ||
| 148 | CommandQueue queue; | ||
| 92 | }; | 149 | }; |
| 93 | 150 | ||
| 94 | /// Class used to manage the GPU thread | 151 | /// Class used to manage the GPU thread |
| @@ -105,22 +162,17 @@ public: | |||
| 105 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer); | 162 | std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer); |
| 106 | 163 | ||
| 107 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 164 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 108 | void FlushRegion(VAddr addr, u64 size); | 165 | void FlushRegion(CacheAddr addr, u64 size); |
| 109 | 166 | ||
| 110 | /// Notify rasterizer that any caches of the specified region should be invalidated | 167 | /// Notify rasterizer that any caches of the specified region should be invalidated |
| 111 | void InvalidateRegion(VAddr addr, u64 size); | 168 | void InvalidateRegion(CacheAddr addr, u64 size); |
| 112 | 169 | ||
| 113 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated | 170 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated |
| 114 | void FlushAndInvalidateRegion(VAddr addr, u64 size); | 171 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size); |
| 115 | 172 | ||
| 116 | private: | 173 | private: |
| 117 | /// Pushes a command to be executed by the GPU thread | 174 | /// Pushes a command to be executed by the GPU thread |
| 118 | void PushCommand(CommandData&& command_data, bool wait_for_idle, bool allow_on_cpu); | 175 | void PushCommand(CommandData&& command_data); |
| 119 | |||
| 120 | /// Returns true if this is called by the GPU thread | ||
| 121 | bool IsGpuThread() const { | ||
| 122 | return std::this_thread::get_id() == thread_id; | ||
| 123 | } | ||
| 124 | 176 | ||
| 125 | private: | 177 | private: |
| 126 | SynchState state; | 178 | SynchState state; |
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 54abe5298..e76b59842 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -5,181 +5,446 @@ | |||
| 5 | #include "common/alignment.h" | 5 | #include "common/alignment.h" |
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/core.h" | ||
| 9 | #include "core/memory.h" | ||
| 10 | #include "video_core/gpu.h" | ||
| 8 | #include "video_core/memory_manager.h" | 11 | #include "video_core/memory_manager.h" |
| 12 | #include "video_core/rasterizer_interface.h" | ||
| 13 | #include "video_core/renderer_base.h" | ||
| 9 | 14 | ||
| 10 | namespace Tegra { | 15 | namespace Tegra { |
| 11 | 16 | ||
| 12 | MemoryManager::MemoryManager() { | 17 | MemoryManager::MemoryManager() { |
| 13 | // Mark the first page as reserved, so that 0 is not a valid GPUVAddr. Otherwise, games might | 18 | std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); |
| 14 | // try to use 0 as a valid address, which is also used to mean nullptr. This fixes a bug with | 19 | std::fill(page_table.attributes.begin(), page_table.attributes.end(), |
| 15 | // Undertale using 0 for a render target. | 20 | Common::PageType::Unmapped); |
| 16 | PageSlot(0) = static_cast<u64>(PageStatus::Reserved); | 21 | page_table.Resize(address_space_width); |
| 22 | |||
| 23 | // Initialize the map with a single free region covering the entire managed space. | ||
| 24 | VirtualMemoryArea initial_vma; | ||
| 25 | initial_vma.size = address_space_end; | ||
| 26 | vma_map.emplace(initial_vma.base, initial_vma); | ||
| 27 | |||
| 28 | UpdatePageTableForVMA(initial_vma); | ||
| 17 | } | 29 | } |
| 18 | 30 | ||
| 19 | GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) { | 31 | GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) { |
| 20 | const std::optional<GPUVAddr> gpu_addr{FindFreeBlock(0, size, align, PageStatus::Unmapped)}; | 32 | const u64 aligned_size{Common::AlignUp(size, page_size)}; |
| 33 | const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; | ||
| 21 | 34 | ||
| 22 | ASSERT_MSG(gpu_addr, "unable to find available GPU memory"); | 35 | AllocateMemory(gpu_addr, 0, aligned_size); |
| 23 | 36 | ||
| 24 | for (u64 offset{}; offset < size; offset += PAGE_SIZE) { | 37 | return gpu_addr; |
| 25 | VAddr& slot{PageSlot(*gpu_addr + offset)}; | 38 | } |
| 26 | 39 | ||
| 27 | ASSERT(slot == static_cast<u64>(PageStatus::Unmapped)); | 40 | GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) { |
| 41 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | ||
| 28 | 42 | ||
| 29 | slot = static_cast<u64>(PageStatus::Allocated); | 43 | AllocateMemory(gpu_addr, 0, aligned_size); |
| 30 | } | ||
| 31 | 44 | ||
| 32 | return *gpu_addr; | 45 | return gpu_addr; |
| 33 | } | 46 | } |
| 34 | 47 | ||
| 35 | GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) { | 48 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { |
| 36 | for (u64 offset{}; offset < size; offset += PAGE_SIZE) { | 49 | const u64 aligned_size{Common::AlignUp(size, page_size)}; |
| 37 | VAddr& slot{PageSlot(gpu_addr + offset)}; | 50 | const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; |
| 38 | 51 | ||
| 39 | ASSERT(slot == static_cast<u64>(PageStatus::Unmapped)); | 52 | MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); |
| 40 | 53 | ||
| 41 | slot = static_cast<u64>(PageStatus::Allocated); | 54 | return gpu_addr; |
| 42 | } | 55 | } |
| 56 | |||
| 57 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) { | ||
| 58 | ASSERT((gpu_addr & page_mask) == 0); | ||
| 59 | |||
| 60 | const u64 aligned_size{Common::AlignUp(size, page_size)}; | ||
| 61 | |||
| 62 | MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); | ||
| 43 | 63 | ||
| 44 | return gpu_addr; | 64 | return gpu_addr; |
| 45 | } | 65 | } |
| 46 | 66 | ||
| 47 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { | 67 | GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { |
| 48 | const std::optional<GPUVAddr> gpu_addr{FindFreeBlock(0, size, PAGE_SIZE, PageStatus::Unmapped)}; | 68 | ASSERT((gpu_addr & page_mask) == 0); |
| 49 | 69 | ||
| 50 | ASSERT_MSG(gpu_addr, "unable to find available GPU memory"); | 70 | const u64 aligned_size{Common::AlignUp(size, page_size)}; |
| 71 | const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))}; | ||
| 72 | |||
| 73 | Core::System::GetInstance().Renderer().Rasterizer().FlushAndInvalidateRegion(cache_addr, | ||
| 74 | aligned_size); | ||
| 75 | UnmapRange(gpu_addr, aligned_size); | ||
| 76 | |||
| 77 | return gpu_addr; | ||
| 78 | } | ||
| 51 | 79 | ||
| 52 | for (u64 offset{}; offset < size; offset += PAGE_SIZE) { | 80 | GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) { |
| 53 | VAddr& slot{PageSlot(*gpu_addr + offset)}; | 81 | // Find the first Free VMA. |
| 82 | const VMAHandle vma_handle{std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) { | ||
| 83 | if (vma.second.type != VirtualMemoryArea::Type::Unmapped) { | ||
| 84 | return false; | ||
| 85 | } | ||
| 54 | 86 | ||
| 55 | ASSERT(slot == static_cast<u64>(PageStatus::Unmapped)); | 87 | const VAddr vma_end{vma.second.base + vma.second.size}; |
| 88 | return vma_end > region_start && vma_end >= region_start + size; | ||
| 89 | })}; | ||
| 56 | 90 | ||
| 57 | slot = cpu_addr + offset; | 91 | if (vma_handle == vma_map.end()) { |
| 92 | return {}; | ||
| 58 | } | 93 | } |
| 59 | 94 | ||
| 60 | const MappedRegion region{cpu_addr, *gpu_addr, size}; | 95 | return std::max(region_start, vma_handle->second.base); |
| 61 | mapped_regions.push_back(region); | 96 | } |
| 62 | 97 | ||
| 63 | return *gpu_addr; | 98 | bool MemoryManager::IsAddressValid(GPUVAddr addr) const { |
| 99 | return (addr >> page_bits) < page_table.pointers.size(); | ||
| 64 | } | 100 | } |
| 65 | 101 | ||
| 66 | GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) { | 102 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) { |
| 67 | ASSERT((gpu_addr & PAGE_MASK) == 0); | 103 | if (!IsAddressValid(addr)) { |
| 104 | return {}; | ||
| 105 | } | ||
| 68 | 106 | ||
| 69 | if (PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Allocated)) { | 107 | VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]}; |
| 70 | // Page has been already mapped. In this case, we must find a new area of memory to use that | 108 | if (cpu_addr) { |
| 71 | // is different than the specified one. Super Mario Odyssey hits this scenario when changing | 109 | return cpu_addr + (addr & page_mask); |
| 72 | // areas, but we do not want to overwrite the old pages. | 110 | } |
| 73 | // TODO(bunnei): We need to write a hardware test to confirm this behavior. | ||
| 74 | 111 | ||
| 75 | LOG_ERROR(HW_GPU, "attempting to map addr 0x{:016X}, which is not available!", gpu_addr); | 112 | return {}; |
| 113 | } | ||
| 76 | 114 | ||
| 77 | const std::optional<GPUVAddr> new_gpu_addr{ | 115 | template <typename T> |
| 78 | FindFreeBlock(gpu_addr, size, PAGE_SIZE, PageStatus::Allocated)}; | 116 | T MemoryManager::Read(GPUVAddr addr) { |
| 117 | if (!IsAddressValid(addr)) { | ||
| 118 | return {}; | ||
| 119 | } | ||
| 79 | 120 | ||
| 80 | ASSERT_MSG(new_gpu_addr, "unable to find available GPU memory"); | 121 | const u8* page_pointer{page_table.pointers[addr >> page_bits]}; |
| 122 | if (page_pointer) { | ||
| 123 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 124 | T value; | ||
| 125 | std::memcpy(&value, &page_pointer[addr & page_mask], sizeof(T)); | ||
| 126 | return value; | ||
| 127 | } | ||
| 81 | 128 | ||
| 82 | gpu_addr = *new_gpu_addr; | 129 | switch (page_table.attributes[addr >> page_bits]) { |
| 130 | case Common::PageType::Unmapped: | ||
| 131 | LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr); | ||
| 132 | return 0; | ||
| 133 | case Common::PageType::Memory: | ||
| 134 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); | ||
| 135 | break; | ||
| 136 | default: | ||
| 137 | UNREACHABLE(); | ||
| 83 | } | 138 | } |
| 139 | return {}; | ||
| 140 | } | ||
| 84 | 141 | ||
| 85 | for (u64 offset{}; offset < size; offset += PAGE_SIZE) { | 142 | template <typename T> |
| 86 | VAddr& slot{PageSlot(gpu_addr + offset)}; | 143 | void MemoryManager::Write(GPUVAddr addr, T data) { |
| 144 | if (!IsAddressValid(addr)) { | ||
| 145 | return; | ||
| 146 | } | ||
| 87 | 147 | ||
| 88 | ASSERT(slot == static_cast<u64>(PageStatus::Allocated)); | 148 | u8* page_pointer{page_table.pointers[addr >> page_bits]}; |
| 149 | if (page_pointer) { | ||
| 150 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 151 | std::memcpy(&page_pointer[addr & page_mask], &data, sizeof(T)); | ||
| 152 | return; | ||
| 153 | } | ||
| 89 | 154 | ||
| 90 | slot = cpu_addr + offset; | 155 | switch (page_table.attributes[addr >> page_bits]) { |
| 156 | case Common::PageType::Unmapped: | ||
| 157 | LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | ||
| 158 | static_cast<u32>(data), addr); | ||
| 159 | return; | ||
| 160 | case Common::PageType::Memory: | ||
| 161 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); | ||
| 162 | break; | ||
| 163 | default: | ||
| 164 | UNREACHABLE(); | ||
| 91 | } | 165 | } |
| 166 | } | ||
| 92 | 167 | ||
| 93 | const MappedRegion region{cpu_addr, gpu_addr, size}; | 168 | template u8 MemoryManager::Read<u8>(GPUVAddr addr); |
| 94 | mapped_regions.push_back(region); | 169 | template u16 MemoryManager::Read<u16>(GPUVAddr addr); |
| 170 | template u32 MemoryManager::Read<u32>(GPUVAddr addr); | ||
| 171 | template u64 MemoryManager::Read<u64>(GPUVAddr addr); | ||
| 172 | template void MemoryManager::Write<u8>(GPUVAddr addr, u8 data); | ||
| 173 | template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data); | ||
| 174 | template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data); | ||
| 175 | template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); | ||
| 176 | |||
| 177 | u8* MemoryManager::GetPointer(GPUVAddr addr) { | ||
| 178 | if (!IsAddressValid(addr)) { | ||
| 179 | return {}; | ||
| 180 | } | ||
| 95 | 181 | ||
| 96 | return gpu_addr; | 182 | u8* page_pointer{page_table.pointers[addr >> page_bits]}; |
| 183 | if (page_pointer) { | ||
| 184 | return page_pointer + (addr & page_mask); | ||
| 185 | } | ||
| 186 | |||
| 187 | LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr); | ||
| 188 | return {}; | ||
| 97 | } | 189 | } |
| 98 | 190 | ||
| 99 | GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { | 191 | void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) { |
| 100 | ASSERT((gpu_addr & PAGE_MASK) == 0); | 192 | std::memcpy(dest_buffer, GetPointer(src_addr), size); |
| 193 | } | ||
| 194 | void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size) { | ||
| 195 | std::memcpy(GetPointer(dest_addr), src_buffer, size); | ||
| 196 | } | ||
| 101 | 197 | ||
| 102 | for (u64 offset{}; offset < size; offset += PAGE_SIZE) { | 198 | void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size) { |
| 103 | VAddr& slot{PageSlot(gpu_addr + offset)}; | 199 | std::memcpy(GetPointer(dest_addr), GetPointer(src_addr), size); |
| 200 | } | ||
| 104 | 201 | ||
| 105 | ASSERT(slot != static_cast<u64>(PageStatus::Allocated) && | 202 | void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, |
| 106 | slot != static_cast<u64>(PageStatus::Unmapped)); | 203 | VAddr backing_addr) { |
| 204 | LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size, | ||
| 205 | (base + size) * page_size); | ||
| 206 | |||
| 207 | const VAddr end{base + size}; | ||
| 208 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", | ||
| 209 | base + page_table.pointers.size()); | ||
| 210 | |||
| 211 | std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); | ||
| 212 | |||
| 213 | if (memory == nullptr) { | ||
| 214 | std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); | ||
| 215 | std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end, | ||
| 216 | backing_addr); | ||
| 217 | } else { | ||
| 218 | while (base != end) { | ||
| 219 | page_table.pointers[base] = memory; | ||
| 220 | page_table.backing_addr[base] = backing_addr; | ||
| 221 | |||
| 222 | base += 1; | ||
| 223 | memory += page_size; | ||
| 224 | backing_addr += page_size; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | } | ||
| 107 | 228 | ||
| 108 | slot = static_cast<u64>(PageStatus::Unmapped); | 229 | void MemoryManager::MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr) { |
| 230 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size); | ||
| 231 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base); | ||
| 232 | MapPages(base / page_size, size / page_size, target, Common::PageType::Memory, backing_addr); | ||
| 233 | } | ||
| 234 | |||
| 235 | void MemoryManager::UnmapRegion(GPUVAddr base, u64 size) { | ||
| 236 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size); | ||
| 237 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base); | ||
| 238 | MapPages(base / page_size, size / page_size, nullptr, Common::PageType::Unmapped); | ||
| 239 | } | ||
| 240 | |||
| 241 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { | ||
| 242 | ASSERT(base + size == next.base); | ||
| 243 | if (type != next.type) { | ||
| 244 | return {}; | ||
| 245 | } | ||
| 246 | if (type == VirtualMemoryArea::Type::Allocated && (offset + size != next.offset)) { | ||
| 247 | return {}; | ||
| 248 | } | ||
| 249 | if (type == VirtualMemoryArea::Type::Mapped && backing_memory + size != next.backing_memory) { | ||
| 250 | return {}; | ||
| 251 | } | ||
| 252 | return true; | ||
| 253 | } | ||
| 254 | |||
| 255 | MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const { | ||
| 256 | if (target >= address_space_end) { | ||
| 257 | return vma_map.end(); | ||
| 258 | } else { | ||
| 259 | return std::prev(vma_map.upper_bound(target)); | ||
| 109 | } | 260 | } |
| 261 | } | ||
| 110 | 262 | ||
| 111 | // Delete the region mappings that are contained within the unmapped region | 263 | MemoryManager::VMAIter MemoryManager::Allocate(VMAIter vma_handle) { |
| 112 | mapped_regions.erase(std::remove_if(mapped_regions.begin(), mapped_regions.end(), | 264 | VirtualMemoryArea& vma{vma_handle->second}; |
| 113 | [&](const MappedRegion& region) { | 265 | |
| 114 | return region.gpu_addr <= gpu_addr && | 266 | vma.type = VirtualMemoryArea::Type::Allocated; |
| 115 | region.gpu_addr + region.size < gpu_addr + size; | 267 | vma.backing_addr = 0; |
| 116 | }), | 268 | vma.backing_memory = {}; |
| 117 | mapped_regions.end()); | 269 | UpdatePageTableForVMA(vma); |
| 118 | return gpu_addr; | 270 | |
| 271 | return MergeAdjacent(vma_handle); | ||
| 119 | } | 272 | } |
| 120 | 273 | ||
| 121 | GPUVAddr MemoryManager::GetRegionEnd(GPUVAddr region_start) const { | 274 | MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset, |
| 122 | for (const auto& region : mapped_regions) { | 275 | u64 size) { |
| 123 | const GPUVAddr region_end{region.gpu_addr + region.size}; | 276 | |
| 124 | if (region_start >= region.gpu_addr && region_start < region_end) { | 277 | // This is the appropriately sized VMA that will turn into our allocation. |
| 125 | return region_end; | 278 | VMAIter vma_handle{CarveVMA(target, size)}; |
| 126 | } | 279 | VirtualMemoryArea& vma{vma_handle->second}; |
| 280 | |||
| 281 | ASSERT(vma.size == size); | ||
| 282 | |||
| 283 | vma.offset = offset; | ||
| 284 | |||
| 285 | return Allocate(vma_handle); | ||
| 286 | } | ||
| 287 | |||
| 288 | MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size, | ||
| 289 | VAddr backing_addr) { | ||
| 290 | // This is the appropriately sized VMA that will turn into our allocation. | ||
| 291 | VMAIter vma_handle{CarveVMA(target, size)}; | ||
| 292 | VirtualMemoryArea& vma{vma_handle->second}; | ||
| 293 | |||
| 294 | ASSERT(vma.size == size); | ||
| 295 | |||
| 296 | vma.type = VirtualMemoryArea::Type::Mapped; | ||
| 297 | vma.backing_memory = memory; | ||
| 298 | vma.backing_addr = backing_addr; | ||
| 299 | UpdatePageTableForVMA(vma); | ||
| 300 | |||
| 301 | return MergeAdjacent(vma_handle); | ||
| 302 | } | ||
| 303 | |||
| 304 | void MemoryManager::UnmapRange(GPUVAddr target, u64 size) { | ||
| 305 | VMAIter vma{CarveVMARange(target, size)}; | ||
| 306 | const VAddr target_end{target + size}; | ||
| 307 | const VMAIter end{vma_map.end()}; | ||
| 308 | |||
| 309 | // The comparison against the end of the range must be done using addresses since VMAs can be | ||
| 310 | // merged during this process, causing invalidation of the iterators. | ||
| 311 | while (vma != end && vma->second.base < target_end) { | ||
| 312 | // Unmapped ranges return to allocated state and can be reused | ||
| 313 | // This behavior is used by Super Mario Odyssey, Sonic Forces, and likely other games | ||
| 314 | vma = std::next(Allocate(vma)); | ||
| 127 | } | 315 | } |
| 128 | return {}; | 316 | |
| 317 | ASSERT(FindVMA(target)->second.size >= size); | ||
| 129 | } | 318 | } |
| 130 | 319 | ||
| 131 | std::optional<GPUVAddr> MemoryManager::FindFreeBlock(GPUVAddr region_start, u64 size, u64 align, | 320 | MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter) { |
| 132 | PageStatus status) { | 321 | // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given |
| 133 | GPUVAddr gpu_addr{region_start}; | 322 | // non-const access to its container. |
| 134 | u64 free_space{}; | 323 | return vma_map.erase(iter, iter); // Erases an empty range of elements |
| 135 | align = (align + PAGE_MASK) & ~PAGE_MASK; | 324 | } |
| 136 | 325 | ||
| 137 | while (gpu_addr + free_space < MAX_ADDRESS) { | 326 | MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) { |
| 138 | if (PageSlot(gpu_addr + free_space) == static_cast<u64>(status)) { | 327 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size); |
| 139 | free_space += PAGE_SIZE; | 328 | ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: 0x{:016X}", base); |
| 140 | if (free_space >= size) { | 329 | |
| 141 | return gpu_addr; | 330 | VMAIter vma_handle{StripIterConstness(FindVMA(base))}; |
| 142 | } | 331 | if (vma_handle == vma_map.end()) { |
| 143 | } else { | 332 | // Target address is outside the managed range |
| 144 | gpu_addr += free_space + PAGE_SIZE; | 333 | return {}; |
| 145 | free_space = 0; | ||
| 146 | gpu_addr = Common::AlignUp(gpu_addr, align); | ||
| 147 | } | ||
| 148 | } | 334 | } |
| 149 | 335 | ||
| 150 | return {}; | 336 | const VirtualMemoryArea& vma{vma_handle->second}; |
| 337 | if (vma.type == VirtualMemoryArea::Type::Mapped) { | ||
| 338 | // Region is already allocated | ||
| 339 | return {}; | ||
| 340 | } | ||
| 341 | |||
| 342 | const VAddr start_in_vma{base - vma.base}; | ||
| 343 | const VAddr end_in_vma{start_in_vma + size}; | ||
| 344 | |||
| 345 | ASSERT_MSG(end_in_vma <= vma.size, "region size 0x{:016X} is less than required size 0x{:016X}", | ||
| 346 | vma.size, end_in_vma); | ||
| 347 | |||
| 348 | if (end_in_vma < vma.size) { | ||
| 349 | // Split VMA at the end of the allocated region | ||
| 350 | SplitVMA(vma_handle, end_in_vma); | ||
| 351 | } | ||
| 352 | if (start_in_vma != 0) { | ||
| 353 | // Split VMA at the start of the allocated region | ||
| 354 | vma_handle = SplitVMA(vma_handle, start_in_vma); | ||
| 355 | } | ||
| 356 | |||
| 357 | return vma_handle; | ||
| 151 | } | 358 | } |
| 152 | 359 | ||
| 153 | std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) { | 360 | MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) { |
| 154 | const VAddr base_addr{PageSlot(gpu_addr)}; | 361 | ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size); |
| 362 | ASSERT_MSG((target & page_mask) == 0, "non-page aligned base: 0x{:016X}", target); | ||
| 155 | 363 | ||
| 156 | if (base_addr == static_cast<u64>(PageStatus::Allocated) || | 364 | const VAddr target_end{target + size}; |
| 157 | base_addr == static_cast<u64>(PageStatus::Unmapped) || | 365 | ASSERT(target_end >= target); |
| 158 | base_addr == static_cast<u64>(PageStatus::Reserved)) { | 366 | ASSERT(size > 0); |
| 367 | |||
| 368 | VMAIter begin_vma{StripIterConstness(FindVMA(target))}; | ||
| 369 | const VMAIter i_end{vma_map.lower_bound(target_end)}; | ||
| 370 | if (std::any_of(begin_vma, i_end, [](const auto& entry) { | ||
| 371 | return entry.second.type == VirtualMemoryArea::Type::Unmapped; | ||
| 372 | })) { | ||
| 159 | return {}; | 373 | return {}; |
| 160 | } | 374 | } |
| 161 | 375 | ||
| 162 | return base_addr + (gpu_addr & PAGE_MASK); | 376 | if (target != begin_vma->second.base) { |
| 377 | begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base); | ||
| 378 | } | ||
| 379 | |||
| 380 | VMAIter end_vma{StripIterConstness(FindVMA(target_end))}; | ||
| 381 | if (end_vma != vma_map.end() && target_end != end_vma->second.base) { | ||
| 382 | end_vma = SplitVMA(end_vma, target_end - end_vma->second.base); | ||
| 383 | } | ||
| 384 | |||
| 385 | return begin_vma; | ||
| 163 | } | 386 | } |
| 164 | 387 | ||
| 165 | std::vector<GPUVAddr> MemoryManager::CpuToGpuAddress(VAddr cpu_addr) const { | 388 | MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { |
| 166 | std::vector<GPUVAddr> results; | 389 | VirtualMemoryArea& old_vma{vma_handle->second}; |
| 167 | for (const auto& region : mapped_regions) { | 390 | VirtualMemoryArea new_vma{old_vma}; // Make a copy of the VMA |
| 168 | if (cpu_addr >= region.cpu_addr && cpu_addr < (region.cpu_addr + region.size)) { | 391 | |
| 169 | const u64 offset{cpu_addr - region.cpu_addr}; | 392 | // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably |
| 170 | results.push_back(region.gpu_addr + offset); | 393 | // a bug. This restriction might be removed later. |
| 394 | ASSERT(offset_in_vma < old_vma.size); | ||
| 395 | ASSERT(offset_in_vma > 0); | ||
| 396 | |||
| 397 | old_vma.size = offset_in_vma; | ||
| 398 | new_vma.base += offset_in_vma; | ||
| 399 | new_vma.size -= offset_in_vma; | ||
| 400 | |||
| 401 | switch (new_vma.type) { | ||
| 402 | case VirtualMemoryArea::Type::Unmapped: | ||
| 403 | break; | ||
| 404 | case VirtualMemoryArea::Type::Allocated: | ||
| 405 | new_vma.offset += offset_in_vma; | ||
| 406 | break; | ||
| 407 | case VirtualMemoryArea::Type::Mapped: | ||
| 408 | new_vma.backing_memory += offset_in_vma; | ||
| 409 | break; | ||
| 410 | } | ||
| 411 | |||
| 412 | ASSERT(old_vma.CanBeMergedWith(new_vma)); | ||
| 413 | |||
| 414 | return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma); | ||
| 415 | } | ||
| 416 | |||
| 417 | MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) { | ||
| 418 | const VMAIter next_vma{std::next(iter)}; | ||
| 419 | if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { | ||
| 420 | iter->second.size += next_vma->second.size; | ||
| 421 | vma_map.erase(next_vma); | ||
| 422 | } | ||
| 423 | |||
| 424 | if (iter != vma_map.begin()) { | ||
| 425 | VMAIter prev_vma{std::prev(iter)}; | ||
| 426 | if (prev_vma->second.CanBeMergedWith(iter->second)) { | ||
| 427 | prev_vma->second.size += iter->second.size; | ||
| 428 | vma_map.erase(iter); | ||
| 429 | iter = prev_vma; | ||
| 171 | } | 430 | } |
| 172 | } | 431 | } |
| 173 | return results; | 432 | |
| 433 | return iter; | ||
| 174 | } | 434 | } |
| 175 | 435 | ||
| 176 | VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) { | 436 | void MemoryManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { |
| 177 | auto& block{page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK]}; | 437 | switch (vma.type) { |
| 178 | if (!block) { | 438 | case VirtualMemoryArea::Type::Unmapped: |
| 179 | block = std::make_unique<PageBlock>(); | 439 | UnmapRegion(vma.base, vma.size); |
| 180 | block->fill(static_cast<VAddr>(PageStatus::Unmapped)); | 440 | break; |
| 441 | case VirtualMemoryArea::Type::Allocated: | ||
| 442 | MapMemoryRegion(vma.base, vma.size, nullptr, vma.backing_addr); | ||
| 443 | break; | ||
| 444 | case VirtualMemoryArea::Type::Mapped: | ||
| 445 | MapMemoryRegion(vma.base, vma.size, vma.backing_memory, vma.backing_addr); | ||
| 446 | break; | ||
| 181 | } | 447 | } |
| 182 | return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK]; | ||
| 183 | } | 448 | } |
| 184 | 449 | ||
| 185 | } // namespace Tegra | 450 | } // namespace Tegra |
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index fb03497ca..34744bb27 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h | |||
| @@ -1,67 +1,148 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | 1 | // Copyright 2018 yuzu emulator team |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <map> |
| 8 | #include <memory> | ||
| 9 | #include <optional> | 8 | #include <optional> |
| 10 | #include <vector> | ||
| 11 | 9 | ||
| 12 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "common/page_table.h" | ||
| 13 | 12 | ||
| 14 | namespace Tegra { | 13 | namespace Tegra { |
| 15 | 14 | ||
| 16 | /// Virtual addresses in the GPU's memory map are 64 bit. | 15 | /** |
| 17 | using GPUVAddr = u64; | 16 | * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space |
| 17 | * with homogeneous attributes across its extents. In this particular implementation each VMA is | ||
| 18 | * also backed by a single host memory allocation. | ||
| 19 | */ | ||
| 20 | struct VirtualMemoryArea { | ||
| 21 | enum class Type : u8 { | ||
| 22 | Unmapped, | ||
| 23 | Allocated, | ||
| 24 | Mapped, | ||
| 25 | }; | ||
| 26 | |||
| 27 | /// Virtual base address of the region. | ||
| 28 | GPUVAddr base{}; | ||
| 29 | /// Size of the region. | ||
| 30 | u64 size{}; | ||
| 31 | /// Memory area mapping type. | ||
| 32 | Type type{Type::Unmapped}; | ||
| 33 | /// CPU memory mapped address corresponding to this memory area. | ||
| 34 | VAddr backing_addr{}; | ||
| 35 | /// Offset into the backing_memory the mapping starts from. | ||
| 36 | std::size_t offset{}; | ||
| 37 | /// Pointer backing this VMA. | ||
| 38 | u8* backing_memory{}; | ||
| 39 | |||
| 40 | /// Tests if this area can be merged to the right with `next`. | ||
| 41 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | ||
| 42 | }; | ||
| 18 | 43 | ||
| 19 | class MemoryManager final { | 44 | class MemoryManager final { |
| 20 | public: | 45 | public: |
| 21 | MemoryManager(); | 46 | MemoryManager(); |
| 22 | 47 | ||
| 23 | GPUVAddr AllocateSpace(u64 size, u64 align); | 48 | GPUVAddr AllocateSpace(u64 size, u64 align); |
| 24 | GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align); | 49 | GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align); |
| 25 | GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size); | 50 | GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size); |
| 26 | GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size); | 51 | GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size); |
| 27 | GPUVAddr UnmapBuffer(GPUVAddr gpu_addr, u64 size); | 52 | GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size); |
| 28 | GPUVAddr GetRegionEnd(GPUVAddr region_start) const; | 53 | std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr); |
| 29 | std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr); | 54 | |
| 30 | std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr) const; | 55 | template <typename T> |
| 56 | T Read(GPUVAddr addr); | ||
| 57 | |||
| 58 | template <typename T> | ||
| 59 | void Write(GPUVAddr addr, T data); | ||
| 31 | 60 | ||
| 32 | static constexpr u64 PAGE_BITS = 16; | 61 | u8* GetPointer(GPUVAddr addr); |
| 33 | static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS; | 62 | |
| 34 | static constexpr u64 PAGE_MASK = PAGE_SIZE - 1; | 63 | void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size); |
| 64 | void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size); | ||
| 65 | void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size); | ||
| 35 | 66 | ||
| 36 | private: | 67 | private: |
| 37 | enum class PageStatus : u64 { | 68 | using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>; |
| 38 | Unmapped = 0xFFFFFFFFFFFFFFFFULL, | 69 | using VMAHandle = VMAMap::const_iterator; |
| 39 | Allocated = 0xFFFFFFFFFFFFFFFEULL, | 70 | using VMAIter = VMAMap::iterator; |
| 40 | Reserved = 0xFFFFFFFFFFFFFFFDULL, | ||
| 41 | }; | ||
| 42 | 71 | ||
| 43 | std::optional<GPUVAddr> FindFreeBlock(GPUVAddr region_start, u64 size, u64 align, | 72 | bool IsAddressValid(GPUVAddr addr) const; |
| 44 | PageStatus status); | 73 | void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, |
| 45 | VAddr& PageSlot(GPUVAddr gpu_addr); | 74 | VAddr backing_addr = 0); |
| 46 | 75 | void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr); | |
| 47 | static constexpr u64 MAX_ADDRESS{0x10000000000ULL}; | 76 | void UnmapRegion(GPUVAddr base, u64 size); |
| 48 | static constexpr u64 PAGE_TABLE_BITS{10}; | 77 | |
| 49 | static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS}; | 78 | /// Finds the VMA in which the given address is included in, or `vma_map.end()`. |
| 50 | static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1}; | 79 | VMAHandle FindVMA(GPUVAddr target) const; |
| 51 | static constexpr u64 PAGE_BLOCK_BITS{14}; | 80 | |
| 52 | static constexpr u64 PAGE_BLOCK_SIZE{1 << PAGE_BLOCK_BITS}; | 81 | VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size); |
| 53 | static constexpr u64 PAGE_BLOCK_MASK{PAGE_BLOCK_SIZE - 1}; | 82 | |
| 54 | 83 | /** | |
| 55 | using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>; | 84 | * Maps an unmanaged host memory pointer at a given address. |
| 56 | std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{}; | 85 | * |
| 57 | 86 | * @param target The guest address to start the mapping at. | |
| 58 | struct MappedRegion { | 87 | * @param memory The memory to be mapped. |
| 59 | VAddr cpu_addr; | 88 | * @param size Size of the mapping. |
| 60 | GPUVAddr gpu_addr; | 89 | * @param state MemoryState tag to attach to the VMA. |
| 61 | u64 size; | 90 | */ |
| 62 | }; | 91 | VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr); |
| 92 | |||
| 93 | /// Unmaps a range of addresses, splitting VMAs as necessary. | ||
| 94 | void UnmapRange(GPUVAddr target, u64 size); | ||
| 95 | |||
| 96 | /// Converts a VMAHandle to a mutable VMAIter. | ||
| 97 | VMAIter StripIterConstness(const VMAHandle& iter); | ||
| 98 | |||
| 99 | /// Marks as the specfied VMA as allocated. | ||
| 100 | VMAIter Allocate(VMAIter vma); | ||
| 101 | |||
| 102 | /** | ||
| 103 | * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing | ||
| 104 | * the appropriate error checking. | ||
| 105 | */ | ||
| 106 | VMAIter CarveVMA(GPUVAddr base, u64 size); | ||
| 107 | |||
| 108 | /** | ||
| 109 | * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each | ||
| 110 | * end of the range. | ||
| 111 | */ | ||
| 112 | VMAIter CarveVMARange(GPUVAddr base, u64 size); | ||
| 113 | |||
| 114 | /** | ||
| 115 | * Splits a VMA in two, at the specified offset. | ||
| 116 | * @returns the right side of the split, with the original iterator becoming the left side. | ||
| 117 | */ | ||
| 118 | VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma); | ||
| 119 | |||
| 120 | /** | ||
| 121 | * Checks for and merges the specified VMA with adjacent ones if possible. | ||
| 122 | * @returns the merged VMA or the original if no merging was possible. | ||
| 123 | */ | ||
| 124 | VMAIter MergeAdjacent(VMAIter vma); | ||
| 125 | |||
| 126 | /// Updates the pages corresponding to this VMA so they match the VMA's attributes. | ||
| 127 | void UpdatePageTableForVMA(const VirtualMemoryArea& vma); | ||
| 128 | |||
| 129 | /// Finds a free (unmapped region) of the specified size starting at the specified address. | ||
| 130 | GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size); | ||
| 131 | |||
| 132 | private: | ||
| 133 | static constexpr u64 page_bits{16}; | ||
| 134 | static constexpr u64 page_size{1 << page_bits}; | ||
| 135 | static constexpr u64 page_mask{page_size - 1}; | ||
| 136 | |||
| 137 | /// Address space in bits, this is fairly arbitrary but sufficiently large. | ||
| 138 | static constexpr u32 address_space_width{39}; | ||
| 139 | /// Start address for mapping, this is fairly arbitrary but must be non-zero. | ||
| 140 | static constexpr GPUVAddr address_space_base{0x100000}; | ||
| 141 | /// End of address space, based on address space in bits. | ||
| 142 | static constexpr GPUVAddr address_space_end{1ULL << address_space_width}; | ||
| 63 | 143 | ||
| 64 | std::vector<MappedRegion> mapped_regions; | 144 | Common::PageTable page_table{page_bits}; |
| 145 | VMAMap vma_map; | ||
| 65 | }; | 146 | }; |
| 66 | 147 | ||
| 67 | } // namespace Tegra | 148 | } // namespace Tegra |
diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp index b68f4fb13..3e91cbc83 100644 --- a/src/video_core/morton.cpp +++ b/src/video_core/morton.cpp | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | #include <cstring> | 6 | #include <cstring> |
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/memory.h" | ||
| 10 | #include "video_core/morton.h" | 9 | #include "video_core/morton.h" |
| 11 | #include "video_core/surface.h" | 10 | #include "video_core/surface.h" |
| 12 | #include "video_core/textures/decoders.h" | 11 | #include "video_core/textures/decoders.h" |
| @@ -16,12 +15,12 @@ namespace VideoCore { | |||
| 16 | using Surface::GetBytesPerPixel; | 15 | using Surface::GetBytesPerPixel; |
| 17 | using Surface::PixelFormat; | 16 | using Surface::PixelFormat; |
| 18 | 17 | ||
| 19 | using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, std::size_t, VAddr); | 18 | using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, u8*); |
| 20 | using ConversionArray = std::array<MortonCopyFn, Surface::MaxPixelFormat>; | 19 | using ConversionArray = std::array<MortonCopyFn, Surface::MaxPixelFormat>; |
| 21 | 20 | ||
| 22 | template <bool morton_to_linear, PixelFormat format> | 21 | template <bool morton_to_linear, PixelFormat format> |
| 23 | static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, | 22 | static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, |
| 24 | u32 tile_width_spacing, u8* buffer, std::size_t buffer_size, VAddr addr) { | 23 | u32 tile_width_spacing, u8* buffer, u8* addr) { |
| 25 | constexpr u32 bytes_per_pixel = GetBytesPerPixel(format); | 24 | constexpr u32 bytes_per_pixel = GetBytesPerPixel(format); |
| 26 | 25 | ||
| 27 | // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual | 26 | // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual |
| @@ -34,150 +33,146 @@ static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth | |||
| 34 | stride, height, depth, block_height, block_depth, | 33 | stride, height, depth, block_height, block_depth, |
| 35 | tile_width_spacing); | 34 | tile_width_spacing); |
| 36 | } else { | 35 | } else { |
| 37 | Tegra::Texture::CopySwizzledData( | 36 | Tegra::Texture::CopySwizzledData((stride + tile_size_x - 1) / tile_size_x, |
| 38 | (stride + tile_size_x - 1) / tile_size_x, (height + tile_size_y - 1) / tile_size_y, | 37 | (height + tile_size_y - 1) / tile_size_y, depth, |
| 39 | depth, bytes_per_pixel, bytes_per_pixel, Memory::GetPointer(addr), buffer, false, | 38 | bytes_per_pixel, bytes_per_pixel, addr, buffer, false, |
| 40 | block_height, block_depth, tile_width_spacing); | 39 | block_height, block_depth, tile_width_spacing); |
| 41 | } | 40 | } |
| 42 | } | 41 | } |
| 43 | 42 | ||
| 44 | static constexpr ConversionArray morton_to_linear_fns = { | 43 | static constexpr ConversionArray morton_to_linear_fns = { |
| 45 | // clang-format off | 44 | MortonCopy<true, PixelFormat::ABGR8U>, |
| 46 | MortonCopy<true, PixelFormat::ABGR8U>, | 45 | MortonCopy<true, PixelFormat::ABGR8S>, |
| 47 | MortonCopy<true, PixelFormat::ABGR8S>, | 46 | MortonCopy<true, PixelFormat::ABGR8UI>, |
| 48 | MortonCopy<true, PixelFormat::ABGR8UI>, | 47 | MortonCopy<true, PixelFormat::B5G6R5U>, |
| 49 | MortonCopy<true, PixelFormat::B5G6R5U>, | 48 | MortonCopy<true, PixelFormat::A2B10G10R10U>, |
| 50 | MortonCopy<true, PixelFormat::A2B10G10R10U>, | 49 | MortonCopy<true, PixelFormat::A1B5G5R5U>, |
| 51 | MortonCopy<true, PixelFormat::A1B5G5R5U>, | 50 | MortonCopy<true, PixelFormat::R8U>, |
| 52 | MortonCopy<true, PixelFormat::R8U>, | 51 | MortonCopy<true, PixelFormat::R8UI>, |
| 53 | MortonCopy<true, PixelFormat::R8UI>, | 52 | MortonCopy<true, PixelFormat::RGBA16F>, |
| 54 | MortonCopy<true, PixelFormat::RGBA16F>, | 53 | MortonCopy<true, PixelFormat::RGBA16U>, |
| 55 | MortonCopy<true, PixelFormat::RGBA16U>, | 54 | MortonCopy<true, PixelFormat::RGBA16UI>, |
| 56 | MortonCopy<true, PixelFormat::RGBA16UI>, | 55 | MortonCopy<true, PixelFormat::R11FG11FB10F>, |
| 57 | MortonCopy<true, PixelFormat::R11FG11FB10F>, | 56 | MortonCopy<true, PixelFormat::RGBA32UI>, |
| 58 | MortonCopy<true, PixelFormat::RGBA32UI>, | 57 | MortonCopy<true, PixelFormat::DXT1>, |
| 59 | MortonCopy<true, PixelFormat::DXT1>, | 58 | MortonCopy<true, PixelFormat::DXT23>, |
| 60 | MortonCopy<true, PixelFormat::DXT23>, | 59 | MortonCopy<true, PixelFormat::DXT45>, |
| 61 | MortonCopy<true, PixelFormat::DXT45>, | 60 | MortonCopy<true, PixelFormat::DXN1>, |
| 62 | MortonCopy<true, PixelFormat::DXN1>, | 61 | MortonCopy<true, PixelFormat::DXN2UNORM>, |
| 63 | MortonCopy<true, PixelFormat::DXN2UNORM>, | 62 | MortonCopy<true, PixelFormat::DXN2SNORM>, |
| 64 | MortonCopy<true, PixelFormat::DXN2SNORM>, | 63 | MortonCopy<true, PixelFormat::BC7U>, |
| 65 | MortonCopy<true, PixelFormat::BC7U>, | 64 | MortonCopy<true, PixelFormat::BC6H_UF16>, |
| 66 | MortonCopy<true, PixelFormat::BC6H_UF16>, | 65 | MortonCopy<true, PixelFormat::BC6H_SF16>, |
| 67 | MortonCopy<true, PixelFormat::BC6H_SF16>, | 66 | MortonCopy<true, PixelFormat::ASTC_2D_4X4>, |
| 68 | MortonCopy<true, PixelFormat::ASTC_2D_4X4>, | 67 | MortonCopy<true, PixelFormat::BGRA8>, |
| 69 | MortonCopy<true, PixelFormat::BGRA8>, | 68 | MortonCopy<true, PixelFormat::RGBA32F>, |
| 70 | MortonCopy<true, PixelFormat::RGBA32F>, | 69 | MortonCopy<true, PixelFormat::RG32F>, |
| 71 | MortonCopy<true, PixelFormat::RG32F>, | 70 | MortonCopy<true, PixelFormat::R32F>, |
| 72 | MortonCopy<true, PixelFormat::R32F>, | 71 | MortonCopy<true, PixelFormat::R16F>, |
| 73 | MortonCopy<true, PixelFormat::R16F>, | 72 | MortonCopy<true, PixelFormat::R16U>, |
| 74 | MortonCopy<true, PixelFormat::R16U>, | 73 | MortonCopy<true, PixelFormat::R16S>, |
| 75 | MortonCopy<true, PixelFormat::R16S>, | 74 | MortonCopy<true, PixelFormat::R16UI>, |
| 76 | MortonCopy<true, PixelFormat::R16UI>, | 75 | MortonCopy<true, PixelFormat::R16I>, |
| 77 | MortonCopy<true, PixelFormat::R16I>, | 76 | MortonCopy<true, PixelFormat::RG16>, |
| 78 | MortonCopy<true, PixelFormat::RG16>, | 77 | MortonCopy<true, PixelFormat::RG16F>, |
| 79 | MortonCopy<true, PixelFormat::RG16F>, | 78 | MortonCopy<true, PixelFormat::RG16UI>, |
| 80 | MortonCopy<true, PixelFormat::RG16UI>, | 79 | MortonCopy<true, PixelFormat::RG16I>, |
| 81 | MortonCopy<true, PixelFormat::RG16I>, | 80 | MortonCopy<true, PixelFormat::RG16S>, |
| 82 | MortonCopy<true, PixelFormat::RG16S>, | 81 | MortonCopy<true, PixelFormat::RGB32F>, |
| 83 | MortonCopy<true, PixelFormat::RGB32F>, | 82 | MortonCopy<true, PixelFormat::RGBA8_SRGB>, |
| 84 | MortonCopy<true, PixelFormat::RGBA8_SRGB>, | 83 | MortonCopy<true, PixelFormat::RG8U>, |
| 85 | MortonCopy<true, PixelFormat::RG8U>, | 84 | MortonCopy<true, PixelFormat::RG8S>, |
| 86 | MortonCopy<true, PixelFormat::RG8S>, | 85 | MortonCopy<true, PixelFormat::RG32UI>, |
| 87 | MortonCopy<true, PixelFormat::RG32UI>, | 86 | MortonCopy<true, PixelFormat::R32UI>, |
| 88 | MortonCopy<true, PixelFormat::R32UI>, | 87 | MortonCopy<true, PixelFormat::ASTC_2D_8X8>, |
| 89 | MortonCopy<true, PixelFormat::ASTC_2D_8X8>, | 88 | MortonCopy<true, PixelFormat::ASTC_2D_8X5>, |
| 90 | MortonCopy<true, PixelFormat::ASTC_2D_8X5>, | 89 | MortonCopy<true, PixelFormat::ASTC_2D_5X4>, |
| 91 | MortonCopy<true, PixelFormat::ASTC_2D_5X4>, | 90 | MortonCopy<true, PixelFormat::BGRA8_SRGB>, |
| 92 | MortonCopy<true, PixelFormat::BGRA8_SRGB>, | 91 | MortonCopy<true, PixelFormat::DXT1_SRGB>, |
| 93 | MortonCopy<true, PixelFormat::DXT1_SRGB>, | 92 | MortonCopy<true, PixelFormat::DXT23_SRGB>, |
| 94 | MortonCopy<true, PixelFormat::DXT23_SRGB>, | 93 | MortonCopy<true, PixelFormat::DXT45_SRGB>, |
| 95 | MortonCopy<true, PixelFormat::DXT45_SRGB>, | 94 | MortonCopy<true, PixelFormat::BC7U_SRGB>, |
| 96 | MortonCopy<true, PixelFormat::BC7U_SRGB>, | 95 | MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, |
| 97 | MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, | 96 | MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, |
| 98 | MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, | 97 | MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, |
| 99 | MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, | 98 | MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, |
| 100 | MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, | 99 | MortonCopy<true, PixelFormat::ASTC_2D_5X5>, |
| 101 | MortonCopy<true, PixelFormat::ASTC_2D_5X5>, | 100 | MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, |
| 102 | MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, | 101 | MortonCopy<true, PixelFormat::ASTC_2D_10X8>, |
| 103 | MortonCopy<true, PixelFormat::ASTC_2D_10X8>, | 102 | MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, |
| 104 | MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, | 103 | MortonCopy<true, PixelFormat::Z32F>, |
| 105 | MortonCopy<true, PixelFormat::Z32F>, | 104 | MortonCopy<true, PixelFormat::Z16>, |
| 106 | MortonCopy<true, PixelFormat::Z16>, | 105 | MortonCopy<true, PixelFormat::Z24S8>, |
| 107 | MortonCopy<true, PixelFormat::Z24S8>, | 106 | MortonCopy<true, PixelFormat::S8Z24>, |
| 108 | MortonCopy<true, PixelFormat::S8Z24>, | 107 | MortonCopy<true, PixelFormat::Z32FS8>, |
| 109 | MortonCopy<true, PixelFormat::Z32FS8>, | ||
| 110 | // clang-format on | ||
| 111 | }; | 108 | }; |
| 112 | 109 | ||
| 113 | static constexpr ConversionArray linear_to_morton_fns = { | 110 | static constexpr ConversionArray linear_to_morton_fns = { |
| 114 | // clang-format off | 111 | MortonCopy<false, PixelFormat::ABGR8U>, |
| 115 | MortonCopy<false, PixelFormat::ABGR8U>, | 112 | MortonCopy<false, PixelFormat::ABGR8S>, |
| 116 | MortonCopy<false, PixelFormat::ABGR8S>, | 113 | MortonCopy<false, PixelFormat::ABGR8UI>, |
| 117 | MortonCopy<false, PixelFormat::ABGR8UI>, | 114 | MortonCopy<false, PixelFormat::B5G6R5U>, |
| 118 | MortonCopy<false, PixelFormat::B5G6R5U>, | 115 | MortonCopy<false, PixelFormat::A2B10G10R10U>, |
| 119 | MortonCopy<false, PixelFormat::A2B10G10R10U>, | 116 | MortonCopy<false, PixelFormat::A1B5G5R5U>, |
| 120 | MortonCopy<false, PixelFormat::A1B5G5R5U>, | 117 | MortonCopy<false, PixelFormat::R8U>, |
| 121 | MortonCopy<false, PixelFormat::R8U>, | 118 | MortonCopy<false, PixelFormat::R8UI>, |
| 122 | MortonCopy<false, PixelFormat::R8UI>, | 119 | MortonCopy<false, PixelFormat::RGBA16F>, |
| 123 | MortonCopy<false, PixelFormat::RGBA16F>, | 120 | MortonCopy<false, PixelFormat::RGBA16U>, |
| 124 | MortonCopy<false, PixelFormat::RGBA16U>, | 121 | MortonCopy<false, PixelFormat::RGBA16UI>, |
| 125 | MortonCopy<false, PixelFormat::RGBA16UI>, | 122 | MortonCopy<false, PixelFormat::R11FG11FB10F>, |
| 126 | MortonCopy<false, PixelFormat::R11FG11FB10F>, | 123 | MortonCopy<false, PixelFormat::RGBA32UI>, |
| 127 | MortonCopy<false, PixelFormat::RGBA32UI>, | 124 | MortonCopy<false, PixelFormat::DXT1>, |
| 128 | MortonCopy<false, PixelFormat::DXT1>, | 125 | MortonCopy<false, PixelFormat::DXT23>, |
| 129 | MortonCopy<false, PixelFormat::DXT23>, | 126 | MortonCopy<false, PixelFormat::DXT45>, |
| 130 | MortonCopy<false, PixelFormat::DXT45>, | 127 | MortonCopy<false, PixelFormat::DXN1>, |
| 131 | MortonCopy<false, PixelFormat::DXN1>, | 128 | MortonCopy<false, PixelFormat::DXN2UNORM>, |
| 132 | MortonCopy<false, PixelFormat::DXN2UNORM>, | 129 | MortonCopy<false, PixelFormat::DXN2SNORM>, |
| 133 | MortonCopy<false, PixelFormat::DXN2SNORM>, | 130 | MortonCopy<false, PixelFormat::BC7U>, |
| 134 | MortonCopy<false, PixelFormat::BC7U>, | 131 | MortonCopy<false, PixelFormat::BC6H_UF16>, |
| 135 | MortonCopy<false, PixelFormat::BC6H_UF16>, | 132 | MortonCopy<false, PixelFormat::BC6H_SF16>, |
| 136 | MortonCopy<false, PixelFormat::BC6H_SF16>, | 133 | // TODO(Subv): Swizzling ASTC formats are not supported |
| 137 | // TODO(Subv): Swizzling ASTC formats are not supported | 134 | nullptr, |
| 138 | nullptr, | 135 | MortonCopy<false, PixelFormat::BGRA8>, |
| 139 | MortonCopy<false, PixelFormat::BGRA8>, | 136 | MortonCopy<false, PixelFormat::RGBA32F>, |
| 140 | MortonCopy<false, PixelFormat::RGBA32F>, | 137 | MortonCopy<false, PixelFormat::RG32F>, |
| 141 | MortonCopy<false, PixelFormat::RG32F>, | 138 | MortonCopy<false, PixelFormat::R32F>, |
| 142 | MortonCopy<false, PixelFormat::R32F>, | 139 | MortonCopy<false, PixelFormat::R16F>, |
| 143 | MortonCopy<false, PixelFormat::R16F>, | 140 | MortonCopy<false, PixelFormat::R16U>, |
| 144 | MortonCopy<false, PixelFormat::R16U>, | 141 | MortonCopy<false, PixelFormat::R16S>, |
| 145 | MortonCopy<false, PixelFormat::R16S>, | 142 | MortonCopy<false, PixelFormat::R16UI>, |
| 146 | MortonCopy<false, PixelFormat::R16UI>, | 143 | MortonCopy<false, PixelFormat::R16I>, |
| 147 | MortonCopy<false, PixelFormat::R16I>, | 144 | MortonCopy<false, PixelFormat::RG16>, |
| 148 | MortonCopy<false, PixelFormat::RG16>, | 145 | MortonCopy<false, PixelFormat::RG16F>, |
| 149 | MortonCopy<false, PixelFormat::RG16F>, | 146 | MortonCopy<false, PixelFormat::RG16UI>, |
| 150 | MortonCopy<false, PixelFormat::RG16UI>, | 147 | MortonCopy<false, PixelFormat::RG16I>, |
| 151 | MortonCopy<false, PixelFormat::RG16I>, | 148 | MortonCopy<false, PixelFormat::RG16S>, |
| 152 | MortonCopy<false, PixelFormat::RG16S>, | 149 | MortonCopy<false, PixelFormat::RGB32F>, |
| 153 | MortonCopy<false, PixelFormat::RGB32F>, | 150 | MortonCopy<false, PixelFormat::RGBA8_SRGB>, |
| 154 | MortonCopy<false, PixelFormat::RGBA8_SRGB>, | 151 | MortonCopy<false, PixelFormat::RG8U>, |
| 155 | MortonCopy<false, PixelFormat::RG8U>, | 152 | MortonCopy<false, PixelFormat::RG8S>, |
| 156 | MortonCopy<false, PixelFormat::RG8S>, | 153 | MortonCopy<false, PixelFormat::RG32UI>, |
| 157 | MortonCopy<false, PixelFormat::RG32UI>, | 154 | MortonCopy<false, PixelFormat::R32UI>, |
| 158 | MortonCopy<false, PixelFormat::R32UI>, | 155 | nullptr, |
| 159 | nullptr, | 156 | nullptr, |
| 160 | nullptr, | 157 | nullptr, |
| 161 | nullptr, | 158 | MortonCopy<false, PixelFormat::BGRA8_SRGB>, |
| 162 | MortonCopy<false, PixelFormat::BGRA8_SRGB>, | 159 | MortonCopy<false, PixelFormat::DXT1_SRGB>, |
| 163 | MortonCopy<false, PixelFormat::DXT1_SRGB>, | 160 | MortonCopy<false, PixelFormat::DXT23_SRGB>, |
| 164 | MortonCopy<false, PixelFormat::DXT23_SRGB>, | 161 | MortonCopy<false, PixelFormat::DXT45_SRGB>, |
| 165 | MortonCopy<false, PixelFormat::DXT45_SRGB>, | 162 | MortonCopy<false, PixelFormat::BC7U_SRGB>, |
| 166 | MortonCopy<false, PixelFormat::BC7U_SRGB>, | 163 | nullptr, |
| 167 | nullptr, | 164 | nullptr, |
| 168 | nullptr, | 165 | nullptr, |
| 169 | nullptr, | 166 | nullptr, |
| 170 | nullptr, | 167 | nullptr, |
| 171 | nullptr, | 168 | nullptr, |
| 172 | nullptr, | 169 | nullptr, |
| 173 | nullptr, | 170 | nullptr, |
| 174 | nullptr, | 171 | MortonCopy<false, PixelFormat::Z32F>, |
| 175 | MortonCopy<false, PixelFormat::Z32F>, | 172 | MortonCopy<false, PixelFormat::Z16>, |
| 176 | MortonCopy<false, PixelFormat::Z16>, | 173 | MortonCopy<false, PixelFormat::Z24S8>, |
| 177 | MortonCopy<false, PixelFormat::Z24S8>, | 174 | MortonCopy<false, PixelFormat::S8Z24>, |
| 178 | MortonCopy<false, PixelFormat::S8Z24>, | 175 | MortonCopy<false, PixelFormat::Z32FS8>, |
| 179 | MortonCopy<false, PixelFormat::Z32FS8>, | ||
| 180 | // clang-format on | ||
| 181 | }; | 176 | }; |
| 182 | 177 | ||
| 183 | static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFormat format) { | 178 | static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFormat format) { |
| @@ -191,45 +186,6 @@ static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFor | |||
| 191 | return morton_to_linear_fns[static_cast<std::size_t>(format)]; | 186 | return morton_to_linear_fns[static_cast<std::size_t>(format)]; |
| 192 | } | 187 | } |
| 193 | 188 | ||
| 194 | /// 8x8 Z-Order coordinate from 2D coordinates | ||
| 195 | static u32 MortonInterleave(u32 x, u32 y) { | ||
| 196 | static const u32 xlut[] = {0x00, 0x01, 0x04, 0x05, 0x10, 0x11, 0x14, 0x15}; | ||
| 197 | static const u32 ylut[] = {0x00, 0x02, 0x08, 0x0a, 0x20, 0x22, 0x28, 0x2a}; | ||
| 198 | return xlut[x % 8] + ylut[y % 8]; | ||
| 199 | } | ||
| 200 | |||
| 201 | /// Calculates the offset of the position of the pixel in Morton order | ||
| 202 | static u32 GetMortonOffset(u32 x, u32 y, u32 bytes_per_pixel) { | ||
| 203 | // Images are split into 8x8 tiles. Each tile is composed of four 4x4 subtiles each | ||
| 204 | // of which is composed of four 2x2 subtiles each of which is composed of four texels. | ||
| 205 | // Each structure is embedded into the next-bigger one in a diagonal pattern, e.g. | ||
| 206 | // texels are laid out in a 2x2 subtile like this: | ||
| 207 | // 2 3 | ||
| 208 | // 0 1 | ||
| 209 | // | ||
| 210 | // The full 8x8 tile has the texels arranged like this: | ||
| 211 | // | ||
| 212 | // 42 43 46 47 58 59 62 63 | ||
| 213 | // 40 41 44 45 56 57 60 61 | ||
| 214 | // 34 35 38 39 50 51 54 55 | ||
| 215 | // 32 33 36 37 48 49 52 53 | ||
| 216 | // 10 11 14 15 26 27 30 31 | ||
| 217 | // 08 09 12 13 24 25 28 29 | ||
| 218 | // 02 03 06 07 18 19 22 23 | ||
| 219 | // 00 01 04 05 16 17 20 21 | ||
| 220 | // | ||
| 221 | // This pattern is what's called Z-order curve, or Morton order. | ||
| 222 | |||
| 223 | const unsigned int block_height = 8; | ||
| 224 | const unsigned int coarse_x = x & ~7; | ||
| 225 | |||
| 226 | u32 i = MortonInterleave(x, y); | ||
| 227 | |||
| 228 | const unsigned int offset = coarse_x * block_height; | ||
| 229 | |||
| 230 | return (i + offset) * bytes_per_pixel; | ||
| 231 | } | ||
| 232 | |||
| 233 | static u32 MortonInterleave128(u32 x, u32 y) { | 189 | static u32 MortonInterleave128(u32 x, u32 y) { |
| 234 | // 128x128 Z-Order coordinate from 2D coordinates | 190 | // 128x128 Z-Order coordinate from 2D coordinates |
| 235 | static constexpr u32 xlut[] = { | 191 | static constexpr u32 xlut[] = { |
| @@ -325,14 +281,14 @@ static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) { | |||
| 325 | 281 | ||
| 326 | void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride, | 282 | void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride, |
| 327 | u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, | 283 | u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, |
| 328 | u8* buffer, std::size_t buffer_size, VAddr addr) { | 284 | u8* buffer, u8* addr) { |
| 329 | |||
| 330 | GetSwizzleFunction(mode, format)(stride, block_height, height, block_depth, depth, | 285 | GetSwizzleFunction(mode, format)(stride, block_height, height, block_depth, depth, |
| 331 | tile_width_spacing, buffer, buffer_size, addr); | 286 | tile_width_spacing, buffer, addr); |
| 332 | } | 287 | } |
| 333 | 288 | ||
| 334 | void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, | 289 | void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, |
| 335 | u8* morton_data, u8* linear_data, bool morton_to_linear) { | 290 | u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data) { |
| 291 | const bool morton_to_linear = mode == MortonSwizzleMode::MortonToLinear; | ||
| 336 | u8* data_ptrs[2]; | 292 | u8* data_ptrs[2]; |
| 337 | for (u32 y = 0; y < height; ++y) { | 293 | for (u32 y = 0; y < height; ++y) { |
| 338 | for (u32 x = 0; x < width; ++x) { | 294 | for (u32 x = 0; x < width; ++x) { |
diff --git a/src/video_core/morton.h b/src/video_core/morton.h index 065f59ce3..ee5b45555 100644 --- a/src/video_core/morton.h +++ b/src/video_core/morton.h | |||
| @@ -13,9 +13,9 @@ enum class MortonSwizzleMode { MortonToLinear, LinearToMorton }; | |||
| 13 | 13 | ||
| 14 | void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat format, u32 stride, | 14 | void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat format, u32 stride, |
| 15 | u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, | 15 | u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, |
| 16 | u8* buffer, std::size_t buffer_size, VAddr addr); | 16 | u8* buffer, u8* addr); |
| 17 | 17 | ||
| 18 | void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, | 18 | void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, |
| 19 | u8* morton_data, u8* linear_data, bool morton_to_linear); | 19 | u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data); |
| 20 | 20 | ||
| 21 | } // namespace VideoCore | 21 | } // namespace VideoCore |
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index a7bcf26fb..9fc9f3056 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <mutex> | ||
| 7 | #include <set> | 8 | #include <set> |
| 8 | #include <unordered_map> | 9 | #include <unordered_map> |
| 9 | 10 | ||
| @@ -12,14 +13,26 @@ | |||
| 12 | 13 | ||
| 13 | #include "common/common_types.h" | 14 | #include "common/common_types.h" |
| 14 | #include "core/settings.h" | 15 | #include "core/settings.h" |
| 16 | #include "video_core/gpu.h" | ||
| 15 | #include "video_core/rasterizer_interface.h" | 17 | #include "video_core/rasterizer_interface.h" |
| 16 | 18 | ||
| 17 | class RasterizerCacheObject { | 19 | class RasterizerCacheObject { |
| 18 | public: | 20 | public: |
| 21 | explicit RasterizerCacheObject(const u8* host_ptr) | ||
| 22 | : host_ptr{host_ptr}, cache_addr{ToCacheAddr(host_ptr)} {} | ||
| 23 | |||
| 19 | virtual ~RasterizerCacheObject(); | 24 | virtual ~RasterizerCacheObject(); |
| 20 | 25 | ||
| 26 | CacheAddr GetCacheAddr() const { | ||
| 27 | return cache_addr; | ||
| 28 | } | ||
| 29 | |||
| 30 | const u8* GetHostPtr() const { | ||
| 31 | return host_ptr; | ||
| 32 | } | ||
| 33 | |||
| 21 | /// Gets the address of the shader in guest memory, required for cache management | 34 | /// Gets the address of the shader in guest memory, required for cache management |
| 22 | virtual VAddr GetAddr() const = 0; | 35 | virtual VAddr GetCpuAddr() const = 0; |
| 23 | 36 | ||
| 24 | /// Gets the size of the shader in guest memory, required for cache management | 37 | /// Gets the size of the shader in guest memory, required for cache management |
| 25 | virtual std::size_t GetSizeInBytes() const = 0; | 38 | virtual std::size_t GetSizeInBytes() const = 0; |
| @@ -58,6 +71,8 @@ private: | |||
| 58 | bool is_registered{}; ///< Whether the object is currently registered with the cache | 71 | bool is_registered{}; ///< Whether the object is currently registered with the cache |
| 59 | bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) | 72 | bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) |
| 60 | u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing | 73 | u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing |
| 74 | CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space | ||
| 75 | const u8* host_ptr{}; ///< Pointer to the memory backing this cached region | ||
| 61 | }; | 76 | }; |
| 62 | 77 | ||
| 63 | template <class T> | 78 | template <class T> |
| @@ -68,7 +83,9 @@ public: | |||
| 68 | explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {} | 83 | explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {} |
| 69 | 84 | ||
| 70 | /// Write any cached resources overlapping the specified region back to memory | 85 | /// Write any cached resources overlapping the specified region back to memory |
| 71 | void FlushRegion(Tegra::GPUVAddr addr, size_t size) { | 86 | void FlushRegion(CacheAddr addr, std::size_t size) { |
| 87 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 88 | |||
| 72 | const auto& objects{GetSortedObjectsFromRegion(addr, size)}; | 89 | const auto& objects{GetSortedObjectsFromRegion(addr, size)}; |
| 73 | for (auto& object : objects) { | 90 | for (auto& object : objects) { |
| 74 | FlushObject(object); | 91 | FlushObject(object); |
| @@ -76,7 +93,9 @@ public: | |||
| 76 | } | 93 | } |
| 77 | 94 | ||
| 78 | /// Mark the specified region as being invalidated | 95 | /// Mark the specified region as being invalidated |
| 79 | void InvalidateRegion(VAddr addr, u64 size) { | 96 | void InvalidateRegion(CacheAddr addr, u64 size) { |
| 97 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 98 | |||
| 80 | const auto& objects{GetSortedObjectsFromRegion(addr, size)}; | 99 | const auto& objects{GetSortedObjectsFromRegion(addr, size)}; |
| 81 | for (auto& object : objects) { | 100 | for (auto& object : objects) { |
| 82 | if (!object->IsRegistered()) { | 101 | if (!object->IsRegistered()) { |
| @@ -89,48 +108,60 @@ public: | |||
| 89 | 108 | ||
| 90 | /// Invalidates everything in the cache | 109 | /// Invalidates everything in the cache |
| 91 | void InvalidateAll() { | 110 | void InvalidateAll() { |
| 111 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 112 | |||
| 92 | while (interval_cache.begin() != interval_cache.end()) { | 113 | while (interval_cache.begin() != interval_cache.end()) { |
| 93 | Unregister(*interval_cache.begin()->second.begin()); | 114 | Unregister(*interval_cache.begin()->second.begin()); |
| 94 | } | 115 | } |
| 95 | } | 116 | } |
| 96 | 117 | ||
| 97 | protected: | 118 | protected: |
| 98 | /// Tries to get an object from the cache with the specified address | 119 | /// Tries to get an object from the cache with the specified cache address |
| 99 | T TryGet(VAddr addr) const { | 120 | T TryGet(CacheAddr addr) const { |
| 100 | const auto iter = map_cache.find(addr); | 121 | const auto iter = map_cache.find(addr); |
| 101 | if (iter != map_cache.end()) | 122 | if (iter != map_cache.end()) |
| 102 | return iter->second; | 123 | return iter->second; |
| 103 | return nullptr; | 124 | return nullptr; |
| 104 | } | 125 | } |
| 105 | 126 | ||
| 127 | T TryGet(const void* addr) const { | ||
| 128 | const auto iter = map_cache.find(ToCacheAddr(addr)); | ||
| 129 | if (iter != map_cache.end()) | ||
| 130 | return iter->second; | ||
| 131 | return nullptr; | ||
| 132 | } | ||
| 133 | |||
| 106 | /// Register an object into the cache | 134 | /// Register an object into the cache |
| 107 | void Register(const T& object) { | 135 | virtual void Register(const T& object) { |
| 136 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 137 | |||
| 108 | object->SetIsRegistered(true); | 138 | object->SetIsRegistered(true); |
| 109 | interval_cache.add({GetInterval(object), ObjectSet{object}}); | 139 | interval_cache.add({GetInterval(object), ObjectSet{object}}); |
| 110 | map_cache.insert({object->GetAddr(), object}); | 140 | map_cache.insert({object->GetCacheAddr(), object}); |
| 111 | rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1); | 141 | rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), 1); |
| 112 | } | 142 | } |
| 113 | 143 | ||
| 114 | /// Unregisters an object from the cache | 144 | /// Unregisters an object from the cache |
| 115 | void Unregister(const T& object) { | 145 | virtual void Unregister(const T& object) { |
| 116 | object->SetIsRegistered(false); | 146 | std::lock_guard<std::recursive_mutex> lock{mutex}; |
| 117 | rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1); | ||
| 118 | // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit | ||
| 119 | if (Settings::values.use_accurate_gpu_emulation) { | ||
| 120 | FlushObject(object); | ||
| 121 | } | ||
| 122 | 147 | ||
| 148 | object->SetIsRegistered(false); | ||
| 149 | rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1); | ||
| 123 | interval_cache.subtract({GetInterval(object), ObjectSet{object}}); | 150 | interval_cache.subtract({GetInterval(object), ObjectSet{object}}); |
| 124 | map_cache.erase(object->GetAddr()); | 151 | map_cache.erase(object->GetCacheAddr()); |
| 125 | } | 152 | } |
| 126 | 153 | ||
| 127 | /// Returns a ticks counter used for tracking when cached objects were last modified | 154 | /// Returns a ticks counter used for tracking when cached objects were last modified |
| 128 | u64 GetModifiedTicks() { | 155 | u64 GetModifiedTicks() { |
| 156 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 157 | |||
| 129 | return ++modified_ticks; | 158 | return ++modified_ticks; |
| 130 | } | 159 | } |
| 131 | 160 | ||
| 132 | /// Flushes the specified object, updating appropriate cache state as needed | 161 | /// Flushes the specified object, updating appropriate cache state as needed |
| 133 | void FlushObject(const T& object) { | 162 | void FlushObject(const T& object) { |
| 163 | std::lock_guard<std::recursive_mutex> lock{mutex}; | ||
| 164 | |||
| 134 | if (!object->IsDirty()) { | 165 | if (!object->IsDirty()) { |
| 135 | return; | 166 | return; |
| 136 | } | 167 | } |
| @@ -140,7 +171,7 @@ protected: | |||
| 140 | 171 | ||
| 141 | private: | 172 | private: |
| 142 | /// Returns a list of cached objects from the specified memory region, ordered by access time | 173 | /// Returns a list of cached objects from the specified memory region, ordered by access time |
| 143 | std::vector<T> GetSortedObjectsFromRegion(VAddr addr, u64 size) { | 174 | std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) { |
| 144 | if (size == 0) { | 175 | if (size == 0) { |
| 145 | return {}; | 176 | return {}; |
| 146 | } | 177 | } |
| @@ -164,17 +195,18 @@ private: | |||
| 164 | } | 195 | } |
| 165 | 196 | ||
| 166 | using ObjectSet = std::set<T>; | 197 | using ObjectSet = std::set<T>; |
| 167 | using ObjectCache = std::unordered_map<VAddr, T>; | 198 | using ObjectCache = std::unordered_map<CacheAddr, T>; |
| 168 | using IntervalCache = boost::icl::interval_map<VAddr, ObjectSet>; | 199 | using IntervalCache = boost::icl::interval_map<CacheAddr, ObjectSet>; |
| 169 | using ObjectInterval = typename IntervalCache::interval_type; | 200 | using ObjectInterval = typename IntervalCache::interval_type; |
| 170 | 201 | ||
| 171 | static auto GetInterval(const T& object) { | 202 | static auto GetInterval(const T& object) { |
| 172 | return ObjectInterval::right_open(object->GetAddr(), | 203 | return ObjectInterval::right_open(object->GetCacheAddr(), |
| 173 | object->GetAddr() + object->GetSizeInBytes()); | 204 | object->GetCacheAddr() + object->GetSizeInBytes()); |
| 174 | } | 205 | } |
| 175 | 206 | ||
| 176 | ObjectCache map_cache; | 207 | ObjectCache map_cache; |
| 177 | IntervalCache interval_cache; ///< Cache of objects | 208 | IntervalCache interval_cache; ///< Cache of objects |
| 178 | u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing | 209 | u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing |
| 179 | VideoCore::RasterizerInterface& rasterizer; | 210 | VideoCore::RasterizerInterface& rasterizer; |
| 211 | std::recursive_mutex mutex; | ||
| 180 | }; | 212 | }; |
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 6a1dc9cf6..d7b86df38 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "video_core/engines/fermi_2d.h" | 10 | #include "video_core/engines/fermi_2d.h" |
| 11 | #include "video_core/gpu.h" | 11 | #include "video_core/gpu.h" |
| 12 | #include "video_core/memory_manager.h" | ||
| 13 | 12 | ||
| 14 | namespace VideoCore { | 13 | namespace VideoCore { |
| 15 | 14 | ||
| @@ -35,14 +34,14 @@ public: | |||
| 35 | virtual void FlushAll() = 0; | 34 | virtual void FlushAll() = 0; |
| 36 | 35 | ||
| 37 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 36 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 38 | virtual void FlushRegion(VAddr addr, u64 size) = 0; | 37 | virtual void FlushRegion(CacheAddr addr, u64 size) = 0; |
| 39 | 38 | ||
| 40 | /// Notify rasterizer that any caches of the specified region should be invalidated | 39 | /// Notify rasterizer that any caches of the specified region should be invalidated |
| 41 | virtual void InvalidateRegion(VAddr addr, u64 size) = 0; | 40 | virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0; |
| 42 | 41 | ||
| 43 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 42 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 44 | /// and invalidated | 43 | /// and invalidated |
| 45 | virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; | 44 | virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; |
| 46 | 45 | ||
| 47 | /// Attempt to use a faster method to perform a surface copy | 46 | /// Attempt to use a faster method to perform a surface copy |
| 48 | virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, | 47 | virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, |
| @@ -63,7 +62,7 @@ public: | |||
| 63 | } | 62 | } |
| 64 | 63 | ||
| 65 | /// Increase/decrease the number of object in pages touching the specified region | 64 | /// Increase/decrease the number of object in pages touching the specified region |
| 66 | virtual void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {} | 65 | virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {} |
| 67 | 66 | ||
| 68 | /// Initialize disk cached resources for the game being emulated | 67 | /// Initialize disk cached resources for the game being emulated |
| 69 | virtual void LoadDiskResources(const std::atomic_bool& stop_loading = false, | 68 | virtual void LoadDiskResources(const std::atomic_bool& stop_loading = false, |
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index b3062e5ba..f75c65825 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp | |||
| @@ -13,24 +13,28 @@ | |||
| 13 | 13 | ||
| 14 | namespace OpenGL { | 14 | namespace OpenGL { |
| 15 | 15 | ||
| 16 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, | ||
| 17 | std::size_t alignment, u8* host_ptr) | ||
| 18 | : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ | ||
| 19 | host_ptr} {} | ||
| 20 | |||
| 16 | OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) | 21 | OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) |
| 17 | : RasterizerCache{rasterizer}, stream_buffer(size, true) {} | 22 | : RasterizerCache{rasterizer}, stream_buffer(size, true) {} |
| 18 | 23 | ||
| 19 | GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, | 24 | GLintptr OGLBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment, |
| 20 | std::size_t alignment, bool cache) { | 25 | bool cache) { |
| 21 | auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager(); | 26 | auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager(); |
| 22 | const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)}; | ||
| 23 | ASSERT_MSG(cpu_addr, "Invalid GPU address"); | ||
| 24 | 27 | ||
| 25 | // Cache management is a big overhead, so only cache entries with a given size. | 28 | // Cache management is a big overhead, so only cache entries with a given size. |
| 26 | // TODO: Figure out which size is the best for given games. | 29 | // TODO: Figure out which size is the best for given games. |
| 27 | cache &= size >= 2048; | 30 | cache &= size >= 2048; |
| 28 | 31 | ||
| 32 | const auto& host_ptr{memory_manager.GetPointer(gpu_addr)}; | ||
| 29 | if (cache) { | 33 | if (cache) { |
| 30 | auto entry = TryGet(*cpu_addr); | 34 | auto entry = TryGet(host_ptr); |
| 31 | if (entry) { | 35 | if (entry) { |
| 32 | if (entry->size >= size && entry->alignment == alignment) { | 36 | if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { |
| 33 | return entry->offset; | 37 | return entry->GetOffset(); |
| 34 | } | 38 | } |
| 35 | Unregister(entry); | 39 | Unregister(entry); |
| 36 | } | 40 | } |
| @@ -39,17 +43,17 @@ GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size | |||
| 39 | AlignBuffer(alignment); | 43 | AlignBuffer(alignment); |
| 40 | const GLintptr uploaded_offset = buffer_offset; | 44 | const GLintptr uploaded_offset = buffer_offset; |
| 41 | 45 | ||
| 42 | Memory::ReadBlock(*cpu_addr, buffer_ptr, size); | 46 | if (!host_ptr) { |
| 47 | return uploaded_offset; | ||
| 48 | } | ||
| 43 | 49 | ||
| 50 | std::memcpy(buffer_ptr, host_ptr, size); | ||
| 44 | buffer_ptr += size; | 51 | buffer_ptr += size; |
| 45 | buffer_offset += size; | 52 | buffer_offset += size; |
| 46 | 53 | ||
| 47 | if (cache) { | 54 | if (cache) { |
| 48 | auto entry = std::make_shared<CachedBufferEntry>(); | 55 | auto entry = std::make_shared<CachedBufferEntry>( |
| 49 | entry->offset = uploaded_offset; | 56 | *memory_manager.GpuToCpuAddress(gpu_addr), size, uploaded_offset, alignment, host_ptr); |
| 50 | entry->size = size; | ||
| 51 | entry->alignment = alignment; | ||
| 52 | entry->addr = *cpu_addr; | ||
| 53 | Register(entry); | 57 | Register(entry); |
| 54 | } | 58 | } |
| 55 | 59 | ||
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index c11acfb79..fc33aa433 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h | |||
| @@ -17,22 +17,39 @@ namespace OpenGL { | |||
| 17 | 17 | ||
| 18 | class RasterizerOpenGL; | 18 | class RasterizerOpenGL; |
| 19 | 19 | ||
| 20 | struct CachedBufferEntry final : public RasterizerCacheObject { | 20 | class CachedBufferEntry final : public RasterizerCacheObject { |
| 21 | VAddr GetAddr() const override { | 21 | public: |
| 22 | return addr; | 22 | explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, |
| 23 | std::size_t alignment, u8* host_ptr); | ||
| 24 | |||
| 25 | VAddr GetCpuAddr() const override { | ||
| 26 | return cpu_addr; | ||
| 23 | } | 27 | } |
| 24 | 28 | ||
| 25 | std::size_t GetSizeInBytes() const override { | 29 | std::size_t GetSizeInBytes() const override { |
| 26 | return size; | 30 | return size; |
| 27 | } | 31 | } |
| 28 | 32 | ||
| 33 | std::size_t GetSize() const { | ||
| 34 | return size; | ||
| 35 | } | ||
| 36 | |||
| 37 | GLintptr GetOffset() const { | ||
| 38 | return offset; | ||
| 39 | } | ||
| 40 | |||
| 41 | std::size_t GetAlignment() const { | ||
| 42 | return alignment; | ||
| 43 | } | ||
| 44 | |||
| 29 | // We do not have to flush this cache as things in it are never modified by us. | 45 | // We do not have to flush this cache as things in it are never modified by us. |
| 30 | void Flush() override {} | 46 | void Flush() override {} |
| 31 | 47 | ||
| 32 | VAddr addr; | 48 | private: |
| 33 | std::size_t size; | 49 | VAddr cpu_addr{}; |
| 34 | GLintptr offset; | 50 | std::size_t size{}; |
| 35 | std::size_t alignment; | 51 | GLintptr offset{}; |
| 52 | std::size_t alignment{}; | ||
| 36 | }; | 53 | }; |
| 37 | 54 | ||
| 38 | class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { | 55 | class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { |
| @@ -41,7 +58,7 @@ public: | |||
| 41 | 58 | ||
| 42 | /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been | 59 | /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been |
| 43 | /// allocated. | 60 | /// allocated. |
| 44 | GLintptr UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4, | 61 | GLintptr UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4, |
| 45 | bool cache = true); | 62 | bool cache = true); |
| 46 | 63 | ||
| 47 | /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. | 64 | /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. |
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp index c7f32feaa..0fbfbad55 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ b/src/video_core/renderer_opengl/gl_global_cache.cpp | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/memory.h" | ||
| 11 | #include "video_core/renderer_opengl/gl_global_cache.h" | 10 | #include "video_core/renderer_opengl/gl_global_cache.h" |
| 12 | #include "video_core/renderer_opengl/gl_rasterizer.h" | 11 | #include "video_core/renderer_opengl/gl_rasterizer.h" |
| 13 | #include "video_core/renderer_opengl/gl_shader_decompiler.h" | 12 | #include "video_core/renderer_opengl/gl_shader_decompiler.h" |
| @@ -15,12 +14,13 @@ | |||
| 15 | 14 | ||
| 16 | namespace OpenGL { | 15 | namespace OpenGL { |
| 17 | 16 | ||
| 18 | CachedGlobalRegion::CachedGlobalRegion(VAddr addr, u32 size) : addr{addr}, size{size} { | 17 | CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) |
| 18 | : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { | ||
| 19 | buffer.Create(); | 19 | buffer.Create(); |
| 20 | // Bind and unbind the buffer so it gets allocated by the driver | 20 | // Bind and unbind the buffer so it gets allocated by the driver |
| 21 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); | 21 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); |
| 22 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); | 22 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); |
| 23 | LabelGLObject(GL_BUFFER, buffer.handle, addr, "GlobalMemory"); | 23 | LabelGLObject(GL_BUFFER, buffer.handle, cpu_addr, "GlobalMemory"); |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | void CachedGlobalRegion::Reload(u32 size_) { | 26 | void CachedGlobalRegion::Reload(u32 size_) { |
| @@ -35,10 +35,10 @@ void CachedGlobalRegion::Reload(u32 size_) { | |||
| 35 | 35 | ||
| 36 | // TODO(Rodrigo): Get rid of Memory::GetPointer with a staging buffer | 36 | // TODO(Rodrigo): Get rid of Memory::GetPointer with a staging buffer |
| 37 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); | 37 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); |
| 38 | glBufferData(GL_SHADER_STORAGE_BUFFER, size, Memory::GetPointer(addr), GL_DYNAMIC_DRAW); | 38 | glBufferData(GL_SHADER_STORAGE_BUFFER, size, GetHostPtr(), GL_DYNAMIC_DRAW); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 size) const { | 41 | GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const { |
| 42 | const auto search{reserve.find(addr)}; | 42 | const auto search{reserve.find(addr)}; |
| 43 | if (search == reserve.end()) { | 43 | if (search == reserve.end()) { |
| 44 | return {}; | 44 | return {}; |
| @@ -46,19 +46,22 @@ GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 | |||
| 46 | return search->second; | 46 | return search->second; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size) { | 49 | GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(GPUVAddr addr, u32 size, |
| 50 | GlobalRegion region{TryGetReservedGlobalRegion(addr, size)}; | 50 | u8* host_ptr) { |
| 51 | GlobalRegion region{TryGetReservedGlobalRegion(ToCacheAddr(host_ptr), size)}; | ||
| 51 | if (!region) { | 52 | if (!region) { |
| 52 | // No reserved surface available, create a new one and reserve it | 53 | // No reserved surface available, create a new one and reserve it |
| 53 | region = std::make_shared<CachedGlobalRegion>(addr, size); | 54 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; |
| 55 | const auto cpu_addr = *memory_manager.GpuToCpuAddress(addr); | ||
| 56 | region = std::make_shared<CachedGlobalRegion>(cpu_addr, size, host_ptr); | ||
| 54 | ReserveGlobalRegion(region); | 57 | ReserveGlobalRegion(region); |
| 55 | } | 58 | } |
| 56 | region->Reload(size); | 59 | region->Reload(size); |
| 57 | return region; | 60 | return region; |
| 58 | } | 61 | } |
| 59 | 62 | ||
| 60 | void GlobalRegionCacheOpenGL::ReserveGlobalRegion(const GlobalRegion& region) { | 63 | void GlobalRegionCacheOpenGL::ReserveGlobalRegion(GlobalRegion region) { |
| 61 | reserve[region->GetAddr()] = region; | 64 | reserve.insert_or_assign(region->GetCacheAddr(), std::move(region)); |
| 62 | } | 65 | } |
| 63 | 66 | ||
| 64 | GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer) | 67 | GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer) |
| @@ -69,22 +72,20 @@ GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion( | |||
| 69 | Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) { | 72 | Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) { |
| 70 | 73 | ||
| 71 | auto& gpu{Core::System::GetInstance().GPU()}; | 74 | auto& gpu{Core::System::GetInstance().GPU()}; |
| 72 | const auto cbufs = gpu.Maxwell3D().state.shader_stages[static_cast<u64>(stage)]; | 75 | auto& memory_manager{gpu.MemoryManager()}; |
| 73 | const auto cbuf_addr = gpu.MemoryManager().GpuToCpuAddress( | 76 | const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<u64>(stage)]}; |
| 74 | cbufs.const_buffers[global_region.GetCbufIndex()].address + global_region.GetCbufOffset()); | 77 | const auto addr{cbufs.const_buffers[global_region.GetCbufIndex()].address + |
| 75 | ASSERT(cbuf_addr); | 78 | global_region.GetCbufOffset()}; |
| 76 | 79 | const auto actual_addr{memory_manager.Read<u64>(addr)}; | |
| 77 | const auto actual_addr_gpu = Memory::Read64(*cbuf_addr); | 80 | const auto size{memory_manager.Read<u32>(addr + 8)}; |
| 78 | const auto size = Memory::Read32(*cbuf_addr + 8); | ||
| 79 | const auto actual_addr = gpu.MemoryManager().GpuToCpuAddress(actual_addr_gpu); | ||
| 80 | ASSERT(actual_addr); | ||
| 81 | 81 | ||
| 82 | // Look up global region in the cache based on address | 82 | // Look up global region in the cache based on address |
| 83 | GlobalRegion region = TryGet(*actual_addr); | 83 | const auto& host_ptr{memory_manager.GetPointer(actual_addr)}; |
| 84 | GlobalRegion region{TryGet(host_ptr)}; | ||
| 84 | 85 | ||
| 85 | if (!region) { | 86 | if (!region) { |
| 86 | // No global region found - create a new one | 87 | // No global region found - create a new one |
| 87 | region = GetUncachedGlobalRegion(*actual_addr, size); | 88 | region = GetUncachedGlobalRegion(actual_addr, size, host_ptr); |
| 88 | Register(region); | 89 | Register(region); |
| 89 | } | 90 | } |
| 90 | 91 | ||
diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h index 37830bb7c..5a21ab66f 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.h +++ b/src/video_core/renderer_opengl/gl_global_cache.h | |||
| @@ -27,15 +27,13 @@ using GlobalRegion = std::shared_ptr<CachedGlobalRegion>; | |||
| 27 | 27 | ||
| 28 | class CachedGlobalRegion final : public RasterizerCacheObject { | 28 | class CachedGlobalRegion final : public RasterizerCacheObject { |
| 29 | public: | 29 | public: |
| 30 | explicit CachedGlobalRegion(VAddr addr, u32 size); | 30 | explicit CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr); |
| 31 | 31 | ||
| 32 | /// Gets the address of the shader in guest memory, required for cache management | 32 | VAddr GetCpuAddr() const override { |
| 33 | VAddr GetAddr() const { | 33 | return cpu_addr; |
| 34 | return addr; | ||
| 35 | } | 34 | } |
| 36 | 35 | ||
| 37 | /// Gets the size of the shader in guest memory, required for cache management | 36 | std::size_t GetSizeInBytes() const override { |
| 38 | std::size_t GetSizeInBytes() const { | ||
| 39 | return size; | 37 | return size; |
| 40 | } | 38 | } |
| 41 | 39 | ||
| @@ -53,9 +51,8 @@ public: | |||
| 53 | } | 51 | } |
| 54 | 52 | ||
| 55 | private: | 53 | private: |
| 56 | VAddr addr{}; | 54 | VAddr cpu_addr{}; |
| 57 | u32 size{}; | 55 | u32 size{}; |
| 58 | |||
| 59 | OGLBuffer buffer; | 56 | OGLBuffer buffer; |
| 60 | }; | 57 | }; |
| 61 | 58 | ||
| @@ -68,11 +65,11 @@ public: | |||
| 68 | Tegra::Engines::Maxwell3D::Regs::ShaderStage stage); | 65 | Tegra::Engines::Maxwell3D::Regs::ShaderStage stage); |
| 69 | 66 | ||
| 70 | private: | 67 | private: |
| 71 | GlobalRegion TryGetReservedGlobalRegion(VAddr addr, u32 size) const; | 68 | GlobalRegion TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const; |
| 72 | GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size); | 69 | GlobalRegion GetUncachedGlobalRegion(GPUVAddr addr, u32 size, u8* host_ptr); |
| 73 | void ReserveGlobalRegion(const GlobalRegion& region); | 70 | void ReserveGlobalRegion(GlobalRegion region); |
| 74 | 71 | ||
| 75 | std::unordered_map<VAddr, GlobalRegion> reserve; | 72 | std::unordered_map<CacheAddr, GlobalRegion> reserve; |
| 76 | }; | 73 | }; |
| 77 | 74 | ||
| 78 | } // namespace OpenGL | 75 | } // namespace OpenGL |
diff --git a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp index 77d5cedd2..2bcbd3da2 100644 --- a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp +++ b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp | |||
| @@ -40,16 +40,12 @@ GLintptr PrimitiveAssembler::MakeQuadArray(u32 first, u32 count) { | |||
| 40 | return index_offset; | 40 | return index_offset; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | GLintptr PrimitiveAssembler::MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size_t index_size, | 43 | GLintptr PrimitiveAssembler::MakeQuadIndexed(GPUVAddr gpu_addr, std::size_t index_size, u32 count) { |
| 44 | u32 count) { | ||
| 45 | const std::size_t map_size{CalculateQuadSize(count)}; | 44 | const std::size_t map_size{CalculateQuadSize(count)}; |
| 46 | auto [dst_pointer, index_offset] = buffer_cache.ReserveMemory(map_size); | 45 | auto [dst_pointer, index_offset] = buffer_cache.ReserveMemory(map_size); |
| 47 | 46 | ||
| 48 | auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager(); | 47 | auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager(); |
| 49 | const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)}; | 48 | const u8* source{memory_manager.GetPointer(gpu_addr)}; |
| 50 | ASSERT_MSG(cpu_addr, "Invalid GPU address"); | ||
| 51 | |||
| 52 | const u8* source{Memory::GetPointer(*cpu_addr)}; | ||
| 53 | 49 | ||
| 54 | for (u32 primitive = 0; primitive < count / 4; ++primitive) { | 50 | for (u32 primitive = 0; primitive < count / 4; ++primitive) { |
| 55 | for (std::size_t i = 0; i < TRIANGLES_PER_QUAD; ++i) { | 51 | for (std::size_t i = 0; i < TRIANGLES_PER_QUAD; ++i) { |
| @@ -64,4 +60,4 @@ GLintptr PrimitiveAssembler::MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size | |||
| 64 | return index_offset; | 60 | return index_offset; |
| 65 | } | 61 | } |
| 66 | 62 | ||
| 67 | } // namespace OpenGL \ No newline at end of file | 63 | } // namespace OpenGL |
diff --git a/src/video_core/renderer_opengl/gl_primitive_assembler.h b/src/video_core/renderer_opengl/gl_primitive_assembler.h index a8cb88eb5..0e2e7dc36 100644 --- a/src/video_core/renderer_opengl/gl_primitive_assembler.h +++ b/src/video_core/renderer_opengl/gl_primitive_assembler.h | |||
| @@ -24,7 +24,7 @@ public: | |||
| 24 | 24 | ||
| 25 | GLintptr MakeQuadArray(u32 first, u32 count); | 25 | GLintptr MakeQuadArray(u32 first, u32 count); |
| 26 | 26 | ||
| 27 | GLintptr MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size_t index_size, u32 count); | 27 | GLintptr MakeQuadIndexed(GPUVAddr gpu_addr, std::size_t index_size, u32 count); |
| 28 | 28 | ||
| 29 | private: | 29 | private: |
| 30 | OGLBufferCache& buffer_cache; | 30 | OGLBufferCache& buffer_cache; |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index e038686ad..e06dfe43f 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -102,8 +102,9 @@ struct FramebufferCacheKey { | |||
| 102 | 102 | ||
| 103 | RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, | 103 | RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, |
| 104 | ScreenInfo& info) | 104 | ScreenInfo& info) |
| 105 | : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, emu_window{window}, | 105 | : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, |
| 106 | screen_info{info}, buffer_cache(*this, STREAM_BUFFER_SIZE) { | 106 | emu_window{window}, system{system}, screen_info{info}, |
| 107 | buffer_cache(*this, STREAM_BUFFER_SIZE) { | ||
| 107 | // Create sampler objects | 108 | // Create sampler objects |
| 108 | for (std::size_t i = 0; i < texture_samplers.size(); ++i) { | 109 | for (std::size_t i = 0; i < texture_samplers.size(); ++i) { |
| 109 | texture_samplers[i].Create(); | 110 | texture_samplers[i].Create(); |
| @@ -138,7 +139,7 @@ void RasterizerOpenGL::CheckExtensions() { | |||
| 138 | } | 139 | } |
| 139 | 140 | ||
| 140 | GLuint RasterizerOpenGL::SetupVertexFormat() { | 141 | GLuint RasterizerOpenGL::SetupVertexFormat() { |
| 141 | auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 142 | auto& gpu = system.GPU().Maxwell3D(); |
| 142 | const auto& regs = gpu.regs; | 143 | const auto& regs = gpu.regs; |
| 143 | 144 | ||
| 144 | if (!gpu.dirty_flags.vertex_attrib_format) { | 145 | if (!gpu.dirty_flags.vertex_attrib_format) { |
| @@ -207,7 +208,7 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { | |||
| 207 | } | 208 | } |
| 208 | 209 | ||
| 209 | void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { | 210 | void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { |
| 210 | auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 211 | auto& gpu = system.GPU().Maxwell3D(); |
| 211 | const auto& regs = gpu.regs; | 212 | const auto& regs = gpu.regs; |
| 212 | 213 | ||
| 213 | if (gpu.dirty_flags.vertex_array.none()) | 214 | if (gpu.dirty_flags.vertex_array.none()) |
| @@ -224,8 +225,8 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { | |||
| 224 | if (!vertex_array.IsEnabled()) | 225 | if (!vertex_array.IsEnabled()) |
| 225 | continue; | 226 | continue; |
| 226 | 227 | ||
| 227 | const Tegra::GPUVAddr start = vertex_array.StartAddress(); | 228 | const GPUVAddr start = vertex_array.StartAddress(); |
| 228 | const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); | 229 | const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); |
| 229 | 230 | ||
| 230 | ASSERT(end > start); | 231 | ASSERT(end > start); |
| 231 | const u64 size = end - start + 1; | 232 | const u64 size = end - start + 1; |
| @@ -248,7 +249,7 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { | |||
| 248 | } | 249 | } |
| 249 | 250 | ||
| 250 | DrawParameters RasterizerOpenGL::SetupDraw() { | 251 | DrawParameters RasterizerOpenGL::SetupDraw() { |
| 251 | const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 252 | const auto& gpu = system.GPU().Maxwell3D(); |
| 252 | const auto& regs = gpu.regs; | 253 | const auto& regs = gpu.regs; |
| 253 | const bool is_indexed = accelerate_draw == AccelDraw::Indexed; | 254 | const bool is_indexed = accelerate_draw == AccelDraw::Indexed; |
| 254 | 255 | ||
| @@ -297,7 +298,7 @@ DrawParameters RasterizerOpenGL::SetupDraw() { | |||
| 297 | 298 | ||
| 298 | void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { | 299 | void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { |
| 299 | MICROPROFILE_SCOPE(OpenGL_Shader); | 300 | MICROPROFILE_SCOPE(OpenGL_Shader); |
| 300 | auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 301 | auto& gpu = system.GPU().Maxwell3D(); |
| 301 | 302 | ||
| 302 | BaseBindings base_bindings; | 303 | BaseBindings base_bindings; |
| 303 | std::array<bool, Maxwell::NumClipDistances> clip_distances{}; | 304 | std::array<bool, Maxwell::NumClipDistances> clip_distances{}; |
| @@ -413,15 +414,15 @@ void RasterizerOpenGL::SetupCachedFramebuffer(const FramebufferCacheKey& fbkey, | |||
| 413 | } | 414 | } |
| 414 | 415 | ||
| 415 | std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { | 416 | std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { |
| 416 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 417 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 417 | 418 | ||
| 418 | std::size_t size = 0; | 419 | std::size_t size = 0; |
| 419 | for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { | 420 | for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { |
| 420 | if (!regs.vertex_array[index].IsEnabled()) | 421 | if (!regs.vertex_array[index].IsEnabled()) |
| 421 | continue; | 422 | continue; |
| 422 | 423 | ||
| 423 | const Tegra::GPUVAddr start = regs.vertex_array[index].StartAddress(); | 424 | const GPUVAddr start = regs.vertex_array[index].StartAddress(); |
| 424 | const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); | 425 | const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); |
| 425 | 426 | ||
| 426 | ASSERT(end > start); | 427 | ASSERT(end > start); |
| 427 | size += end - start + 1; | 428 | size += end - start + 1; |
| @@ -431,7 +432,7 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { | |||
| 431 | } | 432 | } |
| 432 | 433 | ||
| 433 | std::size_t RasterizerOpenGL::CalculateIndexBufferSize() const { | 434 | std::size_t RasterizerOpenGL::CalculateIndexBufferSize() const { |
| 434 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 435 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 435 | 436 | ||
| 436 | return static_cast<std::size_t>(regs.index_array.count) * | 437 | return static_cast<std::size_t>(regs.index_array.count) * |
| 437 | static_cast<std::size_t>(regs.index_array.FormatSizeInBytes()); | 438 | static_cast<std::size_t>(regs.index_array.FormatSizeInBytes()); |
| @@ -448,7 +449,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) { | |||
| 448 | return boost::make_iterator_range(map.equal_range(interval)); | 449 | return boost::make_iterator_range(map.equal_range(interval)); |
| 449 | } | 450 | } |
| 450 | 451 | ||
| 451 | void RasterizerOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) { | 452 | void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { |
| 452 | const u64 page_start{addr >> Memory::PAGE_BITS}; | 453 | const u64 page_start{addr >> Memory::PAGE_BITS}; |
| 453 | const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; | 454 | const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; |
| 454 | 455 | ||
| @@ -487,7 +488,7 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers( | |||
| 487 | OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool preserve_contents, | 488 | OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool preserve_contents, |
| 488 | std::optional<std::size_t> single_color_target) { | 489 | std::optional<std::size_t> single_color_target) { |
| 489 | MICROPROFILE_SCOPE(OpenGL_Framebuffer); | 490 | MICROPROFILE_SCOPE(OpenGL_Framebuffer); |
| 490 | auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 491 | auto& gpu = system.GPU().Maxwell3D(); |
| 491 | const auto& regs = gpu.regs; | 492 | const auto& regs = gpu.regs; |
| 492 | 493 | ||
| 493 | const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents, | 494 | const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents, |
| @@ -581,7 +582,7 @@ void RasterizerOpenGL::Clear() { | |||
| 581 | const auto prev_state{state}; | 582 | const auto prev_state{state}; |
| 582 | SCOPE_EXIT({ prev_state.Apply(); }); | 583 | SCOPE_EXIT({ prev_state.Apply(); }); |
| 583 | 584 | ||
| 584 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 585 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 585 | bool use_color{}; | 586 | bool use_color{}; |
| 586 | bool use_depth{}; | 587 | bool use_depth{}; |
| 587 | bool use_stencil{}; | 588 | bool use_stencil{}; |
| @@ -672,7 +673,7 @@ void RasterizerOpenGL::DrawArrays() { | |||
| 672 | return; | 673 | return; |
| 673 | 674 | ||
| 674 | MICROPROFILE_SCOPE(OpenGL_Drawing); | 675 | MICROPROFILE_SCOPE(OpenGL_Drawing); |
| 675 | auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 676 | auto& gpu = system.GPU().Maxwell3D(); |
| 676 | const auto& regs = gpu.regs; | 677 | const auto& regs = gpu.regs; |
| 677 | 678 | ||
| 678 | ConfigureFramebuffers(state); | 679 | ConfigureFramebuffers(state); |
| @@ -746,20 +747,26 @@ void RasterizerOpenGL::DrawArrays() { | |||
| 746 | 747 | ||
| 747 | void RasterizerOpenGL::FlushAll() {} | 748 | void RasterizerOpenGL::FlushAll() {} |
| 748 | 749 | ||
| 749 | void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { | 750 | void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) { |
| 750 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); | 751 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); |
| 752 | if (!addr || !size) { | ||
| 753 | return; | ||
| 754 | } | ||
| 751 | res_cache.FlushRegion(addr, size); | 755 | res_cache.FlushRegion(addr, size); |
| 752 | } | 756 | } |
| 753 | 757 | ||
| 754 | void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { | 758 | void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) { |
| 755 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); | 759 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); |
| 760 | if (!addr || !size) { | ||
| 761 | return; | ||
| 762 | } | ||
| 756 | res_cache.InvalidateRegion(addr, size); | 763 | res_cache.InvalidateRegion(addr, size); |
| 757 | shader_cache.InvalidateRegion(addr, size); | 764 | shader_cache.InvalidateRegion(addr, size); |
| 758 | global_cache.InvalidateRegion(addr, size); | 765 | global_cache.InvalidateRegion(addr, size); |
| 759 | buffer_cache.InvalidateRegion(addr, size); | 766 | buffer_cache.InvalidateRegion(addr, size); |
| 760 | } | 767 | } |
| 761 | 768 | ||
| 762 | void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) { | 769 | void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { |
| 763 | FlushRegion(addr, size); | 770 | FlushRegion(addr, size); |
| 764 | InvalidateRegion(addr, size); | 771 | InvalidateRegion(addr, size); |
| 765 | } | 772 | } |
| @@ -781,7 +788,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, | |||
| 781 | 788 | ||
| 782 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); | 789 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); |
| 783 | 790 | ||
| 784 | const auto& surface{res_cache.TryFindFramebufferSurface(framebuffer_addr)}; | 791 | const auto& surface{res_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))}; |
| 785 | if (!surface) { | 792 | if (!surface) { |
| 786 | return {}; | 793 | return {}; |
| 787 | } | 794 | } |
| @@ -804,104 +811,87 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, | |||
| 804 | 811 | ||
| 805 | void RasterizerOpenGL::SamplerInfo::Create() { | 812 | void RasterizerOpenGL::SamplerInfo::Create() { |
| 806 | sampler.Create(); | 813 | sampler.Create(); |
| 807 | mag_filter = min_filter = Tegra::Texture::TextureFilter::Linear; | 814 | mag_filter = Tegra::Texture::TextureFilter::Linear; |
| 808 | wrap_u = wrap_v = wrap_p = Tegra::Texture::WrapMode::Wrap; | 815 | min_filter = Tegra::Texture::TextureFilter::Linear; |
| 809 | uses_depth_compare = false; | 816 | wrap_u = Tegra::Texture::WrapMode::Wrap; |
| 817 | wrap_v = Tegra::Texture::WrapMode::Wrap; | ||
| 818 | wrap_p = Tegra::Texture::WrapMode::Wrap; | ||
| 819 | use_depth_compare = false; | ||
| 810 | depth_compare_func = Tegra::Texture::DepthCompareFunc::Never; | 820 | depth_compare_func = Tegra::Texture::DepthCompareFunc::Never; |
| 811 | 821 | ||
| 812 | // default is GL_LINEAR_MIPMAP_LINEAR | 822 | // OpenGL's default is GL_LINEAR_MIPMAP_LINEAR |
| 813 | glSamplerParameteri(sampler.handle, GL_TEXTURE_MIN_FILTER, GL_LINEAR); | 823 | glSamplerParameteri(sampler.handle, GL_TEXTURE_MIN_FILTER, GL_LINEAR); |
| 814 | // Other attributes have correct defaults | ||
| 815 | glSamplerParameteri(sampler.handle, GL_TEXTURE_COMPARE_FUNC, GL_NEVER); | 824 | glSamplerParameteri(sampler.handle, GL_TEXTURE_COMPARE_FUNC, GL_NEVER); |
| 825 | |||
| 826 | // Other attributes have correct defaults | ||
| 816 | } | 827 | } |
| 817 | 828 | ||
| 818 | void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntry& config) { | 829 | void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntry& config) { |
| 819 | const GLuint s = sampler.handle; | 830 | const GLuint sampler_id = sampler.handle; |
| 820 | if (mag_filter != config.mag_filter) { | 831 | if (mag_filter != config.mag_filter) { |
| 821 | mag_filter = config.mag_filter; | 832 | mag_filter = config.mag_filter; |
| 822 | glSamplerParameteri( | 833 | glSamplerParameteri( |
| 823 | s, GL_TEXTURE_MAG_FILTER, | 834 | sampler_id, GL_TEXTURE_MAG_FILTER, |
| 824 | MaxwellToGL::TextureFilterMode(mag_filter, Tegra::Texture::TextureMipmapFilter::None)); | 835 | MaxwellToGL::TextureFilterMode(mag_filter, Tegra::Texture::TextureMipmapFilter::None)); |
| 825 | } | 836 | } |
| 826 | if (min_filter != config.min_filter || mip_filter != config.mip_filter) { | 837 | if (min_filter != config.min_filter || mipmap_filter != config.mipmap_filter) { |
| 827 | min_filter = config.min_filter; | 838 | min_filter = config.min_filter; |
| 828 | mip_filter = config.mip_filter; | 839 | mipmap_filter = config.mipmap_filter; |
| 829 | glSamplerParameteri(s, GL_TEXTURE_MIN_FILTER, | 840 | glSamplerParameteri(sampler_id, GL_TEXTURE_MIN_FILTER, |
| 830 | MaxwellToGL::TextureFilterMode(min_filter, mip_filter)); | 841 | MaxwellToGL::TextureFilterMode(min_filter, mipmap_filter)); |
| 831 | } | 842 | } |
| 832 | 843 | ||
| 833 | if (wrap_u != config.wrap_u) { | 844 | if (wrap_u != config.wrap_u) { |
| 834 | wrap_u = config.wrap_u; | 845 | wrap_u = config.wrap_u; |
| 835 | glSamplerParameteri(s, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u)); | 846 | glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u)); |
| 836 | } | 847 | } |
| 837 | if (wrap_v != config.wrap_v) { | 848 | if (wrap_v != config.wrap_v) { |
| 838 | wrap_v = config.wrap_v; | 849 | wrap_v = config.wrap_v; |
| 839 | glSamplerParameteri(s, GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v)); | 850 | glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v)); |
| 840 | } | 851 | } |
| 841 | if (wrap_p != config.wrap_p) { | 852 | if (wrap_p != config.wrap_p) { |
| 842 | wrap_p = config.wrap_p; | 853 | wrap_p = config.wrap_p; |
| 843 | glSamplerParameteri(s, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p)); | 854 | glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p)); |
| 844 | } | 855 | } |
| 845 | 856 | ||
| 846 | if (uses_depth_compare != (config.depth_compare_enabled == 1)) { | 857 | if (const bool enabled = config.depth_compare_enabled == 1; use_depth_compare != enabled) { |
| 847 | uses_depth_compare = (config.depth_compare_enabled == 1); | 858 | use_depth_compare = enabled; |
| 848 | if (uses_depth_compare) { | 859 | glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_MODE, |
| 849 | glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE); | 860 | use_depth_compare ? GL_COMPARE_REF_TO_TEXTURE : GL_NONE); |
| 850 | } else { | ||
| 851 | glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_NONE); | ||
| 852 | } | ||
| 853 | } | 861 | } |
| 854 | 862 | ||
| 855 | if (depth_compare_func != config.depth_compare_func) { | 863 | if (depth_compare_func != config.depth_compare_func) { |
| 856 | depth_compare_func = config.depth_compare_func; | 864 | depth_compare_func = config.depth_compare_func; |
| 857 | glSamplerParameteri(s, GL_TEXTURE_COMPARE_FUNC, | 865 | glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_FUNC, |
| 858 | MaxwellToGL::DepthCompareFunc(depth_compare_func)); | 866 | MaxwellToGL::DepthCompareFunc(depth_compare_func)); |
| 859 | } | 867 | } |
| 860 | 868 | ||
| 861 | GLvec4 new_border_color; | 869 | if (const auto new_border_color = config.GetBorderColor(); border_color != new_border_color) { |
| 862 | if (config.srgb_conversion) { | ||
| 863 | new_border_color[0] = config.srgb_border_color_r / 255.0f; | ||
| 864 | new_border_color[1] = config.srgb_border_color_g / 255.0f; | ||
| 865 | new_border_color[2] = config.srgb_border_color_g / 255.0f; | ||
| 866 | } else { | ||
| 867 | new_border_color[0] = config.border_color_r; | ||
| 868 | new_border_color[1] = config.border_color_g; | ||
| 869 | new_border_color[2] = config.border_color_b; | ||
| 870 | } | ||
| 871 | new_border_color[3] = config.border_color_a; | ||
| 872 | |||
| 873 | if (border_color != new_border_color) { | ||
| 874 | border_color = new_border_color; | 870 | border_color = new_border_color; |
| 875 | glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, border_color.data()); | 871 | glSamplerParameterfv(sampler_id, GL_TEXTURE_BORDER_COLOR, border_color.data()); |
| 876 | } | 872 | } |
| 877 | 873 | ||
| 878 | const float anisotropic_max = static_cast<float>(1 << config.max_anisotropy.Value()); | 874 | if (const float anisotropic = config.GetMaxAnisotropy(); max_anisotropic != anisotropic) { |
| 879 | if (anisotropic_max != max_anisotropic) { | 875 | max_anisotropic = anisotropic; |
| 880 | max_anisotropic = anisotropic_max; | ||
| 881 | if (GLAD_GL_ARB_texture_filter_anisotropic) { | 876 | if (GLAD_GL_ARB_texture_filter_anisotropic) { |
| 882 | glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic); | 877 | glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic); |
| 883 | } else if (GLAD_GL_EXT_texture_filter_anisotropic) { | 878 | } else if (GLAD_GL_EXT_texture_filter_anisotropic) { |
| 884 | glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic); | 879 | glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic); |
| 885 | } | 880 | } |
| 886 | } | 881 | } |
| 887 | const float lod_min = static_cast<float>(config.min_lod_clamp.Value()) / 256.0f; | ||
| 888 | if (lod_min != min_lod) { | ||
| 889 | min_lod = lod_min; | ||
| 890 | glSamplerParameterf(s, GL_TEXTURE_MIN_LOD, min_lod); | ||
| 891 | } | ||
| 892 | 882 | ||
| 893 | const float lod_max = static_cast<float>(config.max_lod_clamp.Value()) / 256.0f; | 883 | if (const float min = config.GetMinLod(); min_lod != min) { |
| 894 | if (lod_max != max_lod) { | 884 | min_lod = min; |
| 895 | max_lod = lod_max; | 885 | glSamplerParameterf(sampler_id, GL_TEXTURE_MIN_LOD, min_lod); |
| 896 | glSamplerParameterf(s, GL_TEXTURE_MAX_LOD, max_lod); | 886 | } |
| 887 | if (const float max = config.GetMaxLod(); max_lod != max) { | ||
| 888 | max_lod = max; | ||
| 889 | glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_LOD, max_lod); | ||
| 897 | } | 890 | } |
| 898 | const u32 bias = config.mip_lod_bias.Value(); | 891 | |
| 899 | // Sign extend the 13-bit value. | 892 | if (const float bias = config.GetLodBias(); lod_bias != bias) { |
| 900 | constexpr u32 mask = 1U << (13 - 1); | 893 | lod_bias = bias; |
| 901 | const float bias_lod = static_cast<s32>((bias ^ mask) - mask) / 256.f; | 894 | glSamplerParameterf(sampler_id, GL_TEXTURE_LOD_BIAS, lod_bias); |
| 902 | if (lod_bias != bias_lod) { | ||
| 903 | lod_bias = bias_lod; | ||
| 904 | glSamplerParameterf(s, GL_TEXTURE_LOD_BIAS, lod_bias); | ||
| 905 | } | 895 | } |
| 906 | } | 896 | } |
| 907 | 897 | ||
| @@ -909,7 +899,7 @@ void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::Shader | |||
| 909 | const Shader& shader, GLuint program_handle, | 899 | const Shader& shader, GLuint program_handle, |
| 910 | BaseBindings base_bindings) { | 900 | BaseBindings base_bindings) { |
| 911 | MICROPROFILE_SCOPE(OpenGL_UBO); | 901 | MICROPROFILE_SCOPE(OpenGL_UBO); |
| 912 | const auto& gpu = Core::System::GetInstance().GPU(); | 902 | const auto& gpu = system.GPU(); |
| 913 | const auto& maxwell3d = gpu.Maxwell3D(); | 903 | const auto& maxwell3d = gpu.Maxwell3D(); |
| 914 | const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)]; | 904 | const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)]; |
| 915 | const auto& entries = shader->GetShaderEntries().const_buffers; | 905 | const auto& entries = shader->GetShaderEntries().const_buffers; |
| @@ -988,7 +978,7 @@ void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::Shade | |||
| 988 | void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader, | 978 | void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader, |
| 989 | GLuint program_handle, BaseBindings base_bindings) { | 979 | GLuint program_handle, BaseBindings base_bindings) { |
| 990 | MICROPROFILE_SCOPE(OpenGL_Texture); | 980 | MICROPROFILE_SCOPE(OpenGL_Texture); |
| 991 | const auto& gpu = Core::System::GetInstance().GPU(); | 981 | const auto& gpu = system.GPU(); |
| 992 | const auto& maxwell3d = gpu.Maxwell3D(); | 982 | const auto& maxwell3d = gpu.Maxwell3D(); |
| 993 | const auto& entries = shader->GetShaderEntries().samplers; | 983 | const auto& entries = shader->GetShaderEntries().samplers; |
| 994 | 984 | ||
| @@ -1002,10 +992,9 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s | |||
| 1002 | 992 | ||
| 1003 | texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc); | 993 | texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc); |
| 1004 | 994 | ||
| 1005 | Surface surface = res_cache.GetTextureSurface(texture, entry); | 995 | if (Surface surface = res_cache.GetTextureSurface(texture, entry); surface) { |
| 1006 | if (surface != nullptr) { | ||
| 1007 | state.texture_units[current_bindpoint].texture = | 996 | state.texture_units[current_bindpoint].texture = |
| 1008 | entry.IsArray() ? surface->TextureLayer().handle : surface->Texture().handle; | 997 | surface->Texture(entry.IsArray()).handle; |
| 1009 | surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, | 998 | surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, |
| 1010 | texture.tic.w_source); | 999 | texture.tic.w_source); |
| 1011 | } else { | 1000 | } else { |
| @@ -1016,7 +1005,7 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s | |||
| 1016 | } | 1005 | } |
| 1017 | 1006 | ||
| 1018 | void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { | 1007 | void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { |
| 1019 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1008 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1020 | const bool geometry_shaders_enabled = | 1009 | const bool geometry_shaders_enabled = |
| 1021 | regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); | 1010 | regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); |
| 1022 | const std::size_t viewport_count = | 1011 | const std::size_t viewport_count = |
| @@ -1039,7 +1028,7 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { | |||
| 1039 | void RasterizerOpenGL::SyncClipEnabled( | 1028 | void RasterizerOpenGL::SyncClipEnabled( |
| 1040 | const std::array<bool, Maxwell::Regs::NumClipDistances>& clip_mask) { | 1029 | const std::array<bool, Maxwell::Regs::NumClipDistances>& clip_mask) { |
| 1041 | 1030 | ||
| 1042 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1031 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1043 | const std::array<bool, Maxwell::Regs::NumClipDistances> reg_state{ | 1032 | const std::array<bool, Maxwell::Regs::NumClipDistances> reg_state{ |
| 1044 | regs.clip_distance_enabled.c0 != 0, regs.clip_distance_enabled.c1 != 0, | 1033 | regs.clip_distance_enabled.c0 != 0, regs.clip_distance_enabled.c1 != 0, |
| 1045 | regs.clip_distance_enabled.c2 != 0, regs.clip_distance_enabled.c3 != 0, | 1034 | regs.clip_distance_enabled.c2 != 0, regs.clip_distance_enabled.c3 != 0, |
| @@ -1056,7 +1045,7 @@ void RasterizerOpenGL::SyncClipCoef() { | |||
| 1056 | } | 1045 | } |
| 1057 | 1046 | ||
| 1058 | void RasterizerOpenGL::SyncCullMode() { | 1047 | void RasterizerOpenGL::SyncCullMode() { |
| 1059 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1048 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1060 | 1049 | ||
| 1061 | state.cull.enabled = regs.cull.enabled != 0; | 1050 | state.cull.enabled = regs.cull.enabled != 0; |
| 1062 | 1051 | ||
| @@ -1080,14 +1069,14 @@ void RasterizerOpenGL::SyncCullMode() { | |||
| 1080 | } | 1069 | } |
| 1081 | 1070 | ||
| 1082 | void RasterizerOpenGL::SyncPrimitiveRestart() { | 1071 | void RasterizerOpenGL::SyncPrimitiveRestart() { |
| 1083 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1072 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1084 | 1073 | ||
| 1085 | state.primitive_restart.enabled = regs.primitive_restart.enabled; | 1074 | state.primitive_restart.enabled = regs.primitive_restart.enabled; |
| 1086 | state.primitive_restart.index = regs.primitive_restart.index; | 1075 | state.primitive_restart.index = regs.primitive_restart.index; |
| 1087 | } | 1076 | } |
| 1088 | 1077 | ||
| 1089 | void RasterizerOpenGL::SyncDepthTestState() { | 1078 | void RasterizerOpenGL::SyncDepthTestState() { |
| 1090 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1079 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1091 | 1080 | ||
| 1092 | state.depth.test_enabled = regs.depth_test_enable != 0; | 1081 | state.depth.test_enabled = regs.depth_test_enable != 0; |
| 1093 | state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE; | 1082 | state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE; |
| @@ -1099,7 +1088,7 @@ void RasterizerOpenGL::SyncDepthTestState() { | |||
| 1099 | } | 1088 | } |
| 1100 | 1089 | ||
| 1101 | void RasterizerOpenGL::SyncStencilTestState() { | 1090 | void RasterizerOpenGL::SyncStencilTestState() { |
| 1102 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1091 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1103 | state.stencil.test_enabled = regs.stencil_enable != 0; | 1092 | state.stencil.test_enabled = regs.stencil_enable != 0; |
| 1104 | 1093 | ||
| 1105 | if (!regs.stencil_enable) { | 1094 | if (!regs.stencil_enable) { |
| @@ -1133,7 +1122,7 @@ void RasterizerOpenGL::SyncStencilTestState() { | |||
| 1133 | } | 1122 | } |
| 1134 | 1123 | ||
| 1135 | void RasterizerOpenGL::SyncColorMask() { | 1124 | void RasterizerOpenGL::SyncColorMask() { |
| 1136 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1125 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1137 | const std::size_t count = | 1126 | const std::size_t count = |
| 1138 | regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1; | 1127 | regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1; |
| 1139 | for (std::size_t i = 0; i < count; i++) { | 1128 | for (std::size_t i = 0; i < count; i++) { |
| @@ -1147,18 +1136,18 @@ void RasterizerOpenGL::SyncColorMask() { | |||
| 1147 | } | 1136 | } |
| 1148 | 1137 | ||
| 1149 | void RasterizerOpenGL::SyncMultiSampleState() { | 1138 | void RasterizerOpenGL::SyncMultiSampleState() { |
| 1150 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1139 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1151 | state.multisample_control.alpha_to_coverage = regs.multisample_control.alpha_to_coverage != 0; | 1140 | state.multisample_control.alpha_to_coverage = regs.multisample_control.alpha_to_coverage != 0; |
| 1152 | state.multisample_control.alpha_to_one = regs.multisample_control.alpha_to_one != 0; | 1141 | state.multisample_control.alpha_to_one = regs.multisample_control.alpha_to_one != 0; |
| 1153 | } | 1142 | } |
| 1154 | 1143 | ||
| 1155 | void RasterizerOpenGL::SyncFragmentColorClampState() { | 1144 | void RasterizerOpenGL::SyncFragmentColorClampState() { |
| 1156 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1145 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1157 | state.fragment_color_clamp.enabled = regs.frag_color_clamp != 0; | 1146 | state.fragment_color_clamp.enabled = regs.frag_color_clamp != 0; |
| 1158 | } | 1147 | } |
| 1159 | 1148 | ||
| 1160 | void RasterizerOpenGL::SyncBlendState() { | 1149 | void RasterizerOpenGL::SyncBlendState() { |
| 1161 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1150 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1162 | 1151 | ||
| 1163 | state.blend_color.red = regs.blend_color.r; | 1152 | state.blend_color.red = regs.blend_color.r; |
| 1164 | state.blend_color.green = regs.blend_color.g; | 1153 | state.blend_color.green = regs.blend_color.g; |
| @@ -1200,7 +1189,7 @@ void RasterizerOpenGL::SyncBlendState() { | |||
| 1200 | } | 1189 | } |
| 1201 | 1190 | ||
| 1202 | void RasterizerOpenGL::SyncLogicOpState() { | 1191 | void RasterizerOpenGL::SyncLogicOpState() { |
| 1203 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1192 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1204 | 1193 | ||
| 1205 | state.logic_op.enabled = regs.logic_op.enable != 0; | 1194 | state.logic_op.enabled = regs.logic_op.enable != 0; |
| 1206 | 1195 | ||
| @@ -1214,7 +1203,7 @@ void RasterizerOpenGL::SyncLogicOpState() { | |||
| 1214 | } | 1203 | } |
| 1215 | 1204 | ||
| 1216 | void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { | 1205 | void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { |
| 1217 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1206 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1218 | const bool geometry_shaders_enabled = | 1207 | const bool geometry_shaders_enabled = |
| 1219 | regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); | 1208 | regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry)); |
| 1220 | const std::size_t viewport_count = | 1209 | const std::size_t viewport_count = |
| @@ -1236,17 +1225,17 @@ void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { | |||
| 1236 | } | 1225 | } |
| 1237 | 1226 | ||
| 1238 | void RasterizerOpenGL::SyncTransformFeedback() { | 1227 | void RasterizerOpenGL::SyncTransformFeedback() { |
| 1239 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1228 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1240 | UNIMPLEMENTED_IF_MSG(regs.tfb_enabled != 0, "Transform feedbacks are not implemented"); | 1229 | UNIMPLEMENTED_IF_MSG(regs.tfb_enabled != 0, "Transform feedbacks are not implemented"); |
| 1241 | } | 1230 | } |
| 1242 | 1231 | ||
| 1243 | void RasterizerOpenGL::SyncPointState() { | 1232 | void RasterizerOpenGL::SyncPointState() { |
| 1244 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1233 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1245 | state.point.size = regs.point_size; | 1234 | state.point.size = regs.point_size; |
| 1246 | } | 1235 | } |
| 1247 | 1236 | ||
| 1248 | void RasterizerOpenGL::SyncPolygonOffset() { | 1237 | void RasterizerOpenGL::SyncPolygonOffset() { |
| 1249 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1238 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1250 | state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0; | 1239 | state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0; |
| 1251 | state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0; | 1240 | state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0; |
| 1252 | state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0; | 1241 | state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0; |
| @@ -1256,7 +1245,7 @@ void RasterizerOpenGL::SyncPolygonOffset() { | |||
| 1256 | } | 1245 | } |
| 1257 | 1246 | ||
| 1258 | void RasterizerOpenGL::CheckAlphaTests() { | 1247 | void RasterizerOpenGL::CheckAlphaTests() { |
| 1259 | const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; | 1248 | const auto& regs = system.GPU().Maxwell3D().regs; |
| 1260 | UNIMPLEMENTED_IF_MSG(regs.alpha_test_enabled != 0 && regs.rt_control.count > 1, | 1249 | UNIMPLEMENTED_IF_MSG(regs.alpha_test_enabled != 0 && regs.rt_control.count > 1, |
| 1261 | "Alpha Testing is enabled with more than one rendertarget"); | 1250 | "Alpha Testing is enabled with more than one rendertarget"); |
| 1262 | } | 1251 | } |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 2f0524f85..30f3e8acb 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -57,9 +57,9 @@ public: | |||
| 57 | void DrawArrays() override; | 57 | void DrawArrays() override; |
| 58 | void Clear() override; | 58 | void Clear() override; |
| 59 | void FlushAll() override; | 59 | void FlushAll() override; |
| 60 | void FlushRegion(VAddr addr, u64 size) override; | 60 | void FlushRegion(CacheAddr addr, u64 size) override; |
| 61 | void InvalidateRegion(VAddr addr, u64 size) override; | 61 | void InvalidateRegion(CacheAddr addr, u64 size) override; |
| 62 | void FlushAndInvalidateRegion(VAddr addr, u64 size) override; | 62 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; |
| 63 | bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, | 63 | bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, |
| 64 | const Tegra::Engines::Fermi2D::Regs::Surface& dst, | 64 | const Tegra::Engines::Fermi2D::Regs::Surface& dst, |
| 65 | const Common::Rectangle<u32>& src_rect, | 65 | const Common::Rectangle<u32>& src_rect, |
| @@ -67,7 +67,7 @@ public: | |||
| 67 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, | 67 | bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, |
| 68 | u32 pixel_stride) override; | 68 | u32 pixel_stride) override; |
| 69 | bool AccelerateDrawBatch(bool is_indexed) override; | 69 | bool AccelerateDrawBatch(bool is_indexed) override; |
| 70 | void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) override; | 70 | void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; |
| 71 | void LoadDiskResources(const std::atomic_bool& stop_loading, | 71 | void LoadDiskResources(const std::atomic_bool& stop_loading, |
| 72 | const VideoCore::DiskResourceLoadCallback& callback) override; | 72 | const VideoCore::DiskResourceLoadCallback& callback) override; |
| 73 | 73 | ||
| @@ -94,11 +94,12 @@ private: | |||
| 94 | private: | 94 | private: |
| 95 | Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest; | 95 | Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest; |
| 96 | Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest; | 96 | Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest; |
| 97 | Tegra::Texture::TextureMipmapFilter mip_filter = Tegra::Texture::TextureMipmapFilter::None; | 97 | Tegra::Texture::TextureMipmapFilter mipmap_filter = |
| 98 | Tegra::Texture::TextureMipmapFilter::None; | ||
| 98 | Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge; | 99 | Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge; |
| 99 | Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge; | 100 | Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge; |
| 100 | Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge; | 101 | Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge; |
| 101 | bool uses_depth_compare = false; | 102 | bool use_depth_compare = false; |
| 102 | Tegra::Texture::DepthCompareFunc depth_compare_func = | 103 | Tegra::Texture::DepthCompareFunc depth_compare_func = |
| 103 | Tegra::Texture::DepthCompareFunc::Always; | 104 | Tegra::Texture::DepthCompareFunc::Always; |
| 104 | GLvec4 border_color = {}; | 105 | GLvec4 border_color = {}; |
| @@ -214,6 +215,7 @@ private: | |||
| 214 | GlobalRegionCacheOpenGL global_cache; | 215 | GlobalRegionCacheOpenGL global_cache; |
| 215 | 216 | ||
| 216 | Core::Frontend::EmuWindow& emu_window; | 217 | Core::Frontend::EmuWindow& emu_window; |
| 218 | Core::System& system; | ||
| 217 | 219 | ||
| 218 | ScreenInfo& screen_info; | 220 | ScreenInfo& screen_info; |
| 219 | 221 | ||
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 876698b37..0235317c0 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | |||
| @@ -55,12 +55,11 @@ static void ApplyTextureDefaults(GLuint texture, u32 max_mip_level) { | |||
| 55 | } | 55 | } |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) { | 58 | void SurfaceParams::InitCacheParameters(GPUVAddr gpu_addr_) { |
| 59 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; | 59 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; |
| 60 | const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr_)}; | ||
| 61 | 60 | ||
| 62 | addr = cpu_addr ? *cpu_addr : 0; | ||
| 63 | gpu_addr = gpu_addr_; | 61 | gpu_addr = gpu_addr_; |
| 62 | host_ptr = memory_manager.GetPointer(gpu_addr_); | ||
| 64 | size_in_bytes = SizeInBytesRaw(); | 63 | size_in_bytes = SizeInBytesRaw(); |
| 65 | 64 | ||
| 66 | if (IsPixelFormatASTC(pixel_format)) { | 65 | if (IsPixelFormatASTC(pixel_format)) { |
| @@ -223,7 +222,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only, | |||
| 223 | } | 222 | } |
| 224 | 223 | ||
| 225 | /*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer( | 224 | /*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer( |
| 226 | u32 zeta_width, u32 zeta_height, Tegra::GPUVAddr zeta_address, Tegra::DepthFormat format, | 225 | u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format, |
| 227 | u32 block_width, u32 block_height, u32 block_depth, | 226 | u32 block_width, u32 block_height, u32 block_depth, |
| 228 | Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) { | 227 | Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) { |
| 229 | SurfaceParams params{}; | 228 | SurfaceParams params{}; |
| @@ -400,6 +399,27 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType | |||
| 400 | return format; | 399 | return format; |
| 401 | } | 400 | } |
| 402 | 401 | ||
| 402 | /// Returns the discrepant array target | ||
| 403 | constexpr GLenum GetArrayDiscrepantTarget(SurfaceTarget target) { | ||
| 404 | switch (target) { | ||
| 405 | case SurfaceTarget::Texture1D: | ||
| 406 | return GL_TEXTURE_1D_ARRAY; | ||
| 407 | case SurfaceTarget::Texture2D: | ||
| 408 | return GL_TEXTURE_2D_ARRAY; | ||
| 409 | case SurfaceTarget::Texture3D: | ||
| 410 | return GL_NONE; | ||
| 411 | case SurfaceTarget::Texture1DArray: | ||
| 412 | return GL_TEXTURE_1D; | ||
| 413 | case SurfaceTarget::Texture2DArray: | ||
| 414 | return GL_TEXTURE_2D; | ||
| 415 | case SurfaceTarget::TextureCubemap: | ||
| 416 | return GL_TEXTURE_CUBE_MAP_ARRAY; | ||
| 417 | case SurfaceTarget::TextureCubeArray: | ||
| 418 | return GL_TEXTURE_CUBE_MAP; | ||
| 419 | } | ||
| 420 | return GL_NONE; | ||
| 421 | } | ||
| 422 | |||
| 403 | Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const { | 423 | Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const { |
| 404 | u32 actual_height{std::max(1U, unaligned_height >> mip_level)}; | 424 | u32 actual_height{std::max(1U, unaligned_height >> mip_level)}; |
| 405 | if (IsPixelFormatASTC(pixel_format)) { | 425 | if (IsPixelFormatASTC(pixel_format)) { |
| @@ -425,7 +445,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, | |||
| 425 | MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), | 445 | MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), |
| 426 | params.MipBlockHeight(mip_level), params.MipHeight(mip_level), | 446 | params.MipBlockHeight(mip_level), params.MipHeight(mip_level), |
| 427 | params.MipBlockDepth(mip_level), 1, params.tile_width_spacing, | 447 | params.MipBlockDepth(mip_level), 1, params.tile_width_spacing, |
| 428 | gl_buffer.data() + offset_gl, gl_size, params.addr + offset); | 448 | gl_buffer.data() + offset_gl, params.host_ptr + offset); |
| 429 | offset += layer_size; | 449 | offset += layer_size; |
| 430 | offset_gl += gl_size; | 450 | offset_gl += gl_size; |
| 431 | } | 451 | } |
| @@ -434,7 +454,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, | |||
| 434 | MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), | 454 | MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), |
| 435 | params.MipBlockHeight(mip_level), params.MipHeight(mip_level), | 455 | params.MipBlockHeight(mip_level), params.MipHeight(mip_level), |
| 436 | params.MipBlockDepth(mip_level), depth, params.tile_width_spacing, | 456 | params.MipBlockDepth(mip_level), depth, params.tile_width_spacing, |
| 437 | gl_buffer.data(), gl_buffer.size(), params.addr + offset); | 457 | gl_buffer.data(), params.host_ptr + offset); |
| 438 | } | 458 | } |
| 439 | } | 459 | } |
| 440 | 460 | ||
| @@ -492,9 +512,9 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac | |||
| 492 | "reinterpretation but the texture is tiled."); | 512 | "reinterpretation but the texture is tiled."); |
| 493 | } | 513 | } |
| 494 | const std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes; | 514 | const std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes; |
| 495 | 515 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; | |
| 496 | glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size, | 516 | glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size, |
| 497 | Memory::GetPointer(dst_params.addr + src_params.size_in_bytes)); | 517 | memory_manager.GetPointer(dst_params.gpu_addr + src_params.size_in_bytes)); |
| 498 | } | 518 | } |
| 499 | 519 | ||
| 500 | glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); | 520 | glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); |
| @@ -542,8 +562,14 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac | |||
| 542 | } | 562 | } |
| 543 | 563 | ||
| 544 | CachedSurface::CachedSurface(const SurfaceParams& params) | 564 | CachedSurface::CachedSurface(const SurfaceParams& params) |
| 545 | : params(params), gl_target(SurfaceTargetToGL(params.target)), | 565 | : params{params}, gl_target{SurfaceTargetToGL(params.target)}, |
| 546 | cached_size_in_bytes(params.size_in_bytes) { | 566 | cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { |
| 567 | |||
| 568 | const auto optional_cpu_addr{ | ||
| 569 | Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; | ||
| 570 | ASSERT_MSG(optional_cpu_addr, "optional_cpu_addr is invalid"); | ||
| 571 | cpu_addr = *optional_cpu_addr; | ||
| 572 | |||
| 547 | texture.Create(gl_target); | 573 | texture.Create(gl_target); |
| 548 | 574 | ||
| 549 | // TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0) | 575 | // TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0) |
| @@ -582,19 +608,7 @@ CachedSurface::CachedSurface(const SurfaceParams& params) | |||
| 582 | 608 | ||
| 583 | ApplyTextureDefaults(texture.handle, params.max_mip_level); | 609 | ApplyTextureDefaults(texture.handle, params.max_mip_level); |
| 584 | 610 | ||
| 585 | OpenGL::LabelGLObject(GL_TEXTURE, texture.handle, params.addr, params.IdentityString()); | 611 | OpenGL::LabelGLObject(GL_TEXTURE, texture.handle, params.gpu_addr, params.IdentityString()); |
| 586 | |||
| 587 | // Clamp size to mapped GPU memory region | ||
| 588 | // TODO(bunnei): Super Mario Odyssey maps a 0x40000 byte region and then uses it for a 0x80000 | ||
| 589 | // R32F render buffer. We do not yet know if this is a game bug or something else, but this | ||
| 590 | // check is necessary to prevent flushing from overwriting unmapped memory. | ||
| 591 | |||
| 592 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; | ||
| 593 | const u64 max_size{memory_manager.GetRegionEnd(params.gpu_addr) - params.gpu_addr}; | ||
| 594 | if (cached_size_in_bytes > max_size) { | ||
| 595 | LOG_ERROR(HW_GPU, "Surface size {} exceeds region size {}", params.size_in_bytes, max_size); | ||
| 596 | cached_size_in_bytes = max_size; | ||
| 597 | } | ||
| 598 | } | 612 | } |
| 599 | 613 | ||
| 600 | MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64)); | 614 | MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64)); |
| @@ -612,10 +626,9 @@ void CachedSurface::LoadGLBuffer() { | |||
| 612 | const u32 bpp = params.GetFormatBpp() / 8; | 626 | const u32 bpp = params.GetFormatBpp() / 8; |
| 613 | const u32 copy_size = params.width * bpp; | 627 | const u32 copy_size = params.width * bpp; |
| 614 | if (params.pitch == copy_size) { | 628 | if (params.pitch == copy_size) { |
| 615 | std::memcpy(gl_buffer[0].data(), Memory::GetPointer(params.addr), | 629 | std::memcpy(gl_buffer[0].data(), params.host_ptr, params.size_in_bytes_gl); |
| 616 | params.size_in_bytes_gl); | ||
| 617 | } else { | 630 | } else { |
| 618 | const u8* start = Memory::GetPointer(params.addr); | 631 | const u8* start{params.host_ptr}; |
| 619 | u8* write_to = gl_buffer[0].data(); | 632 | u8* write_to = gl_buffer[0].data(); |
| 620 | for (u32 h = params.height; h > 0; h--) { | 633 | for (u32 h = params.height; h > 0; h--) { |
| 621 | std::memcpy(write_to, start, copy_size); | 634 | std::memcpy(write_to, start, copy_size); |
| @@ -659,8 +672,6 @@ void CachedSurface::FlushGLBuffer() { | |||
| 659 | glPixelStorei(GL_PACK_ROW_LENGTH, 0); | 672 | glPixelStorei(GL_PACK_ROW_LENGTH, 0); |
| 660 | Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width, | 673 | Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width, |
| 661 | params.height, params.depth, true, true); | 674 | params.height, params.depth, true, true); |
| 662 | const u8* const texture_src_data = Memory::GetPointer(params.addr); | ||
| 663 | ASSERT(texture_src_data); | ||
| 664 | if (params.is_tiled) { | 675 | if (params.is_tiled) { |
| 665 | ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", | 676 | ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", |
| 666 | params.block_width, static_cast<u32>(params.target)); | 677 | params.block_width, static_cast<u32>(params.target)); |
| @@ -670,9 +681,9 @@ void CachedSurface::FlushGLBuffer() { | |||
| 670 | const u32 bpp = params.GetFormatBpp() / 8; | 681 | const u32 bpp = params.GetFormatBpp() / 8; |
| 671 | const u32 copy_size = params.width * bpp; | 682 | const u32 copy_size = params.width * bpp; |
| 672 | if (params.pitch == copy_size) { | 683 | if (params.pitch == copy_size) { |
| 673 | std::memcpy(Memory::GetPointer(params.addr), gl_buffer[0].data(), GetSizeInBytes()); | 684 | std::memcpy(params.host_ptr, gl_buffer[0].data(), GetSizeInBytes()); |
| 674 | } else { | 685 | } else { |
| 675 | u8* start = Memory::GetPointer(params.addr); | 686 | u8* start{params.host_ptr}; |
| 676 | const u8* read_to = gl_buffer[0].data(); | 687 | const u8* read_to = gl_buffer[0].data(); |
| 677 | for (u32 h = params.height; h > 0; h--) { | 688 | for (u32 h = params.height; h > 0; h--) { |
| 678 | std::memcpy(start, read_to, copy_size); | 689 | std::memcpy(start, read_to, copy_size); |
| @@ -795,20 +806,22 @@ void CachedSurface::UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, | |||
| 795 | glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); | 806 | glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); |
| 796 | } | 807 | } |
| 797 | 808 | ||
| 798 | void CachedSurface::EnsureTextureView() { | 809 | void CachedSurface::EnsureTextureDiscrepantView() { |
| 799 | if (texture_view.handle != 0) | 810 | if (discrepant_view.handle != 0) |
| 800 | return; | 811 | return; |
| 801 | 812 | ||
| 802 | const GLenum target{TargetLayer()}; | 813 | const GLenum target{GetArrayDiscrepantTarget(params.target)}; |
| 814 | ASSERT(target != GL_NONE); | ||
| 815 | |||
| 803 | const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u}; | 816 | const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u}; |
| 804 | constexpr GLuint min_layer = 0; | 817 | constexpr GLuint min_layer = 0; |
| 805 | constexpr GLuint min_level = 0; | 818 | constexpr GLuint min_level = 0; |
| 806 | 819 | ||
| 807 | glGenTextures(1, &texture_view.handle); | 820 | glGenTextures(1, &discrepant_view.handle); |
| 808 | glTextureView(texture_view.handle, target, texture.handle, gl_internal_format, min_level, | 821 | glTextureView(discrepant_view.handle, target, texture.handle, gl_internal_format, min_level, |
| 809 | params.max_mip_level, min_layer, num_layers); | 822 | params.max_mip_level, min_layer, num_layers); |
| 810 | ApplyTextureDefaults(texture_view.handle, params.max_mip_level); | 823 | ApplyTextureDefaults(discrepant_view.handle, params.max_mip_level); |
| 811 | glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA, | 824 | glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, |
| 812 | reinterpret_cast<const GLint*>(swizzle.data())); | 825 | reinterpret_cast<const GLint*>(swizzle.data())); |
| 813 | } | 826 | } |
| 814 | 827 | ||
| @@ -834,8 +847,8 @@ void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x, | |||
| 834 | swizzle = {new_x, new_y, new_z, new_w}; | 847 | swizzle = {new_x, new_y, new_z, new_w}; |
| 835 | const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data()); | 848 | const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data()); |
| 836 | glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); | 849 | glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); |
| 837 | if (texture_view.handle != 0) { | 850 | if (discrepant_view.handle != 0) { |
| 838 | glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); | 851 | glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data); |
| 839 | } | 852 | } |
| 840 | } | 853 | } |
| 841 | 854 | ||
| @@ -904,12 +917,12 @@ void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) { | |||
| 904 | } | 917 | } |
| 905 | 918 | ||
| 906 | Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) { | 919 | Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) { |
| 907 | if (params.addr == 0 || params.height * params.width == 0) { | 920 | if (!params.IsValid()) { |
| 908 | return {}; | 921 | return {}; |
| 909 | } | 922 | } |
| 910 | 923 | ||
| 911 | // Look up surface in the cache based on address | 924 | // Look up surface in the cache based on address |
| 912 | Surface surface{TryGet(params.addr)}; | 925 | Surface surface{TryGet(params.host_ptr)}; |
| 913 | if (surface) { | 926 | if (surface) { |
| 914 | if (surface->GetSurfaceParams().IsCompatibleSurface(params)) { | 927 | if (surface->GetSurfaceParams().IsCompatibleSurface(params)) { |
| 915 | // Use the cached surface as-is unless it's not synced with memory | 928 | // Use the cached surface as-is unless it's not synced with memory |
| @@ -920,7 +933,7 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres | |||
| 920 | // If surface parameters changed and we care about keeping the previous data, recreate | 933 | // If surface parameters changed and we care about keeping the previous data, recreate |
| 921 | // the surface from the old one | 934 | // the surface from the old one |
| 922 | Surface new_surface{RecreateSurface(surface, params)}; | 935 | Surface new_surface{RecreateSurface(surface, params)}; |
| 923 | UnregisterSurface(surface); | 936 | Unregister(surface); |
| 924 | Register(new_surface); | 937 | Register(new_surface); |
| 925 | if (new_surface->IsUploaded()) { | 938 | if (new_surface->IsUploaded()) { |
| 926 | RegisterReinterpretSurface(new_surface); | 939 | RegisterReinterpretSurface(new_surface); |
| @@ -928,7 +941,7 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres | |||
| 928 | return new_surface; | 941 | return new_surface; |
| 929 | } else { | 942 | } else { |
| 930 | // Delete the old surface before creating a new one to prevent collisions. | 943 | // Delete the old surface before creating a new one to prevent collisions. |
| 931 | UnregisterSurface(surface); | 944 | Unregister(surface); |
| 932 | } | 945 | } |
| 933 | } | 946 | } |
| 934 | 947 | ||
| @@ -958,14 +971,16 @@ void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface, | |||
| 958 | const Surface& dst_surface) { | 971 | const Surface& dst_surface) { |
| 959 | const auto& init_params{src_surface->GetSurfaceParams()}; | 972 | const auto& init_params{src_surface->GetSurfaceParams()}; |
| 960 | const auto& dst_params{dst_surface->GetSurfaceParams()}; | 973 | const auto& dst_params{dst_surface->GetSurfaceParams()}; |
| 961 | VAddr address = init_params.addr; | 974 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; |
| 962 | const std::size_t layer_size = dst_params.LayerMemorySize(); | 975 | GPUVAddr address{init_params.gpu_addr}; |
| 976 | const std::size_t layer_size{dst_params.LayerMemorySize()}; | ||
| 963 | for (u32 layer = 0; layer < dst_params.depth; layer++) { | 977 | for (u32 layer = 0; layer < dst_params.depth; layer++) { |
| 964 | for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) { | 978 | for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) { |
| 965 | const VAddr sub_address = address + dst_params.GetMipmapLevelOffset(mipmap); | 979 | const GPUVAddr sub_address{address + dst_params.GetMipmapLevelOffset(mipmap)}; |
| 966 | const Surface& copy = TryGet(sub_address); | 980 | const Surface& copy{TryGet(memory_manager.GetPointer(sub_address))}; |
| 967 | if (!copy) | 981 | if (!copy) { |
| 968 | continue; | 982 | continue; |
| 983 | } | ||
| 969 | const auto& src_params{copy->GetSurfaceParams()}; | 984 | const auto& src_params{copy->GetSurfaceParams()}; |
| 970 | const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))}; | 985 | const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))}; |
| 971 | const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))}; | 986 | const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))}; |
| @@ -1140,7 +1155,8 @@ void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface, | |||
| 1140 | const auto& dst_params{dst_surface->GetSurfaceParams()}; | 1155 | const auto& dst_params{dst_surface->GetSurfaceParams()}; |
| 1141 | 1156 | ||
| 1142 | // Flush enough memory for both the source and destination surface | 1157 | // Flush enough memory for both the source and destination surface |
| 1143 | FlushRegion(src_params.addr, std::max(src_params.MemorySize(), dst_params.MemorySize())); | 1158 | FlushRegion(ToCacheAddr(src_params.host_ptr), |
| 1159 | std::max(src_params.MemorySize(), dst_params.MemorySize())); | ||
| 1144 | 1160 | ||
| 1145 | LoadSurface(dst_surface); | 1161 | LoadSurface(dst_surface); |
| 1146 | } | 1162 | } |
| @@ -1192,8 +1208,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, | |||
| 1192 | return new_surface; | 1208 | return new_surface; |
| 1193 | } | 1209 | } |
| 1194 | 1210 | ||
| 1195 | Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const { | 1211 | Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(const u8* host_ptr) const { |
| 1196 | return TryGet(addr); | 1212 | return TryGet(host_ptr); |
| 1197 | } | 1213 | } |
| 1198 | 1214 | ||
| 1199 | void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) { | 1215 | void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) { |
| @@ -1220,9 +1236,9 @@ static std::optional<u32> TryFindBestMipMap(std::size_t memory, const SurfacePar | |||
| 1220 | return {}; | 1236 | return {}; |
| 1221 | } | 1237 | } |
| 1222 | 1238 | ||
| 1223 | static std::optional<u32> TryFindBestLayer(VAddr addr, const SurfaceParams params, u32 mipmap) { | 1239 | static std::optional<u32> TryFindBestLayer(GPUVAddr addr, const SurfaceParams params, u32 mipmap) { |
| 1224 | const std::size_t size = params.LayerMemorySize(); | 1240 | const std::size_t size{params.LayerMemorySize()}; |
| 1225 | VAddr start = params.addr + params.GetMipmapLevelOffset(mipmap); | 1241 | GPUVAddr start{params.gpu_addr + params.GetMipmapLevelOffset(mipmap)}; |
| 1226 | for (u32 i = 0; i < params.depth; i++) { | 1242 | for (u32 i = 0; i < params.depth; i++) { |
| 1227 | if (start == addr) { | 1243 | if (start == addr) { |
| 1228 | return {i}; | 1244 | return {i}; |
| @@ -1244,7 +1260,7 @@ static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surfa | |||
| 1244 | src_params.height == dst_params.MipHeight(*level) && | 1260 | src_params.height == dst_params.MipHeight(*level) && |
| 1245 | src_params.block_height >= dst_params.MipBlockHeight(*level)) { | 1261 | src_params.block_height >= dst_params.MipBlockHeight(*level)) { |
| 1246 | const std::optional<u32> slot = | 1262 | const std::optional<u32> slot = |
| 1247 | TryFindBestLayer(render_surface->GetAddr(), dst_params, *level); | 1263 | TryFindBestLayer(render_surface->GetSurfaceParams().gpu_addr, dst_params, *level); |
| 1248 | if (slot.has_value()) { | 1264 | if (slot.has_value()) { |
| 1249 | glCopyImageSubData(render_surface->Texture().handle, | 1265 | glCopyImageSubData(render_surface->Texture().handle, |
| 1250 | SurfaceTargetToGL(src_params.target), 0, 0, 0, 0, | 1266 | SurfaceTargetToGL(src_params.target), 0, 0, 0, 0, |
| @@ -1260,8 +1276,8 @@ static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surfa | |||
| 1260 | } | 1276 | } |
| 1261 | 1277 | ||
| 1262 | static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) { | 1278 | static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) { |
| 1263 | const VAddr bound1 = blitted_surface->GetAddr() + blitted_surface->GetMemorySize(); | 1279 | const VAddr bound1 = blitted_surface->GetCpuAddr() + blitted_surface->GetMemorySize(); |
| 1264 | const VAddr bound2 = render_surface->GetAddr() + render_surface->GetMemorySize(); | 1280 | const VAddr bound2 = render_surface->GetCpuAddr() + render_surface->GetMemorySize(); |
| 1265 | if (bound2 > bound1) | 1281 | if (bound2 > bound1) |
| 1266 | return true; | 1282 | return true; |
| 1267 | const auto& dst_params = blitted_surface->GetSurfaceParams(); | 1283 | const auto& dst_params = blitted_surface->GetSurfaceParams(); |
| @@ -1279,12 +1295,12 @@ static bool IsReinterpretInvalidSecond(const Surface render_surface, | |||
| 1279 | bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface, | 1295 | bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface, |
| 1280 | Surface intersect) { | 1296 | Surface intersect) { |
| 1281 | if (IsReinterpretInvalid(triggering_surface, intersect)) { | 1297 | if (IsReinterpretInvalid(triggering_surface, intersect)) { |
| 1282 | UnregisterSurface(intersect); | 1298 | Unregister(intersect); |
| 1283 | return false; | 1299 | return false; |
| 1284 | } | 1300 | } |
| 1285 | if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) { | 1301 | if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) { |
| 1286 | if (IsReinterpretInvalidSecond(triggering_surface, intersect)) { | 1302 | if (IsReinterpretInvalidSecond(triggering_surface, intersect)) { |
| 1287 | UnregisterSurface(intersect); | 1303 | Unregister(intersect); |
| 1288 | return false; | 1304 | return false; |
| 1289 | } | 1305 | } |
| 1290 | FlushObject(intersect); | 1306 | FlushObject(intersect); |
| @@ -1304,7 +1320,8 @@ void RasterizerCacheOpenGL::SignalPreDrawCall() { | |||
| 1304 | void RasterizerCacheOpenGL::SignalPostDrawCall() { | 1320 | void RasterizerCacheOpenGL::SignalPostDrawCall() { |
| 1305 | for (u32 i = 0; i < Maxwell::NumRenderTargets; i++) { | 1321 | for (u32 i = 0; i < Maxwell::NumRenderTargets; i++) { |
| 1306 | if (current_color_buffers[i] != nullptr) { | 1322 | if (current_color_buffers[i] != nullptr) { |
| 1307 | Surface intersect = CollideOnReinterpretedSurface(current_color_buffers[i]->GetAddr()); | 1323 | Surface intersect = |
| 1324 | CollideOnReinterpretedSurface(current_color_buffers[i]->GetCacheAddr()); | ||
| 1308 | if (intersect != nullptr) { | 1325 | if (intersect != nullptr) { |
| 1309 | PartialReinterpretSurface(current_color_buffers[i], intersect); | 1326 | PartialReinterpretSurface(current_color_buffers[i], intersect); |
| 1310 | texception = true; | 1327 | texception = true; |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 797bbdc9c..c644271d0 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h | |||
| @@ -109,6 +109,11 @@ struct SurfaceParams { | |||
| 109 | return size; | 109 | return size; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | /// Returns true if the parameters constitute a valid rasterizer surface. | ||
| 113 | bool IsValid() const { | ||
| 114 | return gpu_addr && host_ptr && height && width; | ||
| 115 | } | ||
| 116 | |||
| 112 | /// Returns the exact size of the memory occupied by a layer in a texture in VRAM, including | 117 | /// Returns the exact size of the memory occupied by a layer in a texture in VRAM, including |
| 113 | /// mipmaps. | 118 | /// mipmaps. |
| 114 | std::size_t LayerMemorySize() const { | 119 | std::size_t LayerMemorySize() const { |
| @@ -210,7 +215,7 @@ struct SurfaceParams { | |||
| 210 | 215 | ||
| 211 | /// Creates SurfaceParams for a depth buffer configuration | 216 | /// Creates SurfaceParams for a depth buffer configuration |
| 212 | static SurfaceParams CreateForDepthBuffer( | 217 | static SurfaceParams CreateForDepthBuffer( |
| 213 | u32 zeta_width, u32 zeta_height, Tegra::GPUVAddr zeta_address, Tegra::DepthFormat format, | 218 | u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format, |
| 214 | u32 block_width, u32 block_height, u32 block_depth, | 219 | u32 block_width, u32 block_height, u32 block_depth, |
| 215 | Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type); | 220 | Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type); |
| 216 | 221 | ||
| @@ -232,7 +237,7 @@ struct SurfaceParams { | |||
| 232 | } | 237 | } |
| 233 | 238 | ||
| 234 | /// Initializes parameters for caching, should be called after everything has been initialized | 239 | /// Initializes parameters for caching, should be called after everything has been initialized |
| 235 | void InitCacheParameters(Tegra::GPUVAddr gpu_addr); | 240 | void InitCacheParameters(GPUVAddr gpu_addr); |
| 236 | 241 | ||
| 237 | std::string TargetName() const { | 242 | std::string TargetName() const { |
| 238 | switch (target) { | 243 | switch (target) { |
| @@ -296,8 +301,8 @@ struct SurfaceParams { | |||
| 296 | bool is_array; | 301 | bool is_array; |
| 297 | bool srgb_conversion; | 302 | bool srgb_conversion; |
| 298 | // Parameters used for caching | 303 | // Parameters used for caching |
| 299 | VAddr addr; | 304 | u8* host_ptr; |
| 300 | Tegra::GPUVAddr gpu_addr; | 305 | GPUVAddr gpu_addr; |
| 301 | std::size_t size_in_bytes; | 306 | std::size_t size_in_bytes; |
| 302 | std::size_t size_in_bytes_gl; | 307 | std::size_t size_in_bytes_gl; |
| 303 | 308 | ||
| @@ -345,10 +350,10 @@ class RasterizerOpenGL; | |||
| 345 | 350 | ||
| 346 | class CachedSurface final : public RasterizerCacheObject { | 351 | class CachedSurface final : public RasterizerCacheObject { |
| 347 | public: | 352 | public: |
| 348 | CachedSurface(const SurfaceParams& params); | 353 | explicit CachedSurface(const SurfaceParams& params); |
| 349 | 354 | ||
| 350 | VAddr GetAddr() const override { | 355 | VAddr GetCpuAddr() const override { |
| 351 | return params.addr; | 356 | return cpu_addr; |
| 352 | } | 357 | } |
| 353 | 358 | ||
| 354 | std::size_t GetSizeInBytes() const override { | 359 | std::size_t GetSizeInBytes() const override { |
| @@ -367,31 +372,19 @@ public: | |||
| 367 | return texture; | 372 | return texture; |
| 368 | } | 373 | } |
| 369 | 374 | ||
| 370 | const OGLTexture& TextureLayer() { | 375 | const OGLTexture& Texture(bool as_array) { |
| 371 | if (params.is_array) { | 376 | if (params.is_array == as_array) { |
| 372 | return Texture(); | 377 | return texture; |
| 378 | } else { | ||
| 379 | EnsureTextureDiscrepantView(); | ||
| 380 | return discrepant_view; | ||
| 373 | } | 381 | } |
| 374 | EnsureTextureView(); | ||
| 375 | return texture_view; | ||
| 376 | } | 382 | } |
| 377 | 383 | ||
| 378 | GLenum Target() const { | 384 | GLenum Target() const { |
| 379 | return gl_target; | 385 | return gl_target; |
| 380 | } | 386 | } |
| 381 | 387 | ||
| 382 | GLenum TargetLayer() const { | ||
| 383 | using VideoCore::Surface::SurfaceTarget; | ||
| 384 | switch (params.target) { | ||
| 385 | case SurfaceTarget::Texture1D: | ||
| 386 | return GL_TEXTURE_1D_ARRAY; | ||
| 387 | case SurfaceTarget::Texture2D: | ||
| 388 | return GL_TEXTURE_2D_ARRAY; | ||
| 389 | case SurfaceTarget::TextureCubemap: | ||
| 390 | return GL_TEXTURE_CUBE_MAP_ARRAY; | ||
| 391 | } | ||
| 392 | return Target(); | ||
| 393 | } | ||
| 394 | |||
| 395 | const SurfaceParams& GetSurfaceParams() const { | 388 | const SurfaceParams& GetSurfaceParams() const { |
| 396 | return params; | 389 | return params; |
| 397 | } | 390 | } |
| @@ -431,10 +424,10 @@ public: | |||
| 431 | private: | 424 | private: |
| 432 | void UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, GLuint draw_fb_handle); | 425 | void UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, GLuint draw_fb_handle); |
| 433 | 426 | ||
| 434 | void EnsureTextureView(); | 427 | void EnsureTextureDiscrepantView(); |
| 435 | 428 | ||
| 436 | OGLTexture texture; | 429 | OGLTexture texture; |
| 437 | OGLTexture texture_view; | 430 | OGLTexture discrepant_view; |
| 438 | std::vector<std::vector<u8>> gl_buffer; | 431 | std::vector<std::vector<u8>> gl_buffer; |
| 439 | SurfaceParams params{}; | 432 | SurfaceParams params{}; |
| 440 | GLenum gl_target{}; | 433 | GLenum gl_target{}; |
| @@ -444,6 +437,7 @@ private: | |||
| 444 | std::size_t memory_size; | 437 | std::size_t memory_size; |
| 445 | bool reinterpreted = false; | 438 | bool reinterpreted = false; |
| 446 | bool must_reload = false; | 439 | bool must_reload = false; |
| 440 | VAddr cpu_addr{}; | ||
| 447 | }; | 441 | }; |
| 448 | 442 | ||
| 449 | class RasterizerCacheOpenGL final : public RasterizerCache<Surface> { | 443 | class RasterizerCacheOpenGL final : public RasterizerCache<Surface> { |
| @@ -461,7 +455,7 @@ public: | |||
| 461 | Surface GetColorBufferSurface(std::size_t index, bool preserve_contents); | 455 | Surface GetColorBufferSurface(std::size_t index, bool preserve_contents); |
| 462 | 456 | ||
| 463 | /// Tries to find a framebuffer using on the provided CPU address | 457 | /// Tries to find a framebuffer using on the provided CPU address |
| 464 | Surface TryFindFramebufferSurface(VAddr addr) const; | 458 | Surface TryFindFramebufferSurface(const u8* host_ptr) const; |
| 465 | 459 | ||
| 466 | /// Copies the contents of one surface to another | 460 | /// Copies the contents of one surface to another |
| 467 | void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config, | 461 | void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config, |
| @@ -518,12 +512,12 @@ private: | |||
| 518 | std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers; | 512 | std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers; |
| 519 | Surface last_depth_buffer; | 513 | Surface last_depth_buffer; |
| 520 | 514 | ||
| 521 | using SurfaceIntervalCache = boost::icl::interval_map<VAddr, Surface>; | 515 | using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>; |
| 522 | using SurfaceInterval = typename SurfaceIntervalCache::interval_type; | 516 | using SurfaceInterval = typename SurfaceIntervalCache::interval_type; |
| 523 | 517 | ||
| 524 | static auto GetReinterpretInterval(const Surface& object) { | 518 | static auto GetReinterpretInterval(const Surface& object) { |
| 525 | return SurfaceInterval::right_open(object->GetAddr() + 1, | 519 | return SurfaceInterval::right_open(object->GetCacheAddr() + 1, |
| 526 | object->GetAddr() + object->GetMemorySize() - 1); | 520 | object->GetCacheAddr() + object->GetMemorySize() - 1); |
| 527 | } | 521 | } |
| 528 | 522 | ||
| 529 | // Reinterpreted surfaces are very fragil as the game may keep rendering into them. | 523 | // Reinterpreted surfaces are very fragil as the game may keep rendering into them. |
| @@ -535,7 +529,7 @@ private: | |||
| 535 | reinterpret_surface->MarkReinterpreted(); | 529 | reinterpret_surface->MarkReinterpreted(); |
| 536 | } | 530 | } |
| 537 | 531 | ||
| 538 | Surface CollideOnReinterpretedSurface(VAddr addr) const { | 532 | Surface CollideOnReinterpretedSurface(CacheAddr addr) const { |
| 539 | const SurfaceInterval interval{addr}; | 533 | const SurfaceInterval interval{addr}; |
| 540 | for (auto& pair : | 534 | for (auto& pair : |
| 541 | boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) { | 535 | boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) { |
| @@ -544,13 +538,17 @@ private: | |||
| 544 | return nullptr; | 538 | return nullptr; |
| 545 | } | 539 | } |
| 546 | 540 | ||
| 541 | void Register(const Surface& object) { | ||
| 542 | RasterizerCache<Surface>::Register(object); | ||
| 543 | } | ||
| 544 | |||
| 547 | /// Unregisters an object from the cache | 545 | /// Unregisters an object from the cache |
| 548 | void UnregisterSurface(const Surface& object) { | 546 | void Unregister(const Surface& object) { |
| 549 | if (object->IsReinterpreted()) { | 547 | if (object->IsReinterpreted()) { |
| 550 | auto interval = GetReinterpretInterval(object); | 548 | auto interval = GetReinterpretInterval(object); |
| 551 | reinterpreted_surfaces.erase(interval); | 549 | reinterpreted_surfaces.erase(interval); |
| 552 | } | 550 | } |
| 553 | Unregister(object); | 551 | RasterizerCache<Surface>::Unregister(object); |
| 554 | } | 552 | } |
| 555 | }; | 553 | }; |
| 556 | 554 | ||
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 4883e4f62..1f8eca6f0 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp | |||
| @@ -32,19 +32,16 @@ struct UnspecializedShader { | |||
| 32 | namespace { | 32 | namespace { |
| 33 | 33 | ||
| 34 | /// Gets the address for the specified shader stage program | 34 | /// Gets the address for the specified shader stage program |
| 35 | VAddr GetShaderAddress(Maxwell::ShaderProgram program) { | 35 | GPUVAddr GetShaderAddress(Maxwell::ShaderProgram program) { |
| 36 | const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); | 36 | const auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()}; |
| 37 | const auto& shader_config = gpu.regs.shader_config[static_cast<std::size_t>(program)]; | 37 | const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]}; |
| 38 | const auto address = gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() + | 38 | return gpu.regs.code_address.CodeAddress() + shader_config.offset; |
| 39 | shader_config.offset); | ||
| 40 | ASSERT_MSG(address, "Invalid GPU address"); | ||
| 41 | return *address; | ||
| 42 | } | 39 | } |
| 43 | 40 | ||
| 44 | /// Gets the shader program code from memory for the specified address | 41 | /// Gets the shader program code from memory for the specified address |
| 45 | ProgramCode GetShaderCode(VAddr addr) { | 42 | ProgramCode GetShaderCode(const u8* host_ptr) { |
| 46 | ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); | 43 | ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); |
| 47 | Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64)); | 44 | std::memcpy(program_code.data(), host_ptr, program_code.size() * sizeof(u64)); |
| 48 | return program_code; | 45 | return program_code; |
| 49 | } | 46 | } |
| 50 | 47 | ||
| @@ -214,12 +211,13 @@ std::set<GLenum> GetSupportedFormats() { | |||
| 214 | 211 | ||
| 215 | } // namespace | 212 | } // namespace |
| 216 | 213 | ||
| 217 | CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, | 214 | CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, |
| 218 | ShaderDiskCacheOpenGL& disk_cache, | 215 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 219 | const PrecompiledPrograms& precompiled_programs, | 216 | const PrecompiledPrograms& precompiled_programs, |
| 220 | ProgramCode&& program_code, ProgramCode&& program_code_b) | 217 | ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) |
| 221 | : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type}, | 218 | : host_ptr{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, |
| 222 | disk_cache{disk_cache}, precompiled_programs{precompiled_programs} { | 219 | program_type{program_type}, disk_cache{disk_cache}, |
| 220 | precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { | ||
| 223 | 221 | ||
| 224 | const std::size_t code_size = CalculateProgramSize(program_code); | 222 | const std::size_t code_size = CalculateProgramSize(program_code); |
| 225 | const std::size_t code_size_b = | 223 | const std::size_t code_size_b = |
| @@ -243,12 +241,13 @@ CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderPro | |||
| 243 | disk_cache.SaveRaw(raw); | 241 | disk_cache.SaveRaw(raw); |
| 244 | } | 242 | } |
| 245 | 243 | ||
| 246 | CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, | 244 | CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, |
| 247 | ShaderDiskCacheOpenGL& disk_cache, | 245 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 248 | const PrecompiledPrograms& precompiled_programs, | 246 | const PrecompiledPrograms& precompiled_programs, |
| 249 | GLShader::ProgramResult result) | 247 | GLShader::ProgramResult result, u8* host_ptr) |
| 250 | : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type}, | 248 | : cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, program_type{program_type}, |
| 251 | disk_cache{disk_cache}, precompiled_programs{precompiled_programs} { | 249 | disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ |
| 250 | host_ptr} { | ||
| 252 | 251 | ||
| 253 | code = std::move(result.first); | 252 | code = std::move(result.first); |
| 254 | entries = result.second; | 253 | entries = result.second; |
| @@ -271,7 +270,7 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive | |||
| 271 | disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); | 270 | disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); |
| 272 | } | 271 | } |
| 273 | 272 | ||
| 274 | LabelGLObject(GL_PROGRAM, program->handle, addr); | 273 | LabelGLObject(GL_PROGRAM, program->handle, cpu_addr); |
| 275 | } | 274 | } |
| 276 | 275 | ||
| 277 | handle = program->handle; | 276 | handle = program->handle; |
| @@ -323,7 +322,7 @@ GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBind | |||
| 323 | disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); | 322 | disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings)); |
| 324 | } | 323 | } |
| 325 | 324 | ||
| 326 | LabelGLObject(GL_PROGRAM, target_program->handle, addr, debug_name); | 325 | LabelGLObject(GL_PROGRAM, target_program->handle, cpu_addr, debug_name); |
| 327 | 326 | ||
| 328 | return target_program->handle; | 327 | return target_program->handle; |
| 329 | }; | 328 | }; |
| @@ -486,29 +485,32 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { | |||
| 486 | return last_shaders[static_cast<u32>(program)]; | 485 | return last_shaders[static_cast<u32>(program)]; |
| 487 | } | 486 | } |
| 488 | 487 | ||
| 489 | const VAddr program_addr{GetShaderAddress(program)}; | 488 | auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; |
| 489 | const GPUVAddr program_addr{GetShaderAddress(program)}; | ||
| 490 | 490 | ||
| 491 | // Look up shader in the cache based on address | 491 | // Look up shader in the cache based on address |
| 492 | Shader shader{TryGet(program_addr)}; | 492 | const auto& host_ptr{memory_manager.GetPointer(program_addr)}; |
| 493 | Shader shader{TryGet(host_ptr)}; | ||
| 493 | 494 | ||
| 494 | if (!shader) { | 495 | if (!shader) { |
| 495 | // No shader found - create a new one | 496 | // No shader found - create a new one |
| 496 | ProgramCode program_code = GetShaderCode(program_addr); | 497 | ProgramCode program_code{GetShaderCode(host_ptr)}; |
| 497 | ProgramCode program_code_b; | 498 | ProgramCode program_code_b; |
| 498 | if (program == Maxwell::ShaderProgram::VertexA) { | 499 | if (program == Maxwell::ShaderProgram::VertexA) { |
| 499 | program_code_b = GetShaderCode(GetShaderAddress(Maxwell::ShaderProgram::VertexB)); | 500 | program_code_b = GetShaderCode( |
| 501 | memory_manager.GetPointer(GetShaderAddress(Maxwell::ShaderProgram::VertexB))); | ||
| 500 | } | 502 | } |
| 501 | const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b); | 503 | const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b); |
| 502 | 504 | const VAddr cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)}; | |
| 503 | const auto found = precompiled_shaders.find(unique_identifier); | 505 | const auto found = precompiled_shaders.find(unique_identifier); |
| 504 | if (found != precompiled_shaders.end()) { | 506 | if (found != precompiled_shaders.end()) { |
| 505 | shader = | 507 | shader = |
| 506 | std::make_shared<CachedShader>(program_addr, unique_identifier, program, disk_cache, | 508 | std::make_shared<CachedShader>(cpu_addr, unique_identifier, program, disk_cache, |
| 507 | precompiled_programs, found->second); | 509 | precompiled_programs, found->second, host_ptr); |
| 508 | } else { | 510 | } else { |
| 509 | shader = std::make_shared<CachedShader>( | 511 | shader = std::make_shared<CachedShader>( |
| 510 | program_addr, unique_identifier, program, disk_cache, precompiled_programs, | 512 | cpu_addr, unique_identifier, program, disk_cache, precompiled_programs, |
| 511 | std::move(program_code), std::move(program_code_b)); | 513 | std::move(program_code), std::move(program_code_b), host_ptr); |
| 512 | } | 514 | } |
| 513 | Register(shader); | 515 | Register(shader); |
| 514 | } | 516 | } |
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 97eed192f..fd1c85115 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h | |||
| @@ -39,18 +39,18 @@ using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>; | |||
| 39 | 39 | ||
| 40 | class CachedShader final : public RasterizerCacheObject { | 40 | class CachedShader final : public RasterizerCacheObject { |
| 41 | public: | 41 | public: |
| 42 | explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, | 42 | explicit CachedShader(VAddr cpu_addr, u64 unique_identifier, |
| 43 | ShaderDiskCacheOpenGL& disk_cache, | 43 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 44 | const PrecompiledPrograms& precompiled_programs, | 44 | const PrecompiledPrograms& precompiled_programs, |
| 45 | ProgramCode&& program_code, ProgramCode&& program_code_b); | 45 | ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr); |
| 46 | 46 | ||
| 47 | explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, | 47 | explicit CachedShader(VAddr cpu_addr, u64 unique_identifier, |
| 48 | ShaderDiskCacheOpenGL& disk_cache, | 48 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 49 | const PrecompiledPrograms& precompiled_programs, | 49 | const PrecompiledPrograms& precompiled_programs, |
| 50 | GLShader::ProgramResult result); | 50 | GLShader::ProgramResult result, u8* host_ptr); |
| 51 | 51 | ||
| 52 | VAddr GetAddr() const override { | 52 | VAddr GetCpuAddr() const override { |
| 53 | return addr; | 53 | return cpu_addr; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | std::size_t GetSizeInBytes() const override { | 56 | std::size_t GetSizeInBytes() const override { |
| @@ -91,7 +91,8 @@ private: | |||
| 91 | 91 | ||
| 92 | ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const; | 92 | ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const; |
| 93 | 93 | ||
| 94 | VAddr addr{}; | 94 | u8* host_ptr{}; |
| 95 | VAddr cpu_addr{}; | ||
| 95 | u64 unique_identifier{}; | 96 | u64 unique_identifier{}; |
| 96 | Maxwell::ShaderProgram program_type{}; | 97 | Maxwell::ShaderProgram program_type{}; |
| 97 | ShaderDiskCacheOpenGL& disk_cache; | 98 | ShaderDiskCacheOpenGL& disk_cache; |
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 72ff6ac6a..11d1169f0 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp | |||
| @@ -5,7 +5,9 @@ | |||
| 5 | #include <array> | 5 | #include <array> |
| 6 | #include <string> | 6 | #include <string> |
| 7 | #include <string_view> | 7 | #include <string_view> |
| 8 | #include <utility> | ||
| 8 | #include <variant> | 9 | #include <variant> |
| 10 | #include <vector> | ||
| 9 | 11 | ||
| 10 | #include <fmt/format.h> | 12 | #include <fmt/format.h> |
| 11 | 13 | ||
| @@ -717,7 +719,7 @@ private: | |||
| 717 | } | 719 | } |
| 718 | 720 | ||
| 719 | std::string GenerateTexture(Operation operation, const std::string& func, | 721 | std::string GenerateTexture(Operation operation, const std::string& func, |
| 720 | bool is_extra_int = false) { | 722 | const std::vector<std::pair<Type, Node>>& extras) { |
| 721 | constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"}; | 723 | constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"}; |
| 722 | 724 | ||
| 723 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); | 725 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); |
| @@ -738,36 +740,47 @@ private: | |||
| 738 | expr += Visit(operation[i]); | 740 | expr += Visit(operation[i]); |
| 739 | 741 | ||
| 740 | const std::size_t next = i + 1; | 742 | const std::size_t next = i + 1; |
| 741 | if (next < count || has_array || has_shadow) | 743 | if (next < count) |
| 742 | expr += ", "; | 744 | expr += ", "; |
| 743 | } | 745 | } |
| 744 | if (has_array) { | 746 | if (has_array) { |
| 745 | expr += "float(ftoi(" + Visit(meta->array) + "))"; | 747 | expr += ", float(ftoi(" + Visit(meta->array) + "))"; |
| 746 | } | 748 | } |
| 747 | if (has_shadow) { | 749 | if (has_shadow) { |
| 748 | if (has_array) | 750 | expr += ", " + Visit(meta->depth_compare); |
| 749 | expr += ", "; | ||
| 750 | expr += Visit(meta->depth_compare); | ||
| 751 | } | 751 | } |
| 752 | expr += ')'; | 752 | expr += ')'; |
| 753 | 753 | ||
| 754 | for (const Node extra : meta->extras) { | 754 | for (const auto& extra_pair : extras) { |
| 755 | const auto [type, operand] = extra_pair; | ||
| 756 | if (operand == nullptr) { | ||
| 757 | continue; | ||
| 758 | } | ||
| 755 | expr += ", "; | 759 | expr += ", "; |
| 756 | if (is_extra_int) { | 760 | |
| 757 | if (const auto immediate = std::get_if<ImmediateNode>(extra)) { | 761 | switch (type) { |
| 762 | case Type::Int: | ||
| 763 | if (const auto immediate = std::get_if<ImmediateNode>(operand)) { | ||
| 758 | // Inline the string as an immediate integer in GLSL (some extra arguments are | 764 | // Inline the string as an immediate integer in GLSL (some extra arguments are |
| 759 | // required to be constant) | 765 | // required to be constant) |
| 760 | expr += std::to_string(static_cast<s32>(immediate->GetValue())); | 766 | expr += std::to_string(static_cast<s32>(immediate->GetValue())); |
| 761 | } else { | 767 | } else { |
| 762 | expr += "ftoi(" + Visit(extra) + ')'; | 768 | expr += "ftoi(" + Visit(operand) + ')'; |
| 763 | } | 769 | } |
| 764 | } else { | 770 | break; |
| 765 | expr += Visit(extra); | 771 | case Type::Float: |
| 772 | expr += Visit(operand); | ||
| 773 | break; | ||
| 774 | default: { | ||
| 775 | const auto type_int = static_cast<u32>(type); | ||
| 776 | UNIMPLEMENTED_MSG("Unimplemented extra type={}", type_int); | ||
| 777 | expr += '0'; | ||
| 778 | break; | ||
| 779 | } | ||
| 766 | } | 780 | } |
| 767 | } | 781 | } |
| 768 | 782 | ||
| 769 | expr += ')'; | 783 | return expr + ')'; |
| 770 | return expr; | ||
| 771 | } | 784 | } |
| 772 | 785 | ||
| 773 | std::string Assign(Operation operation) { | 786 | std::string Assign(Operation operation) { |
| @@ -1146,7 +1159,7 @@ private: | |||
| 1146 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); | 1159 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); |
| 1147 | ASSERT(meta); | 1160 | ASSERT(meta); |
| 1148 | 1161 | ||
| 1149 | std::string expr = GenerateTexture(operation, "texture"); | 1162 | std::string expr = GenerateTexture(operation, "texture", {{Type::Float, meta->bias}}); |
| 1150 | if (meta->sampler.IsShadow()) { | 1163 | if (meta->sampler.IsShadow()) { |
| 1151 | expr = "vec4(" + expr + ')'; | 1164 | expr = "vec4(" + expr + ')'; |
| 1152 | } | 1165 | } |
| @@ -1157,7 +1170,7 @@ private: | |||
| 1157 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); | 1170 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); |
| 1158 | ASSERT(meta); | 1171 | ASSERT(meta); |
| 1159 | 1172 | ||
| 1160 | std::string expr = GenerateTexture(operation, "textureLod"); | 1173 | std::string expr = GenerateTexture(operation, "textureLod", {{Type::Float, meta->lod}}); |
| 1161 | if (meta->sampler.IsShadow()) { | 1174 | if (meta->sampler.IsShadow()) { |
| 1162 | expr = "vec4(" + expr + ')'; | 1175 | expr = "vec4(" + expr + ')'; |
| 1163 | } | 1176 | } |
| @@ -1168,7 +1181,8 @@ private: | |||
| 1168 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); | 1181 | const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); |
| 1169 | ASSERT(meta); | 1182 | ASSERT(meta); |
| 1170 | 1183 | ||
| 1171 | return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) + | 1184 | const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; |
| 1185 | return GenerateTexture(operation, "textureGather", {{type, meta->component}}) + | ||
| 1172 | GetSwizzle(meta->element); | 1186 | GetSwizzle(meta->element); |
| 1173 | } | 1187 | } |
| 1174 | 1188 | ||
| @@ -1197,8 +1211,8 @@ private: | |||
| 1197 | ASSERT(meta); | 1211 | ASSERT(meta); |
| 1198 | 1212 | ||
| 1199 | if (meta->element < 2) { | 1213 | if (meta->element < 2) { |
| 1200 | return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" + | 1214 | return "itof(int((" + GenerateTexture(operation, "textureQueryLod", {}) + |
| 1201 | GetSwizzle(meta->element) + "))"; | 1215 | " * vec2(256))" + GetSwizzle(meta->element) + "))"; |
| 1202 | } | 1216 | } |
| 1203 | return "0"; | 1217 | return "0"; |
| 1204 | } | 1218 | } |
| @@ -1224,9 +1238,9 @@ private: | |||
| 1224 | else if (next < count) | 1238 | else if (next < count) |
| 1225 | expr += ", "; | 1239 | expr += ", "; |
| 1226 | } | 1240 | } |
| 1227 | for (std::size_t i = 0; i < meta->extras.size(); ++i) { | 1241 | if (meta->lod) { |
| 1228 | expr += ", "; | 1242 | expr += ", "; |
| 1229 | expr += CastOperand(Visit(meta->extras.at(i)), Type::Int); | 1243 | expr += CastOperand(Visit(meta->lod), Type::Int); |
| 1230 | } | 1244 | } |
| 1231 | expr += ')'; | 1245 | expr += ')'; |
| 1232 | 1246 | ||
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 8b510b6ae..5e3d862c6 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -164,12 +164,13 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf | |||
| 164 | // Reset the screen info's display texture to its own permanent texture | 164 | // Reset the screen info's display texture to its own permanent texture |
| 165 | screen_info.display_texture = screen_info.texture.resource.handle; | 165 | screen_info.display_texture = screen_info.texture.resource.handle; |
| 166 | 166 | ||
| 167 | Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes, | 167 | rasterizer->FlushRegion(ToCacheAddr(Memory::GetPointer(framebuffer_addr)), size_in_bytes); |
| 168 | Memory::FlushMode::Flush); | ||
| 169 | 168 | ||
| 170 | VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4, | 169 | constexpr u32 linear_bpp = 4; |
| 171 | Memory::GetPointer(framebuffer_addr), | 170 | VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear, |
| 172 | gl_framebuffer_data.data(), true); | 171 | framebuffer.width, framebuffer.height, bytes_per_pixel, |
| 172 | linear_bpp, Memory::GetPointer(framebuffer_addr), | ||
| 173 | gl_framebuffer_data.data()); | ||
| 173 | 174 | ||
| 174 | glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride)); | 175 | glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride)); |
| 175 | 176 | ||
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 4a33a6c84..eac51ecb3 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp | |||
| @@ -17,6 +17,11 @@ | |||
| 17 | 17 | ||
| 18 | namespace Vulkan { | 18 | namespace Vulkan { |
| 19 | 19 | ||
| 20 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, | ||
| 21 | std::size_t alignment, u8* host_ptr) | ||
| 22 | : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ | ||
| 23 | host_ptr} {} | ||
| 24 | |||
| 20 | VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, | 25 | VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, |
| 21 | VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, | 26 | VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, |
| 22 | VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size) | 27 | VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size) |
| @@ -34,19 +39,20 @@ VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, | |||
| 34 | 39 | ||
| 35 | VKBufferCache::~VKBufferCache() = default; | 40 | VKBufferCache::~VKBufferCache() = default; |
| 36 | 41 | ||
| 37 | u64 VKBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment, | 42 | u64 VKBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, u64 alignment, bool cache) { |
| 38 | bool cache) { | ||
| 39 | const auto cpu_addr{tegra_memory_manager.GpuToCpuAddress(gpu_addr)}; | 43 | const auto cpu_addr{tegra_memory_manager.GpuToCpuAddress(gpu_addr)}; |
| 40 | ASSERT(cpu_addr); | 44 | ASSERT_MSG(cpu_addr, "Invalid GPU address"); |
| 41 | 45 | ||
| 42 | // Cache management is a big overhead, so only cache entries with a given size. | 46 | // Cache management is a big overhead, so only cache entries with a given size. |
| 43 | // TODO: Figure out which size is the best for given games. | 47 | // TODO: Figure out which size is the best for given games. |
| 44 | cache &= size >= 2048; | 48 | cache &= size >= 2048; |
| 45 | 49 | ||
| 50 | const auto& host_ptr{Memory::GetPointer(*cpu_addr)}; | ||
| 46 | if (cache) { | 51 | if (cache) { |
| 47 | if (auto entry = TryGet(*cpu_addr); entry) { | 52 | auto entry = TryGet(host_ptr); |
| 48 | if (entry->size >= size && entry->alignment == alignment) { | 53 | if (entry) { |
| 49 | return entry->offset; | 54 | if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { |
| 55 | return entry->GetOffset(); | ||
| 50 | } | 56 | } |
| 51 | Unregister(entry); | 57 | Unregister(entry); |
| 52 | } | 58 | } |
| @@ -55,17 +61,17 @@ u64 VKBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 | |||
| 55 | AlignBuffer(alignment); | 61 | AlignBuffer(alignment); |
| 56 | const u64 uploaded_offset = buffer_offset; | 62 | const u64 uploaded_offset = buffer_offset; |
| 57 | 63 | ||
| 58 | Memory::ReadBlock(*cpu_addr, buffer_ptr, size); | 64 | if (!host_ptr) { |
| 65 | return uploaded_offset; | ||
| 66 | } | ||
| 59 | 67 | ||
| 68 | std::memcpy(buffer_ptr, host_ptr, size); | ||
| 60 | buffer_ptr += size; | 69 | buffer_ptr += size; |
| 61 | buffer_offset += size; | 70 | buffer_offset += size; |
| 62 | 71 | ||
| 63 | if (cache) { | 72 | if (cache) { |
| 64 | auto entry = std::make_shared<CachedBufferEntry>(); | 73 | auto entry = std::make_shared<CachedBufferEntry>(*cpu_addr, size, uploaded_offset, |
| 65 | entry->offset = uploaded_offset; | 74 | alignment, host_ptr); |
| 66 | entry->size = size; | ||
| 67 | entry->alignment = alignment; | ||
| 68 | entry->addr = *cpu_addr; | ||
| 69 | Register(entry); | 75 | Register(entry); |
| 70 | } | 76 | } |
| 71 | 77 | ||
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index d8e916f31..08b786aad 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h | |||
| @@ -24,22 +24,39 @@ class VKFence; | |||
| 24 | class VKMemoryManager; | 24 | class VKMemoryManager; |
| 25 | class VKStreamBuffer; | 25 | class VKStreamBuffer; |
| 26 | 26 | ||
| 27 | struct CachedBufferEntry final : public RasterizerCacheObject { | 27 | class CachedBufferEntry final : public RasterizerCacheObject { |
| 28 | VAddr GetAddr() const override { | 28 | public: |
| 29 | return addr; | 29 | explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, std::size_t alignment, |
| 30 | u8* host_ptr); | ||
| 31 | |||
| 32 | VAddr GetCpuAddr() const override { | ||
| 33 | return cpu_addr; | ||
| 30 | } | 34 | } |
| 31 | 35 | ||
| 32 | std::size_t GetSizeInBytes() const override { | 36 | std::size_t GetSizeInBytes() const override { |
| 33 | return size; | 37 | return size; |
| 34 | } | 38 | } |
| 35 | 39 | ||
| 40 | std::size_t GetSize() const { | ||
| 41 | return size; | ||
| 42 | } | ||
| 43 | |||
| 44 | u64 GetOffset() const { | ||
| 45 | return offset; | ||
| 46 | } | ||
| 47 | |||
| 48 | std::size_t GetAlignment() const { | ||
| 49 | return alignment; | ||
| 50 | } | ||
| 51 | |||
| 36 | // We do not have to flush this cache as things in it are never modified by us. | 52 | // We do not have to flush this cache as things in it are never modified by us. |
| 37 | void Flush() override {} | 53 | void Flush() override {} |
| 38 | 54 | ||
| 39 | VAddr addr; | 55 | private: |
| 40 | std::size_t size; | 56 | VAddr cpu_addr{}; |
| 41 | u64 offset; | 57 | std::size_t size{}; |
| 42 | std::size_t alignment; | 58 | u64 offset{}; |
| 59 | std::size_t alignment{}; | ||
| 43 | }; | 60 | }; |
| 44 | 61 | ||
| 45 | class VKBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { | 62 | class VKBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { |
| @@ -51,8 +68,7 @@ public: | |||
| 51 | 68 | ||
| 52 | /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been | 69 | /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been |
| 53 | /// allocated. | 70 | /// allocated. |
| 54 | u64 UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment = 4, | 71 | u64 UploadMemory(GPUVAddr gpu_addr, std::size_t size, u64 alignment = 4, bool cache = true); |
| 55 | bool cache = true); | ||
| 56 | 72 | ||
| 57 | /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. | 73 | /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. |
| 58 | u64 UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment = 4); | 74 | u64 UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment = 4); |
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp new file mode 100644 index 000000000..ed3178f09 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <cstring> | ||
| 6 | #include <optional> | ||
| 7 | #include <unordered_map> | ||
| 8 | |||
| 9 | #include "common/assert.h" | ||
| 10 | #include "common/cityhash.h" | ||
| 11 | #include "video_core/renderer_vulkan/declarations.h" | ||
| 12 | #include "video_core/renderer_vulkan/maxwell_to_vk.h" | ||
| 13 | #include "video_core/renderer_vulkan/vk_sampler_cache.h" | ||
| 14 | #include "video_core/textures/texture.h" | ||
| 15 | |||
| 16 | namespace Vulkan { | ||
| 17 | |||
| 18 | static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) { | ||
| 19 | // TODO(Rodrigo): Manage integer border colors | ||
| 20 | if (color == std::array<float, 4>{0, 0, 0, 0}) { | ||
| 21 | return vk::BorderColor::eFloatTransparentBlack; | ||
| 22 | } else if (color == std::array<float, 4>{0, 0, 0, 1}) { | ||
| 23 | return vk::BorderColor::eFloatOpaqueBlack; | ||
| 24 | } else if (color == std::array<float, 4>{1, 1, 1, 1}) { | ||
| 25 | return vk::BorderColor::eFloatOpaqueWhite; | ||
| 26 | } else { | ||
| 27 | return {}; | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 31 | std::size_t SamplerCacheKey::Hash() const { | ||
| 32 | static_assert(sizeof(raw) % sizeof(u64) == 0); | ||
| 33 | return static_cast<std::size_t>( | ||
| 34 | Common::CityHash64(reinterpret_cast<const char*>(raw.data()), sizeof(raw) / sizeof(u64))); | ||
| 35 | } | ||
| 36 | |||
| 37 | bool SamplerCacheKey::operator==(const SamplerCacheKey& rhs) const { | ||
| 38 | return raw == rhs.raw; | ||
| 39 | } | ||
| 40 | |||
| 41 | VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {} | ||
| 42 | |||
| 43 | VKSamplerCache::~VKSamplerCache() = default; | ||
| 44 | |||
| 45 | vk::Sampler VKSamplerCache::GetSampler(const Tegra::Texture::TSCEntry& tsc) { | ||
| 46 | const auto [entry, is_cache_miss] = cache.try_emplace(SamplerCacheKey{tsc}); | ||
| 47 | auto& sampler = entry->second; | ||
| 48 | if (is_cache_miss) { | ||
| 49 | sampler = CreateSampler(tsc); | ||
| 50 | } | ||
| 51 | return *sampler; | ||
| 52 | } | ||
| 53 | |||
| 54 | UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) { | ||
| 55 | const float max_anisotropy = tsc.GetMaxAnisotropy(); | ||
| 56 | const bool has_anisotropy = max_anisotropy > 1.0f; | ||
| 57 | |||
| 58 | const auto border_color = tsc.GetBorderColor(); | ||
| 59 | const auto vk_border_color = TryConvertBorderColor(border_color); | ||
| 60 | UNIMPLEMENTED_IF_MSG(!vk_border_color, "Unimplemented border color {} {} {} {}", | ||
| 61 | border_color[0], border_color[1], border_color[2], border_color[3]); | ||
| 62 | |||
| 63 | constexpr bool unnormalized_coords = false; | ||
| 64 | |||
| 65 | const vk::SamplerCreateInfo sampler_ci( | ||
| 66 | {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter), | ||
| 67 | MaxwellToVK::Sampler::Filter(tsc.min_filter), | ||
| 68 | MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter), | ||
| 69 | MaxwellToVK::Sampler::WrapMode(tsc.wrap_u), MaxwellToVK::Sampler::WrapMode(tsc.wrap_v), | ||
| 70 | MaxwellToVK::Sampler::WrapMode(tsc.wrap_p), tsc.GetLodBias(), has_anisotropy, | ||
| 71 | max_anisotropy, tsc.depth_compare_enabled, | ||
| 72 | MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(), | ||
| 73 | tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack), | ||
| 74 | unnormalized_coords); | ||
| 75 | |||
| 76 | const auto& dld = device.GetDispatchLoader(); | ||
| 77 | const auto dev = device.GetLogical(); | ||
| 78 | return dev.createSamplerUnique(sampler_ci, nullptr, dld); | ||
| 79 | } | ||
| 80 | |||
| 81 | } // namespace Vulkan | ||
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h new file mode 100644 index 000000000..c6394dc87 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <unordered_map> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "video_core/renderer_vulkan/declarations.h" | ||
| 11 | #include "video_core/textures/texture.h" | ||
| 12 | |||
| 13 | namespace Vulkan { | ||
| 14 | |||
| 15 | class VKDevice; | ||
| 16 | |||
| 17 | struct SamplerCacheKey final : public Tegra::Texture::TSCEntry { | ||
| 18 | std::size_t Hash() const; | ||
| 19 | |||
| 20 | bool operator==(const SamplerCacheKey& rhs) const; | ||
| 21 | |||
| 22 | bool operator!=(const SamplerCacheKey& rhs) const { | ||
| 23 | return !operator==(rhs); | ||
| 24 | } | ||
| 25 | }; | ||
| 26 | |||
| 27 | } // namespace Vulkan | ||
| 28 | |||
| 29 | namespace std { | ||
| 30 | |||
| 31 | template <> | ||
| 32 | struct hash<Vulkan::SamplerCacheKey> { | ||
| 33 | std::size_t operator()(const Vulkan::SamplerCacheKey& k) const noexcept { | ||
| 34 | return k.Hash(); | ||
| 35 | } | ||
| 36 | }; | ||
| 37 | |||
| 38 | } // namespace std | ||
| 39 | |||
| 40 | namespace Vulkan { | ||
| 41 | |||
| 42 | class VKSamplerCache { | ||
| 43 | public: | ||
| 44 | explicit VKSamplerCache(const VKDevice& device); | ||
| 45 | ~VKSamplerCache(); | ||
| 46 | |||
| 47 | vk::Sampler GetSampler(const Tegra::Texture::TSCEntry& tsc); | ||
| 48 | |||
| 49 | private: | ||
| 50 | UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc); | ||
| 51 | |||
| 52 | const VKDevice& device; | ||
| 53 | std::unordered_map<SamplerCacheKey, UniqueSampler> cache; | ||
| 54 | }; | ||
| 55 | |||
| 56 | } // namespace Vulkan | ||
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp index 740ac3118..e4c438792 100644 --- a/src/video_core/shader/decode.cpp +++ b/src/video_core/shader/decode.cpp | |||
| @@ -165,6 +165,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { | |||
| 165 | {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, | 165 | {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, |
| 166 | {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, | 166 | {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, |
| 167 | {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, | 167 | {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, |
| 168 | {OpCode::Type::Texture, &ShaderIR::DecodeTexture}, | ||
| 168 | {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate}, | 169 | {OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate}, |
| 169 | {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate}, | 170 | {OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate}, |
| 170 | {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate}, | 171 | {OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate}, |
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 38f01ca50..ea3c71eed 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp | |||
| @@ -17,24 +17,6 @@ using Tegra::Shader::Attribute; | |||
| 17 | using Tegra::Shader::Instruction; | 17 | using Tegra::Shader::Instruction; |
| 18 | using Tegra::Shader::OpCode; | 18 | using Tegra::Shader::OpCode; |
| 19 | using Tegra::Shader::Register; | 19 | using Tegra::Shader::Register; |
| 20 | using Tegra::Shader::TextureMiscMode; | ||
| 21 | using Tegra::Shader::TextureProcessMode; | ||
| 22 | using Tegra::Shader::TextureType; | ||
| 23 | |||
| 24 | static std::size_t GetCoordCount(TextureType texture_type) { | ||
| 25 | switch (texture_type) { | ||
| 26 | case TextureType::Texture1D: | ||
| 27 | return 1; | ||
| 28 | case TextureType::Texture2D: | ||
| 29 | return 2; | ||
| 30 | case TextureType::Texture3D: | ||
| 31 | case TextureType::TextureCube: | ||
| 32 | return 3; | ||
| 33 | default: | ||
| 34 | UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); | ||
| 35 | return 0; | ||
| 36 | } | ||
| 37 | } | ||
| 38 | 20 | ||
| 39 | u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | 21 | u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { |
| 40 | const Instruction instr = {program_code[pc]}; | 22 | const Instruction instr = {program_code[pc]}; |
| @@ -247,194 +229,6 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | |||
| 247 | } | 229 | } |
| 248 | break; | 230 | break; |
| 249 | } | 231 | } |
| 250 | case OpCode::Id::TEX: { | ||
| 251 | UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 252 | "AOFFI is not implemented"); | ||
| 253 | |||
| 254 | if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 255 | LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); | ||
| 256 | } | ||
| 257 | |||
| 258 | const TextureType texture_type{instr.tex.texture_type}; | ||
| 259 | const bool is_array = instr.tex.array != 0; | ||
| 260 | const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); | ||
| 261 | const auto process_mode = instr.tex.GetTextureProcessMode(); | ||
| 262 | WriteTexInstructionFloat( | ||
| 263 | bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); | ||
| 264 | break; | ||
| 265 | } | ||
| 266 | case OpCode::Id::TEXS: { | ||
| 267 | const TextureType texture_type{instr.texs.GetTextureType()}; | ||
| 268 | const bool is_array{instr.texs.IsArrayTexture()}; | ||
| 269 | const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); | ||
| 270 | const auto process_mode = instr.texs.GetTextureProcessMode(); | ||
| 271 | |||
| 272 | if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 273 | LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); | ||
| 274 | } | ||
| 275 | |||
| 276 | const Node4 components = | ||
| 277 | GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); | ||
| 278 | |||
| 279 | if (instr.texs.fp32_flag) { | ||
| 280 | WriteTexsInstructionFloat(bb, instr, components); | ||
| 281 | } else { | ||
| 282 | WriteTexsInstructionHalfFloat(bb, instr, components); | ||
| 283 | } | ||
| 284 | break; | ||
| 285 | } | ||
| 286 | case OpCode::Id::TLD4: { | ||
| 287 | ASSERT(instr.tld4.array == 0); | ||
| 288 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 289 | "AOFFI is not implemented"); | ||
| 290 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), | ||
| 291 | "NDV is not implemented"); | ||
| 292 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), | ||
| 293 | "PTP is not implemented"); | ||
| 294 | |||
| 295 | if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 296 | LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); | ||
| 297 | } | ||
| 298 | |||
| 299 | const auto texture_type = instr.tld4.texture_type.Value(); | ||
| 300 | const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); | ||
| 301 | const bool is_array = instr.tld4.array != 0; | ||
| 302 | WriteTexInstructionFloat(bb, instr, | ||
| 303 | GetTld4Code(instr, texture_type, depth_compare, is_array)); | ||
| 304 | break; | ||
| 305 | } | ||
| 306 | case OpCode::Id::TLD4S: { | ||
| 307 | UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 308 | "AOFFI is not implemented"); | ||
| 309 | if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 310 | LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); | ||
| 311 | } | ||
| 312 | |||
| 313 | const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); | ||
| 314 | const Node op_a = GetRegister(instr.gpr8); | ||
| 315 | const Node op_b = GetRegister(instr.gpr20); | ||
| 316 | |||
| 317 | // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. | ||
| 318 | std::vector<Node> coords; | ||
| 319 | if (depth_compare) { | ||
| 320 | // Note: TLD4S coordinate encoding works just like TEXS's | ||
| 321 | const Node op_y = GetRegister(instr.gpr8.Value() + 1); | ||
| 322 | coords.push_back(op_a); | ||
| 323 | coords.push_back(op_y); | ||
| 324 | coords.push_back(op_b); | ||
| 325 | } else { | ||
| 326 | coords.push_back(op_a); | ||
| 327 | coords.push_back(op_b); | ||
| 328 | } | ||
| 329 | std::vector<Node> extras; | ||
| 330 | extras.push_back(Immediate(static_cast<u32>(instr.tld4s.component))); | ||
| 331 | |||
| 332 | const auto& sampler = | ||
| 333 | GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); | ||
| 334 | |||
| 335 | Node4 values; | ||
| 336 | for (u32 element = 0; element < values.size(); ++element) { | ||
| 337 | auto coords_copy = coords; | ||
| 338 | MetaTexture meta{sampler, {}, {}, extras, element}; | ||
| 339 | values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); | ||
| 340 | } | ||
| 341 | |||
| 342 | WriteTexsInstructionFloat(bb, instr, values); | ||
| 343 | break; | ||
| 344 | } | ||
| 345 | case OpCode::Id::TXQ: { | ||
| 346 | if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 347 | LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); | ||
| 348 | } | ||
| 349 | |||
| 350 | // TODO: The new commits on the texture refactor, change the way samplers work. | ||
| 351 | // Sadly, not all texture instructions specify the type of texture their sampler | ||
| 352 | // uses. This must be fixed at a later instance. | ||
| 353 | const auto& sampler = | ||
| 354 | GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); | ||
| 355 | |||
| 356 | u32 indexer = 0; | ||
| 357 | switch (instr.txq.query_type) { | ||
| 358 | case Tegra::Shader::TextureQueryType::Dimension: { | ||
| 359 | for (u32 element = 0; element < 4; ++element) { | ||
| 360 | if (!instr.txq.IsComponentEnabled(element)) { | ||
| 361 | continue; | ||
| 362 | } | ||
| 363 | MetaTexture meta{sampler, {}, {}, {}, element}; | ||
| 364 | const Node value = | ||
| 365 | Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8)); | ||
| 366 | SetTemporal(bb, indexer++, value); | ||
| 367 | } | ||
| 368 | for (u32 i = 0; i < indexer; ++i) { | ||
| 369 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); | ||
| 370 | } | ||
| 371 | break; | ||
| 372 | } | ||
| 373 | default: | ||
| 374 | UNIMPLEMENTED_MSG("Unhandled texture query type: {}", | ||
| 375 | static_cast<u32>(instr.txq.query_type.Value())); | ||
| 376 | } | ||
| 377 | break; | ||
| 378 | } | ||
| 379 | case OpCode::Id::TMML: { | ||
| 380 | UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), | ||
| 381 | "NDV is not implemented"); | ||
| 382 | |||
| 383 | if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 384 | LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); | ||
| 385 | } | ||
| 386 | |||
| 387 | auto texture_type = instr.tmml.texture_type.Value(); | ||
| 388 | const bool is_array = instr.tmml.array != 0; | ||
| 389 | const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); | ||
| 390 | |||
| 391 | std::vector<Node> coords; | ||
| 392 | |||
| 393 | // TODO: Add coordinates for different samplers once other texture types are implemented. | ||
| 394 | switch (texture_type) { | ||
| 395 | case TextureType::Texture1D: | ||
| 396 | coords.push_back(GetRegister(instr.gpr8)); | ||
| 397 | break; | ||
| 398 | case TextureType::Texture2D: | ||
| 399 | coords.push_back(GetRegister(instr.gpr8.Value() + 0)); | ||
| 400 | coords.push_back(GetRegister(instr.gpr8.Value() + 1)); | ||
| 401 | break; | ||
| 402 | default: | ||
| 403 | UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); | ||
| 404 | |||
| 405 | // Fallback to interpreting as a 2D texture for now | ||
| 406 | coords.push_back(GetRegister(instr.gpr8.Value() + 0)); | ||
| 407 | coords.push_back(GetRegister(instr.gpr8.Value() + 1)); | ||
| 408 | texture_type = TextureType::Texture2D; | ||
| 409 | } | ||
| 410 | |||
| 411 | for (u32 element = 0; element < 2; ++element) { | ||
| 412 | auto params = coords; | ||
| 413 | MetaTexture meta{sampler, {}, {}, {}, element}; | ||
| 414 | const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); | ||
| 415 | SetTemporal(bb, element, value); | ||
| 416 | } | ||
| 417 | for (u32 element = 0; element < 2; ++element) { | ||
| 418 | SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); | ||
| 419 | } | ||
| 420 | |||
| 421 | break; | ||
| 422 | } | ||
| 423 | case OpCode::Id::TLDS: { | ||
| 424 | const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; | ||
| 425 | const bool is_array{instr.tlds.IsArrayTexture()}; | ||
| 426 | |||
| 427 | UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 428 | "AOFFI is not implemented"); | ||
| 429 | UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); | ||
| 430 | |||
| 431 | if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 432 | LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); | ||
| 433 | } | ||
| 434 | |||
| 435 | WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); | ||
| 436 | break; | ||
| 437 | } | ||
| 438 | default: | 232 | default: |
| 439 | UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); | 233 | UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); |
| 440 | } | 234 | } |
| @@ -442,291 +236,4 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { | |||
| 442 | return pc; | 236 | return pc; |
| 443 | } | 237 | } |
| 444 | 238 | ||
| 445 | const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, | ||
| 446 | bool is_array, bool is_shadow) { | ||
| 447 | const auto offset = static_cast<std::size_t>(sampler.index.Value()); | ||
| 448 | |||
| 449 | // If this sampler has already been used, return the existing mapping. | ||
| 450 | const auto itr = | ||
| 451 | std::find_if(used_samplers.begin(), used_samplers.end(), | ||
| 452 | [&](const Sampler& entry) { return entry.GetOffset() == offset; }); | ||
| 453 | if (itr != used_samplers.end()) { | ||
| 454 | ASSERT(itr->GetType() == type && itr->IsArray() == is_array && | ||
| 455 | itr->IsShadow() == is_shadow); | ||
| 456 | return *itr; | ||
| 457 | } | ||
| 458 | |||
| 459 | // Otherwise create a new mapping for this sampler | ||
| 460 | const std::size_t next_index = used_samplers.size(); | ||
| 461 | const Sampler entry{offset, next_index, type, is_array, is_shadow}; | ||
| 462 | return *used_samplers.emplace(entry).first; | ||
| 463 | } | ||
| 464 | |||
| 465 | void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { | ||
| 466 | u32 dest_elem = 0; | ||
| 467 | for (u32 elem = 0; elem < 4; ++elem) { | ||
| 468 | if (!instr.tex.IsComponentEnabled(elem)) { | ||
| 469 | // Skip disabled components | ||
| 470 | continue; | ||
| 471 | } | ||
| 472 | SetTemporal(bb, dest_elem++, components[elem]); | ||
| 473 | } | ||
| 474 | // After writing values in temporals, move them to the real registers | ||
| 475 | for (u32 i = 0; i < dest_elem; ++i) { | ||
| 476 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); | ||
| 477 | } | ||
| 478 | } | ||
| 479 | |||
| 480 | void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, | ||
| 481 | const Node4& components) { | ||
| 482 | // TEXS has two destination registers and a swizzle. The first two elements in the swizzle | ||
| 483 | // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 | ||
| 484 | |||
| 485 | u32 dest_elem = 0; | ||
| 486 | for (u32 component = 0; component < 4; ++component) { | ||
| 487 | if (!instr.texs.IsComponentEnabled(component)) | ||
| 488 | continue; | ||
| 489 | SetTemporal(bb, dest_elem++, components[component]); | ||
| 490 | } | ||
| 491 | |||
| 492 | for (u32 i = 0; i < dest_elem; ++i) { | ||
| 493 | if (i < 2) { | ||
| 494 | // Write the first two swizzle components to gpr0 and gpr0+1 | ||
| 495 | SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); | ||
| 496 | } else { | ||
| 497 | ASSERT(instr.texs.HasTwoDestinations()); | ||
| 498 | // Write the rest of the swizzle components to gpr28 and gpr28+1 | ||
| 499 | SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); | ||
| 500 | } | ||
| 501 | } | ||
| 502 | } | ||
| 503 | |||
| 504 | void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, | ||
| 505 | const Node4& components) { | ||
| 506 | // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half | ||
| 507 | // float instruction). | ||
| 508 | |||
| 509 | Node4 values; | ||
| 510 | u32 dest_elem = 0; | ||
| 511 | for (u32 component = 0; component < 4; ++component) { | ||
| 512 | if (!instr.texs.IsComponentEnabled(component)) | ||
| 513 | continue; | ||
| 514 | values[dest_elem++] = components[component]; | ||
| 515 | } | ||
| 516 | if (dest_elem == 0) | ||
| 517 | return; | ||
| 518 | |||
| 519 | std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); | ||
| 520 | |||
| 521 | const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); | ||
| 522 | if (dest_elem <= 2) { | ||
| 523 | SetRegister(bb, instr.gpr0, first_value); | ||
| 524 | return; | ||
| 525 | } | ||
| 526 | |||
| 527 | SetTemporal(bb, 0, first_value); | ||
| 528 | SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); | ||
| 529 | |||
| 530 | SetRegister(bb, instr.gpr0, GetTemporal(0)); | ||
| 531 | SetRegister(bb, instr.gpr28, GetTemporal(1)); | ||
| 532 | } | ||
| 533 | |||
Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
                               TextureProcessMode process_mode, std::vector<Node> coords,
                               Node array, Node depth_compare, u32 bias_offset) {
    // Emits one texture-read operation per output element, selecting between Texture and
    // TextureLod depending on the process mode and host GL capabilities.
    // `array`/`depth_compare` are null nodes when unused; their truthiness drives the flags.
    const bool is_array = array;
    const bool is_shadow = depth_compare;

    UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
                             (texture_type == TextureType::TextureCube && is_array && is_shadow),
                         "This method is not supported.");

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);

    // Modes that carry an explicit (or implicit zero) level of detail.
    const bool lod_needed = process_mode == TextureProcessMode::LZ ||
                            process_mode == TextureProcessMode::LL ||
                            process_mode == TextureProcessMode::LLA;

    // LOD selection (either via bias or explicit textureLod) not supported in GL for
    // sampler2DArrayShadow and samplerCubeArrayShadow.
    const bool gl_lod_supported =
        !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
          (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));

    const OperationCode read_method =
        lod_needed && gl_lod_supported ? OperationCode::TextureLod : OperationCode::Texture;

    UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);

    // Extra operands appended to the read: a zero LOD for LZ, otherwise the lod/bias register.
    std::vector<Node> extras;
    if (process_mode != TextureProcessMode::None && gl_lod_supported) {
        if (process_mode == TextureProcessMode::LZ) {
            extras.push_back(Immediate(0.0f));
        } else {
            // If present, lod or bias are always stored in the register indexed by the gpr20
            // field with an offset depending on the usage of the other registers
            extras.push_back(GetRegister(instr.gpr20.Value() + bias_offset));
        }
    }

    // Build one read per element; coords are copied because Operation takes ownership.
    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto copy_coords = coords;
        MetaTexture meta{sampler, array, depth_compare, extras, element};
        values[element] = Operation(read_method, meta, std::move(copy_coords));
    }

    return values;
}
| 581 | |||
Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
                           TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    // Decodes TEX operand registers into coordinate/array/depth nodes and defers the actual
    // read emission to GetTextureCode.
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
    // If enabled arrays index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        coords.push_back(GetRegister(coord_register + i));
    }
    // 1D.DC in OpenGL the 2nd component is ignored.
    if (depth_compare && !is_array && texture_type == TextureType::Texture1D) {
        coords.push_back(Immediate(0.0f));
    }

    // Null node signals "no array index" to GetTextureCode.
    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20 or in the next register if lod
        // or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0);
}
| 615 | |||
Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
                            TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    // Decodes the compressed TEXS operand layout: the final coordinate may live in gpr20
    // instead of following the gpr8 run, depending on what else occupies gpr20.
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
    // If enabled arrays index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);
    // The last coordinate comes from gpr20 unless a scalar non-array read with lod/bias or
    // depth-compare packs it right after the first coordinate.
    const u64 last_coord_register =
        (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;
    // With 3+ coordinates, gpr20 itself holds a coordinate, so lod/bias shifts to gpr20+1.
    const u32 bias_offset = coord_count > 2 ? 1 : 0;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        const bool last = (i == (coord_count - 1)) && (coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20 or in the next register if lod
        // or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
}
| 651 | |||
Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
                            bool is_array) {
    // Emits one TextureGather operation per output element for TLD4.
    const std::size_t coord_count = GetCoordCount(texture_type);
    // NOTE(review): total_coord_count and total_reg_count are computed but never read here.
    const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0);

    // If enabled arrays index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);

    std::vector<Node> coords;
    for (size_t i = 0; i < coord_count; ++i)
        coords.push_back(GetRegister(coord_register + i));

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        // Operation takes ownership of its operand vector, so copy per element.
        auto coords_copy = coords;
        MetaTexture meta{sampler, GetRegister(array_register), {}, {}, element};
        values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
    }

    return values;
}
| 678 | |||
Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
    // Emits one TexelFetch operation per output element for TLDS.
    const std::size_t type_coord_count = GetCoordCount(texture_type);
    const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;

    // If enabled arrays index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // if is array gpr20 is used
    const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value();

    // Non-array reads may split coordinates across gpr8 and gpr20 depending on how many
    // components are needed and whether gpr20 already carries the LOD.
    const u64 last_coord_register =
        ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < type_coord_count; ++i) {
        const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;
    // When lod is used always is in gpr20
    const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);

    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto coords_copy = coords;
        MetaTexture meta{sampler, array, {}, {lod}, element};
        values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
    }
    return values;
}
| 713 | |||
std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
    TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled,
    std::size_t max_coords, std::size_t max_inputs) {
    // Returns {coordinate components for the texture type, total components to read},
    // clamping the total when the operand layout exceeds the instruction's limits.
    const std::size_t coord_count = GetCoordCount(texture_type);

    std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
    const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
    if (total_coord_count > max_coords || total_reg_count > max_inputs) {
        // Out-of-range layout: report and clamp rather than reading bogus registers.
        UNIMPLEMENTED_MSG("Unsupported Texture operation");
        total_coord_count = std::min(total_coord_count, max_coords);
    }
    // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later.
    total_coord_count +=
        (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0;

    return {coord_count, total_coord_count};
}
| 731 | |||
| 732 | } // namespace VideoCommon::Shader | 239 | } // namespace VideoCommon::Shader |
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp new file mode 100644 index 000000000..a99ae19bf --- /dev/null +++ b/src/video_core/shader/decode/texture.cpp | |||
| @@ -0,0 +1,534 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | #include <vector> | ||
| 7 | #include <fmt/format.h> | ||
| 8 | |||
| 9 | #include "common/assert.h" | ||
| 10 | #include "common/common_types.h" | ||
| 11 | #include "video_core/engines/shader_bytecode.h" | ||
| 12 | #include "video_core/shader/shader_ir.h" | ||
| 13 | |||
| 14 | namespace VideoCommon::Shader { | ||
| 15 | |||
| 16 | using Tegra::Shader::Instruction; | ||
| 17 | using Tegra::Shader::OpCode; | ||
| 18 | using Tegra::Shader::Register; | ||
| 19 | using Tegra::Shader::TextureMiscMode; | ||
| 20 | using Tegra::Shader::TextureProcessMode; | ||
| 21 | using Tegra::Shader::TextureType; | ||
| 22 | |||
| 23 | static std::size_t GetCoordCount(TextureType texture_type) { | ||
| 24 | switch (texture_type) { | ||
| 25 | case TextureType::Texture1D: | ||
| 26 | return 1; | ||
| 27 | case TextureType::Texture2D: | ||
| 28 | return 2; | ||
| 29 | case TextureType::Texture3D: | ||
| 30 | case TextureType::TextureCube: | ||
| 31 | return 3; | ||
| 32 | default: | ||
| 33 | UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); | ||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | } | ||
| 37 | |||
| 38 | u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { | ||
| 39 | const Instruction instr = {program_code[pc]}; | ||
| 40 | const auto opcode = OpCode::Decode(instr); | ||
| 41 | |||
| 42 | switch (opcode->get().GetId()) { | ||
| 43 | case OpCode::Id::TEX: { | ||
| 44 | UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 45 | "AOFFI is not implemented"); | ||
| 46 | |||
| 47 | if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 48 | LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); | ||
| 49 | } | ||
| 50 | |||
| 51 | const TextureType texture_type{instr.tex.texture_type}; | ||
| 52 | const bool is_array = instr.tex.array != 0; | ||
| 53 | const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC); | ||
| 54 | const auto process_mode = instr.tex.GetTextureProcessMode(); | ||
| 55 | WriteTexInstructionFloat( | ||
| 56 | bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array)); | ||
| 57 | break; | ||
| 58 | } | ||
| 59 | case OpCode::Id::TEXS: { | ||
| 60 | const TextureType texture_type{instr.texs.GetTextureType()}; | ||
| 61 | const bool is_array{instr.texs.IsArrayTexture()}; | ||
| 62 | const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); | ||
| 63 | const auto process_mode = instr.texs.GetTextureProcessMode(); | ||
| 64 | |||
| 65 | if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 66 | LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); | ||
| 67 | } | ||
| 68 | |||
| 69 | const Node4 components = | ||
| 70 | GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); | ||
| 71 | |||
| 72 | if (instr.texs.fp32_flag) { | ||
| 73 | WriteTexsInstructionFloat(bb, instr, components); | ||
| 74 | } else { | ||
| 75 | WriteTexsInstructionHalfFloat(bb, instr, components); | ||
| 76 | } | ||
| 77 | break; | ||
| 78 | } | ||
| 79 | case OpCode::Id::TLD4: { | ||
| 80 | ASSERT(instr.tld4.array == 0); | ||
| 81 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 82 | "AOFFI is not implemented"); | ||
| 83 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), | ||
| 84 | "NDV is not implemented"); | ||
| 85 | UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), | ||
| 86 | "PTP is not implemented"); | ||
| 87 | |||
| 88 | if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 89 | LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); | ||
| 90 | } | ||
| 91 | |||
| 92 | const auto texture_type = instr.tld4.texture_type.Value(); | ||
| 93 | const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC); | ||
| 94 | const bool is_array = instr.tld4.array != 0; | ||
| 95 | WriteTexInstructionFloat(bb, instr, | ||
| 96 | GetTld4Code(instr, texture_type, depth_compare, is_array)); | ||
| 97 | break; | ||
| 98 | } | ||
| 99 | case OpCode::Id::TLD4S: { | ||
| 100 | UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 101 | "AOFFI is not implemented"); | ||
| 102 | if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 103 | LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); | ||
| 104 | } | ||
| 105 | |||
| 106 | const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); | ||
| 107 | const Node op_a = GetRegister(instr.gpr8); | ||
| 108 | const Node op_b = GetRegister(instr.gpr20); | ||
| 109 | |||
| 110 | // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. | ||
| 111 | std::vector<Node> coords; | ||
| 112 | if (depth_compare) { | ||
| 113 | // Note: TLD4S coordinate encoding works just like TEXS's | ||
| 114 | const Node op_y = GetRegister(instr.gpr8.Value() + 1); | ||
| 115 | coords.push_back(op_a); | ||
| 116 | coords.push_back(op_y); | ||
| 117 | coords.push_back(op_b); | ||
| 118 | } else { | ||
| 119 | coords.push_back(op_a); | ||
| 120 | coords.push_back(op_b); | ||
| 121 | } | ||
| 122 | const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); | ||
| 123 | |||
| 124 | const auto& sampler = | ||
| 125 | GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); | ||
| 126 | |||
| 127 | Node4 values; | ||
| 128 | for (u32 element = 0; element < values.size(); ++element) { | ||
| 129 | auto coords_copy = coords; | ||
| 130 | MetaTexture meta{sampler, {}, {}, {}, {}, component, element}; | ||
| 131 | values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); | ||
| 132 | } | ||
| 133 | |||
| 134 | WriteTexsInstructionFloat(bb, instr, values); | ||
| 135 | break; | ||
| 136 | } | ||
| 137 | case OpCode::Id::TXQ: { | ||
| 138 | if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 139 | LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); | ||
| 140 | } | ||
| 141 | |||
| 142 | // TODO: The new commits on the texture refactor, change the way samplers work. | ||
| 143 | // Sadly, not all texture instructions specify the type of texture their sampler | ||
| 144 | // uses. This must be fixed at a later instance. | ||
| 145 | const auto& sampler = | ||
| 146 | GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); | ||
| 147 | |||
| 148 | u32 indexer = 0; | ||
| 149 | switch (instr.txq.query_type) { | ||
| 150 | case Tegra::Shader::TextureQueryType::Dimension: { | ||
| 151 | for (u32 element = 0; element < 4; ++element) { | ||
| 152 | if (!instr.txq.IsComponentEnabled(element)) { | ||
| 153 | continue; | ||
| 154 | } | ||
| 155 | MetaTexture meta{sampler, {}, {}, {}, {}, {}, element}; | ||
| 156 | const Node value = | ||
| 157 | Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8)); | ||
| 158 | SetTemporal(bb, indexer++, value); | ||
| 159 | } | ||
| 160 | for (u32 i = 0; i < indexer; ++i) { | ||
| 161 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); | ||
| 162 | } | ||
| 163 | break; | ||
| 164 | } | ||
| 165 | default: | ||
| 166 | UNIMPLEMENTED_MSG("Unhandled texture query type: {}", | ||
| 167 | static_cast<u32>(instr.txq.query_type.Value())); | ||
| 168 | } | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | case OpCode::Id::TMML: { | ||
| 172 | UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), | ||
| 173 | "NDV is not implemented"); | ||
| 174 | |||
| 175 | if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 176 | LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); | ||
| 177 | } | ||
| 178 | |||
| 179 | auto texture_type = instr.tmml.texture_type.Value(); | ||
| 180 | const bool is_array = instr.tmml.array != 0; | ||
| 181 | const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); | ||
| 182 | |||
| 183 | std::vector<Node> coords; | ||
| 184 | |||
| 185 | // TODO: Add coordinates for different samplers once other texture types are implemented. | ||
| 186 | switch (texture_type) { | ||
| 187 | case TextureType::Texture1D: | ||
| 188 | coords.push_back(GetRegister(instr.gpr8)); | ||
| 189 | break; | ||
| 190 | case TextureType::Texture2D: | ||
| 191 | coords.push_back(GetRegister(instr.gpr8.Value() + 0)); | ||
| 192 | coords.push_back(GetRegister(instr.gpr8.Value() + 1)); | ||
| 193 | break; | ||
| 194 | default: | ||
| 195 | UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); | ||
| 196 | |||
| 197 | // Fallback to interpreting as a 2D texture for now | ||
| 198 | coords.push_back(GetRegister(instr.gpr8.Value() + 0)); | ||
| 199 | coords.push_back(GetRegister(instr.gpr8.Value() + 1)); | ||
| 200 | texture_type = TextureType::Texture2D; | ||
| 201 | } | ||
| 202 | |||
| 203 | for (u32 element = 0; element < 2; ++element) { | ||
| 204 | auto params = coords; | ||
| 205 | MetaTexture meta{sampler, {}, {}, {}, {}, {}, element}; | ||
| 206 | const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); | ||
| 207 | SetTemporal(bb, element, value); | ||
| 208 | } | ||
| 209 | for (u32 element = 0; element < 2; ++element) { | ||
| 210 | SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element)); | ||
| 211 | } | ||
| 212 | |||
| 213 | break; | ||
| 214 | } | ||
| 215 | case OpCode::Id::TLDS: { | ||
| 216 | const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; | ||
| 217 | const bool is_array{instr.tlds.IsArrayTexture()}; | ||
| 218 | |||
| 219 | UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI), | ||
| 220 | "AOFFI is not implemented"); | ||
| 221 | UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); | ||
| 222 | |||
| 223 | if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { | ||
| 224 | LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); | ||
| 225 | } | ||
| 226 | |||
| 227 | WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); | ||
| 228 | break; | ||
| 229 | } | ||
| 230 | default: | ||
| 231 | UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); | ||
| 232 | } | ||
| 233 | |||
| 234 | return pc; | ||
| 235 | } | ||
| 236 | |||
| 237 | const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, | ||
| 238 | bool is_array, bool is_shadow) { | ||
| 239 | const auto offset = static_cast<std::size_t>(sampler.index.Value()); | ||
| 240 | |||
| 241 | // If this sampler has already been used, return the existing mapping. | ||
| 242 | const auto itr = | ||
| 243 | std::find_if(used_samplers.begin(), used_samplers.end(), | ||
| 244 | [&](const Sampler& entry) { return entry.GetOffset() == offset; }); | ||
| 245 | if (itr != used_samplers.end()) { | ||
| 246 | ASSERT(itr->GetType() == type && itr->IsArray() == is_array && | ||
| 247 | itr->IsShadow() == is_shadow); | ||
| 248 | return *itr; | ||
| 249 | } | ||
| 250 | |||
| 251 | // Otherwise create a new mapping for this sampler | ||
| 252 | const std::size_t next_index = used_samplers.size(); | ||
| 253 | const Sampler entry{offset, next_index, type, is_array, is_shadow}; | ||
| 254 | return *used_samplers.emplace(entry).first; | ||
| 255 | } | ||
| 256 | |||
| 257 | void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { | ||
| 258 | u32 dest_elem = 0; | ||
| 259 | for (u32 elem = 0; elem < 4; ++elem) { | ||
| 260 | if (!instr.tex.IsComponentEnabled(elem)) { | ||
| 261 | // Skip disabled components | ||
| 262 | continue; | ||
| 263 | } | ||
| 264 | SetTemporal(bb, dest_elem++, components[elem]); | ||
| 265 | } | ||
| 266 | // After writing values in temporals, move them to the real registers | ||
| 267 | for (u32 i = 0; i < dest_elem; ++i) { | ||
| 268 | SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); | ||
| 269 | } | ||
| 270 | } | ||
| 271 | |||
| 272 | void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, | ||
| 273 | const Node4& components) { | ||
| 274 | // TEXS has two destination registers and a swizzle. The first two elements in the swizzle | ||
| 275 | // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 | ||
| 276 | |||
| 277 | u32 dest_elem = 0; | ||
| 278 | for (u32 component = 0; component < 4; ++component) { | ||
| 279 | if (!instr.texs.IsComponentEnabled(component)) | ||
| 280 | continue; | ||
| 281 | SetTemporal(bb, dest_elem++, components[component]); | ||
| 282 | } | ||
| 283 | |||
| 284 | for (u32 i = 0; i < dest_elem; ++i) { | ||
| 285 | if (i < 2) { | ||
| 286 | // Write the first two swizzle components to gpr0 and gpr0+1 | ||
| 287 | SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); | ||
| 288 | } else { | ||
| 289 | ASSERT(instr.texs.HasTwoDestinations()); | ||
| 290 | // Write the rest of the swizzle components to gpr28 and gpr28+1 | ||
| 291 | SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); | ||
| 292 | } | ||
| 293 | } | ||
| 294 | } | ||
| 295 | |||
| 296 | void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, | ||
| 297 | const Node4& components) { | ||
| 298 | // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half | ||
| 299 | // float instruction). | ||
| 300 | |||
| 301 | Node4 values; | ||
| 302 | u32 dest_elem = 0; | ||
| 303 | for (u32 component = 0; component < 4; ++component) { | ||
| 304 | if (!instr.texs.IsComponentEnabled(component)) | ||
| 305 | continue; | ||
| 306 | values[dest_elem++] = components[component]; | ||
| 307 | } | ||
| 308 | if (dest_elem == 0) | ||
| 309 | return; | ||
| 310 | |||
| 311 | std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); }); | ||
| 312 | |||
| 313 | const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]); | ||
| 314 | if (dest_elem <= 2) { | ||
| 315 | SetRegister(bb, instr.gpr0, first_value); | ||
| 316 | return; | ||
| 317 | } | ||
| 318 | |||
| 319 | SetTemporal(bb, 0, first_value); | ||
| 320 | SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); | ||
| 321 | |||
| 322 | SetRegister(bb, instr.gpr0, GetTemporal(0)); | ||
| 323 | SetRegister(bb, instr.gpr28, GetTemporal(1)); | ||
| 324 | } | ||
| 325 | |||
Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
                               TextureProcessMode process_mode, std::vector<Node> coords,
                               Node array, Node depth_compare, u32 bias_offset) {
    // Emits one Texture/TextureLod operation per output element. `array`/`depth_compare`
    // are null nodes when unused; their truthiness drives the flags below.
    const bool is_array = array;
    const bool is_shadow = depth_compare;

    UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
                             (texture_type == TextureType::TextureCube && is_array && is_shadow),
                         "This method is not supported.");

    const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);

    // Modes that carry an explicit (or implicit zero) level of detail.
    const bool lod_needed = process_mode == TextureProcessMode::LZ ||
                            process_mode == TextureProcessMode::LL ||
                            process_mode == TextureProcessMode::LLA;

    // LOD selection (either via bias or explicit textureLod) not supported in GL for
    // sampler2DArrayShadow and samplerCubeArrayShadow.
    const bool gl_lod_supported =
        !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
          (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));

    const OperationCode read_method =
        (lod_needed && gl_lod_supported) ? OperationCode::TextureLod : OperationCode::Texture;

    UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);

    // Exactly one of bias/lod is populated depending on the process mode.
    Node bias = {};
    Node lod = {};
    if (process_mode != TextureProcessMode::None && gl_lod_supported) {
        switch (process_mode) {
        case TextureProcessMode::LZ:
            lod = Immediate(0.0f);
            break;
        case TextureProcessMode::LB:
            // If present, lod or bias are always stored in the register indexed by the gpr20
            // field with an offset depending on the usage of the other registers
            bias = GetRegister(instr.gpr20.Value() + bias_offset);
            break;
        case TextureProcessMode::LL:
            lod = GetRegister(instr.gpr20.Value() + bias_offset);
            break;
        default:
            UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
            break;
        }
    }

    // Build one read per element; coords are copied because Operation takes ownership.
    Node4 values;
    for (u32 element = 0; element < values.size(); ++element) {
        auto copy_coords = coords;
        MetaTexture meta{sampler, array, depth_compare, bias, lod, {}, element};
        values[element] = Operation(read_method, meta, std::move(copy_coords));
    }

    return values;
}
| 383 | |||
| 384 | Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, | ||
| 385 | TextureProcessMode process_mode, bool depth_compare, bool is_array) { | ||
| 386 | const bool lod_bias_enabled = | ||
| 387 | (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); | ||
| 388 | |||
| 389 | const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( | ||
| 390 | texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); | ||
| 391 | // If enabled arrays index is always stored in the gpr8 field | ||
| 392 | const u64 array_register = instr.gpr8.Value(); | ||
| 393 | // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used | ||
| 394 | const u64 coord_register = array_register + (is_array ? 1 : 0); | ||
| 395 | |||
| 396 | std::vector<Node> coords; | ||
| 397 | for (std::size_t i = 0; i < coord_count; ++i) { | ||
| 398 | coords.push_back(GetRegister(coord_register + i)); | ||
| 399 | } | ||
| 400 | // 1D.DC in OpenGL the 2nd component is ignored. | ||
| 401 | if (depth_compare && !is_array && texture_type == TextureType::Texture1D) { | ||
| 402 | coords.push_back(Immediate(0.0f)); | ||
| 403 | } | ||
| 404 | |||
| 405 | const Node array = is_array ? GetRegister(array_register) : nullptr; | ||
| 406 | |||
| 407 | Node dc{}; | ||
| 408 | if (depth_compare) { | ||
| 409 | // Depth is always stored in the register signaled by gpr20 or in the next register if lod | ||
| 410 | // or bias are used | ||
| 411 | const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0); | ||
| 412 | dc = GetRegister(depth_register); | ||
| 413 | } | ||
| 414 | |||
| 415 | return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0); | ||
| 416 | } | ||
| 417 | |||
Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
                            TextureProcessMode process_mode, bool depth_compare, bool is_array) {
    // Decodes the compressed TEXS operand layout: the final coordinate may live in gpr20
    // instead of following the gpr8 run, depending on what else occupies gpr20.
    const bool lod_bias_enabled =
        (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);

    const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
        texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
    // If enabled arrays index is always stored in the gpr8 field
    const u64 array_register = instr.gpr8.Value();
    // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
    const u64 coord_register = array_register + (is_array ? 1 : 0);
    // The last coordinate comes from gpr20 unless a scalar non-array read with lod/bias or
    // depth-compare packs it right after the first coordinate.
    const u64 last_coord_register =
        (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
            ? static_cast<u64>(instr.gpr20.Value())
            : coord_register + 1;
    // With 3+ coordinates, gpr20 itself holds a coordinate, so lod/bias shifts to gpr20+1.
    const u32 bias_offset = coord_count > 2 ? 1 : 0;

    std::vector<Node> coords;
    for (std::size_t i = 0; i < coord_count; ++i) {
        const bool last = (i == (coord_count - 1)) && (coord_count > 1);
        coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
    }

    const Node array = is_array ? GetRegister(array_register) : nullptr;

    Node dc{};
    if (depth_compare) {
        // Depth is always stored in the register signaled by gpr20 or in the next register if lod
        // or bias are used
        const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
        dc = GetRegister(depth_register);
    }

    return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
}
| 453 | |||
| 454 | Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, | ||
| 455 | bool is_array) { | ||
| 456 | const std::size_t coord_count = GetCoordCount(texture_type); | ||
| 457 | const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0); | ||
| 458 | const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0); | ||
| 459 | |||
| 460 | // If enabled arrays index is always stored in the gpr8 field | ||
| 461 | const u64 array_register = instr.gpr8.Value(); | ||
| 462 | // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used | ||
| 463 | const u64 coord_register = array_register + (is_array ? 1 : 0); | ||
| 464 | |||
| 465 | std::vector<Node> coords; | ||
| 466 | for (size_t i = 0; i < coord_count; ++i) | ||
| 467 | coords.push_back(GetRegister(coord_register + i)); | ||
| 468 | |||
| 469 | const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); | ||
| 470 | |||
| 471 | Node4 values; | ||
| 472 | for (u32 element = 0; element < values.size(); ++element) { | ||
| 473 | auto coords_copy = coords; | ||
| 474 | MetaTexture meta{sampler, GetRegister(array_register), {}, {}, {}, {}, element}; | ||
| 475 | values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); | ||
| 476 | } | ||
| 477 | |||
| 478 | return values; | ||
| 479 | } | ||
| 480 | |||
| 481 | Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { | ||
| 482 | const std::size_t type_coord_count = GetCoordCount(texture_type); | ||
| 483 | const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; | ||
| 484 | |||
| 485 | // If enabled arrays index is always stored in the gpr8 field | ||
| 486 | const u64 array_register = instr.gpr8.Value(); | ||
| 487 | // if is array gpr20 is used | ||
| 488 | const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value(); | ||
| 489 | |||
| 490 | const u64 last_coord_register = | ||
| 491 | ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array | ||
| 492 | ? static_cast<u64>(instr.gpr20.Value()) | ||
| 493 | : coord_register + 1; | ||
| 494 | |||
| 495 | std::vector<Node> coords; | ||
| 496 | for (std::size_t i = 0; i < type_coord_count; ++i) { | ||
| 497 | const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1); | ||
| 498 | coords.push_back(GetRegister(last ? last_coord_register : coord_register + i)); | ||
| 499 | } | ||
| 500 | |||
| 501 | const Node array = is_array ? GetRegister(array_register) : nullptr; | ||
| 502 | // When lod is used always is in gpr20 | ||
| 503 | const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0); | ||
| 504 | |||
| 505 | const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); | ||
| 506 | |||
| 507 | Node4 values; | ||
| 508 | for (u32 element = 0; element < values.size(); ++element) { | ||
| 509 | auto coords_copy = coords; | ||
| 510 | MetaTexture meta{sampler, array, {}, {}, lod, {}, element}; | ||
| 511 | values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); | ||
| 512 | } | ||
| 513 | return values; | ||
| 514 | } | ||
| 515 | |||
| 516 | std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement( | ||
| 517 | TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled, | ||
| 518 | std::size_t max_coords, std::size_t max_inputs) { | ||
| 519 | const std::size_t coord_count = GetCoordCount(texture_type); | ||
| 520 | |||
| 521 | std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0); | ||
| 522 | const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0); | ||
| 523 | if (total_coord_count > max_coords || total_reg_count > max_inputs) { | ||
| 524 | UNIMPLEMENTED_MSG("Unsupported Texture operation"); | ||
| 525 | total_coord_count = std::min(total_coord_count, max_coords); | ||
| 526 | } | ||
| 527 | // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later. | ||
| 528 | total_coord_count += | ||
| 529 | (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0; | ||
| 530 | |||
| 531 | return {coord_count, total_coord_count}; | ||
| 532 | } | ||
| 533 | |||
| 534 | } // namespace VideoCommon::Shader \ No newline at end of file | ||
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index 52c7f2c4e..5bc3a3900 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h | |||
| @@ -290,7 +290,9 @@ struct MetaTexture { | |||
| 290 | const Sampler& sampler; | 290 | const Sampler& sampler; |
| 291 | Node array{}; | 291 | Node array{}; |
| 292 | Node depth_compare{}; | 292 | Node depth_compare{}; |
| 293 | std::vector<Node> extras; | 293 | Node bias{}; |
| 294 | Node lod{}; | ||
| 295 | Node component{}; | ||
| 294 | u32 element{}; | 296 | u32 element{}; |
| 295 | }; | 297 | }; |
| 296 | 298 | ||
| @@ -614,6 +616,7 @@ private: | |||
| 614 | u32 DecodeHfma2(NodeBlock& bb, u32 pc); | 616 | u32 DecodeHfma2(NodeBlock& bb, u32 pc); |
| 615 | u32 DecodeConversion(NodeBlock& bb, u32 pc); | 617 | u32 DecodeConversion(NodeBlock& bb, u32 pc); |
| 616 | u32 DecodeMemory(NodeBlock& bb, u32 pc); | 618 | u32 DecodeMemory(NodeBlock& bb, u32 pc); |
| 619 | u32 DecodeTexture(NodeBlock& bb, u32 pc); | ||
| 617 | u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc); | 620 | u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc); |
| 618 | u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc); | 621 | u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc); |
| 619 | u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc); | 622 | u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc); |
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index cad7340f5..995d0e068 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | #include <cstring> | 6 | #include <cstring> |
| 7 | #include "common/alignment.h" | 7 | #include "common/alignment.h" |
| 8 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 9 | #include "core/memory.h" | ||
| 10 | #include "video_core/gpu.h" | 9 | #include "video_core/gpu.h" |
| 11 | #include "video_core/textures/decoders.h" | 10 | #include "video_core/textures/decoders.h" |
| 12 | #include "video_core/textures/texture.h" | 11 | #include "video_core/textures/texture.h" |
| @@ -230,18 +229,18 @@ u32 BytesPerPixel(TextureFormat format) { | |||
| 230 | } | 229 | } |
| 231 | } | 230 | } |
| 232 | 231 | ||
| 233 | void UnswizzleTexture(u8* const unswizzled_data, VAddr address, u32 tile_size_x, u32 tile_size_y, | 232 | void UnswizzleTexture(u8* const unswizzled_data, u8* address, u32 tile_size_x, u32 tile_size_y, |
| 234 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, | 233 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, u32 block_height, |
| 235 | u32 block_depth, u32 width_spacing) { | 234 | u32 block_depth, u32 width_spacing) { |
| 236 | CopySwizzledData((width + tile_size_x - 1) / tile_size_x, | 235 | CopySwizzledData((width + tile_size_x - 1) / tile_size_x, |
| 237 | (height + tile_size_y - 1) / tile_size_y, depth, bytes_per_pixel, | 236 | (height + tile_size_y - 1) / tile_size_y, depth, bytes_per_pixel, |
| 238 | bytes_per_pixel, Memory::GetPointer(address), unswizzled_data, true, | 237 | bytes_per_pixel, address, unswizzled_data, true, block_height, block_depth, |
| 239 | block_height, block_depth, width_spacing); | 238 | width_spacing); |
| 240 | } | 239 | } |
| 241 | 240 | ||
| 242 | std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y, | 241 | std::vector<u8> UnswizzleTexture(u8* address, u32 tile_size_x, u32 tile_size_y, u32 bytes_per_pixel, |
| 243 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, | 242 | u32 width, u32 height, u32 depth, u32 block_height, |
| 244 | u32 block_height, u32 block_depth, u32 width_spacing) { | 243 | u32 block_depth, u32 width_spacing) { |
| 245 | std::vector<u8> unswizzled_data(width * height * depth * bytes_per_pixel); | 244 | std::vector<u8> unswizzled_data(width * height * depth * bytes_per_pixel); |
| 246 | UnswizzleTexture(unswizzled_data.data(), address, tile_size_x, tile_size_y, bytes_per_pixel, | 245 | UnswizzleTexture(unswizzled_data.data(), address, tile_size_x, tile_size_y, bytes_per_pixel, |
| 247 | width, height, depth, block_height, block_depth, width_spacing); | 246 | width, height, depth, block_height, block_depth, width_spacing); |
| @@ -249,8 +248,7 @@ std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y | |||
| 249 | } | 248 | } |
| 250 | 249 | ||
| 251 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, | 250 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, |
| 252 | u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, | 251 | u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height) { |
| 253 | u32 block_height) { | ||
| 254 | const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + (gob_size_x - 1)) / | 252 | const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + (gob_size_x - 1)) / |
| 255 | gob_size_x}; | 253 | gob_size_x}; |
| 256 | for (u32 line = 0; line < subrect_height; ++line) { | 254 | for (u32 line = 0; line < subrect_height; ++line) { |
| @@ -262,17 +260,17 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 | |||
| 262 | const u32 gob_address = | 260 | const u32 gob_address = |
| 263 | gob_address_y + (x * bytes_per_pixel / gob_size_x) * gob_size * block_height; | 261 | gob_address_y + (x * bytes_per_pixel / gob_size_x) * gob_size * block_height; |
| 264 | const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % gob_size_x]; | 262 | const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % gob_size_x]; |
| 265 | const VAddr source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel; | 263 | u8* source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel; |
| 266 | const VAddr dest_addr = swizzled_data + swizzled_offset; | 264 | u8* dest_addr = swizzled_data + swizzled_offset; |
| 267 | 265 | ||
| 268 | Memory::CopyBlock(dest_addr, source_line, bytes_per_pixel); | 266 | std::memcpy(dest_addr, source_line, bytes_per_pixel); |
| 269 | } | 267 | } |
| 270 | } | 268 | } |
| 271 | } | 269 | } |
| 272 | 270 | ||
| 273 | void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, | 271 | void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, |
| 274 | u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, | 272 | u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height, |
| 275 | u32 block_height, u32 offset_x, u32 offset_y) { | 273 | u32 offset_x, u32 offset_y) { |
| 276 | for (u32 line = 0; line < subrect_height; ++line) { | 274 | for (u32 line = 0; line < subrect_height; ++line) { |
| 277 | const u32 y2 = line + offset_y; | 275 | const u32 y2 = line + offset_y; |
| 278 | const u32 gob_address_y = (y2 / (gob_size_y * block_height)) * gob_size * block_height + | 276 | const u32 gob_address_y = (y2 / (gob_size_y * block_height)) * gob_size * block_height + |
| @@ -282,10 +280,10 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 | |||
| 282 | const u32 x2 = (x + offset_x) * bytes_per_pixel; | 280 | const u32 x2 = (x + offset_x) * bytes_per_pixel; |
| 283 | const u32 gob_address = gob_address_y + (x2 / gob_size_x) * gob_size * block_height; | 281 | const u32 gob_address = gob_address_y + (x2 / gob_size_x) * gob_size * block_height; |
| 284 | const u32 swizzled_offset = gob_address + table[x2 % gob_size_x]; | 282 | const u32 swizzled_offset = gob_address + table[x2 % gob_size_x]; |
| 285 | const VAddr dest_line = unswizzled_data + line * dest_pitch + x * bytes_per_pixel; | 283 | u8* dest_line = unswizzled_data + line * dest_pitch + x * bytes_per_pixel; |
| 286 | const VAddr source_addr = swizzled_data + swizzled_offset; | 284 | u8* source_addr = swizzled_data + swizzled_offset; |
| 287 | 285 | ||
| 288 | Memory::CopyBlock(dest_line, source_addr, bytes_per_pixel); | 286 | std::memcpy(dest_line, source_addr, bytes_per_pixel); |
| 289 | } | 287 | } |
| 290 | } | 288 | } |
| 291 | } | 289 | } |
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index 65df86890..e078fa274 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h | |||
| @@ -17,14 +17,14 @@ inline std::size_t GetGOBSize() { | |||
| 17 | } | 17 | } |
| 18 | 18 | ||
| 19 | /// Unswizzles a swizzled texture without changing its format. | 19 | /// Unswizzles a swizzled texture without changing its format. |
| 20 | void UnswizzleTexture(u8* unswizzled_data, VAddr address, u32 tile_size_x, u32 tile_size_y, | 20 | void UnswizzleTexture(u8* unswizzled_data, u8* address, u32 tile_size_x, u32 tile_size_y, |
| 21 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, | 21 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, |
| 22 | u32 block_height = TICEntry::DefaultBlockHeight, | 22 | u32 block_height = TICEntry::DefaultBlockHeight, |
| 23 | u32 block_depth = TICEntry::DefaultBlockHeight, u32 width_spacing = 0); | 23 | u32 block_depth = TICEntry::DefaultBlockHeight, u32 width_spacing = 0); |
| 24 | 24 | ||
| 25 | /// Unswizzles a swizzled texture without changing its format. | 25 | /// Unswizzles a swizzled texture without changing its format. |
| 26 | std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y, | 26 | std::vector<u8> UnswizzleTexture(u8* address, u32 tile_size_x, u32 tile_size_y, u32 bytes_per_pixel, |
| 27 | u32 bytes_per_pixel, u32 width, u32 height, u32 depth, | 27 | u32 width, u32 height, u32 depth, |
| 28 | u32 block_height = TICEntry::DefaultBlockHeight, | 28 | u32 block_height = TICEntry::DefaultBlockHeight, |
| 29 | u32 block_depth = TICEntry::DefaultBlockHeight, | 29 | u32 block_depth = TICEntry::DefaultBlockHeight, |
| 30 | u32 width_spacing = 0); | 30 | u32 width_spacing = 0); |
| @@ -44,12 +44,11 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height | |||
| 44 | 44 | ||
| 45 | /// Copies an untiled subrectangle into a tiled surface. | 45 | /// Copies an untiled subrectangle into a tiled surface. |
| 46 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, | 46 | void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, |
| 47 | u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, | 47 | u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height); |
| 48 | u32 block_height); | ||
| 49 | 48 | ||
| 50 | /// Copies a tiled subrectangle into a linear surface. | 49 | /// Copies a tiled subrectangle into a linear surface. |
| 51 | void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, | 50 | void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, |
| 52 | u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, | 51 | u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height, |
| 53 | u32 block_height, u32 offset_x, u32 offset_y); | 52 | u32 offset_x, u32 offset_y); |
| 54 | 53 | ||
| 55 | } // namespace Tegra::Texture | 54 | } // namespace Tegra::Texture |
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h index 0fc5530f2..93ecc6e31 100644 --- a/src/video_core/textures/texture.h +++ b/src/video_core/textures/texture.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | ||
| 7 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 8 | #include "common/bit_field.h" | 9 | #include "common/bit_field.h" |
| 9 | #include "common/common_funcs.h" | 10 | #include "common/common_funcs.h" |
| @@ -282,34 +283,62 @@ enum class TextureMipmapFilter : u32 { | |||
| 282 | 283 | ||
| 283 | struct TSCEntry { | 284 | struct TSCEntry { |
| 284 | union { | 285 | union { |
| 285 | BitField<0, 3, WrapMode> wrap_u; | 286 | struct { |
| 286 | BitField<3, 3, WrapMode> wrap_v; | 287 | union { |
| 287 | BitField<6, 3, WrapMode> wrap_p; | 288 | BitField<0, 3, WrapMode> wrap_u; |
| 288 | BitField<9, 1, u32> depth_compare_enabled; | 289 | BitField<3, 3, WrapMode> wrap_v; |
| 289 | BitField<10, 3, DepthCompareFunc> depth_compare_func; | 290 | BitField<6, 3, WrapMode> wrap_p; |
| 290 | BitField<13, 1, u32> srgb_conversion; | 291 | BitField<9, 1, u32> depth_compare_enabled; |
| 291 | BitField<20, 3, u32> max_anisotropy; | 292 | BitField<10, 3, DepthCompareFunc> depth_compare_func; |
| 293 | BitField<13, 1, u32> srgb_conversion; | ||
| 294 | BitField<20, 3, u32> max_anisotropy; | ||
| 295 | }; | ||
| 296 | union { | ||
| 297 | BitField<0, 2, TextureFilter> mag_filter; | ||
| 298 | BitField<4, 2, TextureFilter> min_filter; | ||
| 299 | BitField<6, 2, TextureMipmapFilter> mipmap_filter; | ||
| 300 | BitField<9, 1, u32> cubemap_interface_filtering; | ||
| 301 | BitField<12, 13, u32> mip_lod_bias; | ||
| 302 | }; | ||
| 303 | union { | ||
| 304 | BitField<0, 12, u32> min_lod_clamp; | ||
| 305 | BitField<12, 12, u32> max_lod_clamp; | ||
| 306 | BitField<24, 8, u32> srgb_border_color_r; | ||
| 307 | }; | ||
| 308 | union { | ||
| 309 | BitField<12, 8, u32> srgb_border_color_g; | ||
| 310 | BitField<20, 8, u32> srgb_border_color_b; | ||
| 311 | }; | ||
| 312 | std::array<f32, 4> border_color; | ||
| 313 | }; | ||
| 314 | std::array<u8, 0x20> raw; | ||
| 292 | }; | 315 | }; |
| 293 | union { | 316 | |
| 294 | BitField<0, 2, TextureFilter> mag_filter; | 317 | float GetMaxAnisotropy() const { |
| 295 | BitField<4, 2, TextureFilter> min_filter; | 318 | return static_cast<float>(1U << max_anisotropy); |
| 296 | BitField<6, 2, TextureMipmapFilter> mip_filter; | 319 | } |
| 297 | BitField<9, 1, u32> cubemap_interface_filtering; | 320 | |
| 298 | BitField<12, 13, u32> mip_lod_bias; | 321 | float GetMinLod() const { |
| 299 | }; | 322 | return static_cast<float>(min_lod_clamp) / 256.0f; |
| 300 | union { | 323 | } |
| 301 | BitField<0, 12, u32> min_lod_clamp; | 324 | |
| 302 | BitField<12, 12, u32> max_lod_clamp; | 325 | float GetMaxLod() const { |
| 303 | BitField<24, 8, u32> srgb_border_color_r; | 326 | return static_cast<float>(max_lod_clamp) / 256.0f; |
| 304 | }; | 327 | } |
| 305 | union { | 328 | |
| 306 | BitField<12, 8, u32> srgb_border_color_g; | 329 | float GetLodBias() const { |
| 307 | BitField<20, 8, u32> srgb_border_color_b; | 330 | // Sign extend the 13-bit value. |
| 308 | }; | 331 | constexpr u32 mask = 1U << (13 - 1); |
| 309 | float border_color_r; | 332 | return static_cast<s32>((mip_lod_bias ^ mask) - mask) / 256.0f; |
| 310 | float border_color_g; | 333 | } |
| 311 | float border_color_b; | 334 | |
| 312 | float border_color_a; | 335 | std::array<float, 4> GetBorderColor() const { |
| 336 | if (srgb_conversion) { | ||
| 337 | return {srgb_border_color_r / 255.0f, srgb_border_color_g / 255.0f, | ||
| 338 | srgb_border_color_b / 255.0f, border_color[3]}; | ||
| 339 | } | ||
| 340 | return border_color; | ||
| 341 | } | ||
| 313 | }; | 342 | }; |
| 314 | static_assert(sizeof(TSCEntry) == 0x20, "TSCEntry has wrong size"); | 343 | static_assert(sizeof(TSCEntry) == 0x20, "TSCEntry has wrong size"); |
| 315 | 344 | ||
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 3b070bfbb..05ad19e1d 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp | |||
| @@ -24,8 +24,6 @@ void EmuThread::run() { | |||
| 24 | 24 | ||
| 25 | MicroProfileOnThreadCreate("EmuThread"); | 25 | MicroProfileOnThreadCreate("EmuThread"); |
| 26 | 26 | ||
| 27 | stop_run = false; | ||
| 28 | |||
| 29 | emit LoadProgress(VideoCore::LoadCallbackStage::Prepare, 0, 0); | 27 | emit LoadProgress(VideoCore::LoadCallbackStage::Prepare, 0, 0); |
| 30 | 28 | ||
| 31 | Core::System::GetInstance().Renderer().Rasterizer().LoadDiskResources( | 29 | Core::System::GetInstance().Renderer().Rasterizer().LoadDiskResources( |
| @@ -40,7 +38,7 @@ void EmuThread::run() { | |||
| 40 | render_window->DoneCurrent(); | 38 | render_window->DoneCurrent(); |
| 41 | } | 39 | } |
| 42 | 40 | ||
| 43 | // holds whether the cpu was running during the last iteration, | 41 | // Holds whether the cpu was running during the last iteration, |
| 44 | // so that the DebugModeLeft signal can be emitted before the | 42 | // so that the DebugModeLeft signal can be emitted before the |
| 45 | // next execution step | 43 | // next execution step |
| 46 | bool was_active = false; | 44 | bool was_active = false; |
| @@ -123,7 +121,6 @@ GRenderWindow::GRenderWindow(QWidget* parent, EmuThread* emu_thread) | |||
| 123 | setAttribute(Qt::WA_AcceptTouchEvents); | 121 | setAttribute(Qt::WA_AcceptTouchEvents); |
| 124 | 122 | ||
| 125 | InputCommon::Init(); | 123 | InputCommon::Init(); |
| 126 | InputCommon::StartJoystickEventHandler(); | ||
| 127 | connect(this, &GRenderWindow::FirstFrameDisplayed, static_cast<GMainWindow*>(parent), | 124 | connect(this, &GRenderWindow::FirstFrameDisplayed, static_cast<GMainWindow*>(parent), |
| 128 | &GMainWindow::OnLoadComplete); | 125 | &GMainWindow::OnLoadComplete); |
| 129 | } | 126 | } |
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 74dc6bb28..4650f96a3 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp | |||
| @@ -209,7 +209,7 @@ void Config::ReadPlayerValues() { | |||
| 209 | for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { | 209 | for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { |
| 210 | auto& player = Settings::values.players[p]; | 210 | auto& player = Settings::values.players[p]; |
| 211 | 211 | ||
| 212 | player.connected = qt_config->value(QString("player_%1_connected").arg(p), false).toBool(); | 212 | player.connected = ReadSetting(QString("player_%1_connected").arg(p), false).toBool(); |
| 213 | 213 | ||
| 214 | player.type = static_cast<Settings::ControllerType>( | 214 | player.type = static_cast<Settings::ControllerType>( |
| 215 | qt_config | 215 | qt_config |
| @@ -269,7 +269,7 @@ void Config::ReadPlayerValues() { | |||
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | void Config::ReadDebugValues() { | 271 | void Config::ReadDebugValues() { |
| 272 | Settings::values.debug_pad_enabled = qt_config->value("debug_pad_enabled", false).toBool(); | 272 | Settings::values.debug_pad_enabled = ReadSetting("debug_pad_enabled", false).toBool(); |
| 273 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { | 273 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { |
| 274 | std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); | 274 | std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); |
| 275 | Settings::values.debug_pad_buttons[i] = | 275 | Settings::values.debug_pad_buttons[i] = |
| @@ -298,7 +298,7 @@ void Config::ReadDebugValues() { | |||
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | void Config::ReadKeyboardValues() { | 300 | void Config::ReadKeyboardValues() { |
| 301 | Settings::values.keyboard_enabled = qt_config->value("keyboard_enabled", false).toBool(); | 301 | Settings::values.keyboard_enabled = ReadSetting("keyboard_enabled", false).toBool(); |
| 302 | 302 | ||
| 303 | std::transform(default_keyboard_keys.begin(), default_keyboard_keys.end(), | 303 | std::transform(default_keyboard_keys.begin(), default_keyboard_keys.end(), |
| 304 | Settings::values.keyboard_keys.begin(), InputCommon::GenerateKeyboardParam); | 304 | Settings::values.keyboard_keys.begin(), InputCommon::GenerateKeyboardParam); |
| @@ -311,7 +311,7 @@ void Config::ReadKeyboardValues() { | |||
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | void Config::ReadMouseValues() { | 313 | void Config::ReadMouseValues() { |
| 314 | Settings::values.mouse_enabled = qt_config->value("mouse_enabled", false).toBool(); | 314 | Settings::values.mouse_enabled = ReadSetting("mouse_enabled", false).toBool(); |
| 315 | 315 | ||
| 316 | for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { | 316 | for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { |
| 317 | std::string default_param = InputCommon::GenerateKeyboardParam(default_mouse_buttons[i]); | 317 | std::string default_param = InputCommon::GenerateKeyboardParam(default_mouse_buttons[i]); |
| @@ -327,16 +327,14 @@ void Config::ReadMouseValues() { | |||
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | void Config::ReadTouchscreenValues() { | 329 | void Config::ReadTouchscreenValues() { |
| 330 | Settings::values.touchscreen.enabled = qt_config->value("touchscreen_enabled", true).toBool(); | 330 | Settings::values.touchscreen.enabled = ReadSetting("touchscreen_enabled", true).toBool(); |
| 331 | Settings::values.touchscreen.device = | 331 | Settings::values.touchscreen.device = |
| 332 | qt_config->value("touchscreen_device", "engine:emu_window").toString().toStdString(); | 332 | ReadSetting("touchscreen_device", "engine:emu_window").toString().toStdString(); |
| 333 | 333 | ||
| 334 | Settings::values.touchscreen.finger = qt_config->value("touchscreen_finger", 0).toUInt(); | 334 | Settings::values.touchscreen.finger = ReadSetting("touchscreen_finger", 0).toUInt(); |
| 335 | Settings::values.touchscreen.rotation_angle = qt_config->value("touchscreen_angle", 0).toUInt(); | 335 | Settings::values.touchscreen.rotation_angle = ReadSetting("touchscreen_angle", 0).toUInt(); |
| 336 | Settings::values.touchscreen.diameter_x = | 336 | Settings::values.touchscreen.diameter_x = ReadSetting("touchscreen_diameter_x", 15).toUInt(); |
| 337 | qt_config->value("touchscreen_diameter_x", 15).toUInt(); | 337 | Settings::values.touchscreen.diameter_y = ReadSetting("touchscreen_diameter_y", 15).toUInt(); |
| 338 | Settings::values.touchscreen.diameter_y = | ||
| 339 | qt_config->value("touchscreen_diameter_y", 15).toUInt(); | ||
| 340 | qt_config->endGroup(); | 338 | qt_config->endGroup(); |
| 341 | } | 339 | } |
| 342 | 340 | ||
| @@ -357,42 +355,41 @@ void Config::ReadValues() { | |||
| 357 | ReadTouchscreenValues(); | 355 | ReadTouchscreenValues(); |
| 358 | 356 | ||
| 359 | Settings::values.motion_device = | 357 | Settings::values.motion_device = |
| 360 | qt_config->value("motion_device", "engine:motion_emu,update_period:100,sensitivity:0.01") | 358 | ReadSetting("motion_device", "engine:motion_emu,update_period:100,sensitivity:0.01") |
| 361 | .toString() | 359 | .toString() |
| 362 | .toStdString(); | 360 | .toStdString(); |
| 363 | 361 | ||
| 364 | qt_config->beginGroup("Core"); | 362 | qt_config->beginGroup("Core"); |
| 365 | Settings::values.use_cpu_jit = qt_config->value("use_cpu_jit", true).toBool(); | 363 | Settings::values.use_cpu_jit = ReadSetting("use_cpu_jit", true).toBool(); |
| 366 | Settings::values.use_multi_core = qt_config->value("use_multi_core", false).toBool(); | 364 | Settings::values.use_multi_core = ReadSetting("use_multi_core", false).toBool(); |
| 367 | qt_config->endGroup(); | 365 | qt_config->endGroup(); |
| 368 | 366 | ||
| 369 | qt_config->beginGroup("Renderer"); | 367 | qt_config->beginGroup("Renderer"); |
| 370 | Settings::values.resolution_factor = qt_config->value("resolution_factor", 1.0).toFloat(); | 368 | Settings::values.resolution_factor = ReadSetting("resolution_factor", 1.0).toFloat(); |
| 371 | Settings::values.use_frame_limit = qt_config->value("use_frame_limit", true).toBool(); | 369 | Settings::values.use_frame_limit = ReadSetting("use_frame_limit", true).toBool(); |
| 372 | Settings::values.frame_limit = qt_config->value("frame_limit", 100).toInt(); | 370 | Settings::values.frame_limit = ReadSetting("frame_limit", 100).toInt(); |
| 373 | Settings::values.use_disk_shader_cache = | 371 | Settings::values.use_disk_shader_cache = ReadSetting("use_disk_shader_cache", true).toBool(); |
| 374 | qt_config->value("use_disk_shader_cache", false).toBool(); | ||
| 375 | Settings::values.use_accurate_gpu_emulation = | 372 | Settings::values.use_accurate_gpu_emulation = |
| 376 | qt_config->value("use_accurate_gpu_emulation", false).toBool(); | 373 | ReadSetting("use_accurate_gpu_emulation", false).toBool(); |
| 377 | Settings::values.use_asynchronous_gpu_emulation = | 374 | Settings::values.use_asynchronous_gpu_emulation = |
| 378 | qt_config->value("use_asynchronous_gpu_emulation", false).toBool(); | 375 | ReadSetting("use_asynchronous_gpu_emulation", false).toBool(); |
| 379 | 376 | ||
| 380 | Settings::values.bg_red = qt_config->value("bg_red", 0.0).toFloat(); | 377 | Settings::values.bg_red = ReadSetting("bg_red", 0.0).toFloat(); |
| 381 | Settings::values.bg_green = qt_config->value("bg_green", 0.0).toFloat(); | 378 | Settings::values.bg_green = ReadSetting("bg_green", 0.0).toFloat(); |
| 382 | Settings::values.bg_blue = qt_config->value("bg_blue", 0.0).toFloat(); | 379 | Settings::values.bg_blue = ReadSetting("bg_blue", 0.0).toFloat(); |
| 383 | qt_config->endGroup(); | 380 | qt_config->endGroup(); |
| 384 | 381 | ||
| 385 | qt_config->beginGroup("Audio"); | 382 | qt_config->beginGroup("Audio"); |
| 386 | Settings::values.sink_id = qt_config->value("output_engine", "auto").toString().toStdString(); | 383 | Settings::values.sink_id = ReadSetting("output_engine", "auto").toString().toStdString(); |
| 387 | Settings::values.enable_audio_stretching = | 384 | Settings::values.enable_audio_stretching = |
| 388 | qt_config->value("enable_audio_stretching", true).toBool(); | 385 | ReadSetting("enable_audio_stretching", true).toBool(); |
| 389 | Settings::values.audio_device_id = | 386 | Settings::values.audio_device_id = |
| 390 | qt_config->value("output_device", "auto").toString().toStdString(); | 387 | ReadSetting("output_device", "auto").toString().toStdString(); |
| 391 | Settings::values.volume = qt_config->value("volume", 1).toFloat(); | 388 | Settings::values.volume = ReadSetting("volume", 1).toFloat(); |
| 392 | qt_config->endGroup(); | 389 | qt_config->endGroup(); |
| 393 | 390 | ||
| 394 | qt_config->beginGroup("Data Storage"); | 391 | qt_config->beginGroup("Data Storage"); |
| 395 | Settings::values.use_virtual_sd = qt_config->value("use_virtual_sd", true).toBool(); | 392 | Settings::values.use_virtual_sd = ReadSetting("use_virtual_sd", true).toBool(); |
| 396 | FileUtil::GetUserPath( | 393 | FileUtil::GetUserPath( |
| 397 | FileUtil::UserPath::NANDDir, | 394 | FileUtil::UserPath::NANDDir, |
| 398 | qt_config | 395 | qt_config |
| @@ -410,30 +407,30 @@ void Config::ReadValues() { | |||
| 410 | qt_config->endGroup(); | 407 | qt_config->endGroup(); |
| 411 | 408 | ||
| 412 | qt_config->beginGroup("Core"); | 409 | qt_config->beginGroup("Core"); |
| 413 | Settings::values.use_cpu_jit = qt_config->value("use_cpu_jit", true).toBool(); | 410 | Settings::values.use_cpu_jit = ReadSetting("use_cpu_jit", true).toBool(); |
| 414 | Settings::values.use_multi_core = qt_config->value("use_multi_core", false).toBool(); | 411 | Settings::values.use_multi_core = ReadSetting("use_multi_core", false).toBool(); |
| 415 | qt_config->endGroup(); | 412 | qt_config->endGroup(); |
| 416 | 413 | ||
| 417 | qt_config->beginGroup("System"); | 414 | qt_config->beginGroup("System"); |
| 418 | Settings::values.use_docked_mode = qt_config->value("use_docked_mode", false).toBool(); | 415 | Settings::values.use_docked_mode = ReadSetting("use_docked_mode", false).toBool(); |
| 419 | Settings::values.enable_nfc = qt_config->value("enable_nfc", true).toBool(); | 416 | Settings::values.enable_nfc = ReadSetting("enable_nfc", true).toBool(); |
| 420 | 417 | ||
| 421 | Settings::values.current_user = std::clamp<int>(qt_config->value("current_user", 0).toInt(), 0, | 418 | Settings::values.current_user = |
| 422 | Service::Account::MAX_USERS - 1); | 419 | std::clamp<int>(ReadSetting("current_user", 0).toInt(), 0, Service::Account::MAX_USERS - 1); |
| 423 | 420 | ||
| 424 | Settings::values.language_index = qt_config->value("language_index", 1).toInt(); | 421 | Settings::values.language_index = ReadSetting("language_index", 1).toInt(); |
| 425 | 422 | ||
| 426 | const auto rng_seed_enabled = qt_config->value("rng_seed_enabled", false).toBool(); | 423 | const auto rng_seed_enabled = ReadSetting("rng_seed_enabled", false).toBool(); |
| 427 | if (rng_seed_enabled) { | 424 | if (rng_seed_enabled) { |
| 428 | Settings::values.rng_seed = qt_config->value("rng_seed", 0).toULongLong(); | 425 | Settings::values.rng_seed = ReadSetting("rng_seed", 0).toULongLong(); |
| 429 | } else { | 426 | } else { |
| 430 | Settings::values.rng_seed = std::nullopt; | 427 | Settings::values.rng_seed = std::nullopt; |
| 431 | } | 428 | } |
| 432 | 429 | ||
| 433 | const auto custom_rtc_enabled = qt_config->value("custom_rtc_enabled", false).toBool(); | 430 | const auto custom_rtc_enabled = ReadSetting("custom_rtc_enabled", false).toBool(); |
| 434 | if (custom_rtc_enabled) { | 431 | if (custom_rtc_enabled) { |
| 435 | Settings::values.custom_rtc = | 432 | Settings::values.custom_rtc = |
| 436 | std::chrono::seconds(qt_config->value("custom_rtc", 0).toULongLong()); | 433 | std::chrono::seconds(ReadSetting("custom_rtc", 0).toULongLong()); |
| 437 | } else { | 434 | } else { |
| 438 | Settings::values.custom_rtc = std::nullopt; | 435 | Settings::values.custom_rtc = std::nullopt; |
| 439 | } | 436 | } |
| @@ -441,35 +438,35 @@ void Config::ReadValues() { | |||
| 441 | qt_config->endGroup(); | 438 | qt_config->endGroup(); |
| 442 | 439 | ||
| 443 | qt_config->beginGroup("Miscellaneous"); | 440 | qt_config->beginGroup("Miscellaneous"); |
| 444 | Settings::values.log_filter = qt_config->value("log_filter", "*:Info").toString().toStdString(); | 441 | Settings::values.log_filter = ReadSetting("log_filter", "*:Info").toString().toStdString(); |
| 445 | Settings::values.use_dev_keys = qt_config->value("use_dev_keys", false).toBool(); | 442 | Settings::values.use_dev_keys = ReadSetting("use_dev_keys", false).toBool(); |
| 446 | qt_config->endGroup(); | 443 | qt_config->endGroup(); |
| 447 | 444 | ||
| 448 | qt_config->beginGroup("Debugging"); | 445 | qt_config->beginGroup("Debugging"); |
| 449 | Settings::values.use_gdbstub = qt_config->value("use_gdbstub", false).toBool(); | 446 | Settings::values.use_gdbstub = ReadSetting("use_gdbstub", false).toBool(); |
| 450 | Settings::values.gdbstub_port = qt_config->value("gdbstub_port", 24689).toInt(); | 447 | Settings::values.gdbstub_port = ReadSetting("gdbstub_port", 24689).toInt(); |
| 451 | Settings::values.program_args = qt_config->value("program_args", "").toString().toStdString(); | 448 | Settings::values.program_args = ReadSetting("program_args", "").toString().toStdString(); |
| 452 | Settings::values.dump_exefs = qt_config->value("dump_exefs", false).toBool(); | 449 | Settings::values.dump_exefs = ReadSetting("dump_exefs", false).toBool(); |
| 453 | Settings::values.dump_nso = qt_config->value("dump_nso", false).toBool(); | 450 | Settings::values.dump_nso = ReadSetting("dump_nso", false).toBool(); |
| 454 | qt_config->endGroup(); | 451 | qt_config->endGroup(); |
| 455 | 452 | ||
| 456 | qt_config->beginGroup("WebService"); | 453 | qt_config->beginGroup("WebService"); |
| 457 | Settings::values.enable_telemetry = qt_config->value("enable_telemetry", true).toBool(); | 454 | Settings::values.enable_telemetry = ReadSetting("enable_telemetry", true).toBool(); |
| 458 | Settings::values.web_api_url = | 455 | Settings::values.web_api_url = |
| 459 | qt_config->value("web_api_url", "https://api.yuzu-emu.org").toString().toStdString(); | 456 | ReadSetting("web_api_url", "https://api.yuzu-emu.org").toString().toStdString(); |
| 460 | Settings::values.yuzu_username = qt_config->value("yuzu_username").toString().toStdString(); | 457 | Settings::values.yuzu_username = ReadSetting("yuzu_username").toString().toStdString(); |
| 461 | Settings::values.yuzu_token = qt_config->value("yuzu_token").toString().toStdString(); | 458 | Settings::values.yuzu_token = ReadSetting("yuzu_token").toString().toStdString(); |
| 462 | qt_config->endGroup(); | 459 | qt_config->endGroup(); |
| 463 | 460 | ||
| 464 | const auto size = qt_config->beginReadArray("DisabledAddOns"); | 461 | const auto size = qt_config->beginReadArray("DisabledAddOns"); |
| 465 | for (int i = 0; i < size; ++i) { | 462 | for (int i = 0; i < size; ++i) { |
| 466 | qt_config->setArrayIndex(i); | 463 | qt_config->setArrayIndex(i); |
| 467 | const auto title_id = qt_config->value("title_id", 0).toULongLong(); | 464 | const auto title_id = ReadSetting("title_id", 0).toULongLong(); |
| 468 | std::vector<std::string> out; | 465 | std::vector<std::string> out; |
| 469 | const auto d_size = qt_config->beginReadArray("disabled"); | 466 | const auto d_size = qt_config->beginReadArray("disabled"); |
| 470 | for (int j = 0; j < d_size; ++j) { | 467 | for (int j = 0; j < d_size; ++j) { |
| 471 | qt_config->setArrayIndex(j); | 468 | qt_config->setArrayIndex(j); |
| 472 | out.push_back(qt_config->value("d", "").toString().toStdString()); | 469 | out.push_back(ReadSetting("d", "").toString().toStdString()); |
| 473 | } | 470 | } |
| 474 | qt_config->endArray(); | 471 | qt_config->endArray(); |
| 475 | Settings::values.disabled_addons.insert_or_assign(title_id, out); | 472 | Settings::values.disabled_addons.insert_or_assign(title_id, out); |
| @@ -477,41 +474,38 @@ void Config::ReadValues() { | |||
| 477 | qt_config->endArray(); | 474 | qt_config->endArray(); |
| 478 | 475 | ||
| 479 | qt_config->beginGroup("UI"); | 476 | qt_config->beginGroup("UI"); |
| 480 | UISettings::values.theme = qt_config->value("theme", UISettings::themes[0].second).toString(); | 477 | UISettings::values.theme = ReadSetting("theme", UISettings::themes[0].second).toString(); |
| 481 | UISettings::values.enable_discord_presence = | 478 | UISettings::values.enable_discord_presence = |
| 482 | qt_config->value("enable_discord_presence", true).toBool(); | 479 | ReadSetting("enable_discord_presence", true).toBool(); |
| 483 | UISettings::values.screenshot_resolution_factor = | 480 | UISettings::values.screenshot_resolution_factor = |
| 484 | static_cast<u16>(qt_config->value("screenshot_resolution_factor", 0).toUInt()); | 481 | static_cast<u16>(ReadSetting("screenshot_resolution_factor", 0).toUInt()); |
| 485 | UISettings::values.select_user_on_boot = | 482 | UISettings::values.select_user_on_boot = ReadSetting("select_user_on_boot", false).toBool(); |
| 486 | qt_config->value("select_user_on_boot", false).toBool(); | ||
| 487 | 483 | ||
| 488 | qt_config->beginGroup("UIGameList"); | 484 | qt_config->beginGroup("UIGameList"); |
| 489 | UISettings::values.show_unknown = qt_config->value("show_unknown", true).toBool(); | 485 | UISettings::values.show_unknown = ReadSetting("show_unknown", true).toBool(); |
| 490 | UISettings::values.show_add_ons = qt_config->value("show_add_ons", true).toBool(); | 486 | UISettings::values.show_add_ons = ReadSetting("show_add_ons", true).toBool(); |
| 491 | UISettings::values.icon_size = qt_config->value("icon_size", 64).toUInt(); | 487 | UISettings::values.icon_size = ReadSetting("icon_size", 64).toUInt(); |
| 492 | UISettings::values.row_1_text_id = qt_config->value("row_1_text_id", 3).toUInt(); | 488 | UISettings::values.row_1_text_id = ReadSetting("row_1_text_id", 3).toUInt(); |
| 493 | UISettings::values.row_2_text_id = qt_config->value("row_2_text_id", 2).toUInt(); | 489 | UISettings::values.row_2_text_id = ReadSetting("row_2_text_id", 2).toUInt(); |
| 494 | qt_config->endGroup(); | 490 | qt_config->endGroup(); |
| 495 | 491 | ||
| 496 | qt_config->beginGroup("UILayout"); | 492 | qt_config->beginGroup("UILayout"); |
| 497 | UISettings::values.geometry = qt_config->value("geometry").toByteArray(); | 493 | UISettings::values.geometry = ReadSetting("geometry").toByteArray(); |
| 498 | UISettings::values.state = qt_config->value("state").toByteArray(); | 494 | UISettings::values.state = ReadSetting("state").toByteArray(); |
| 499 | UISettings::values.renderwindow_geometry = | 495 | UISettings::values.renderwindow_geometry = ReadSetting("geometryRenderWindow").toByteArray(); |
| 500 | qt_config->value("geometryRenderWindow").toByteArray(); | 496 | UISettings::values.gamelist_header_state = ReadSetting("gameListHeaderState").toByteArray(); |
| 501 | UISettings::values.gamelist_header_state = | ||
| 502 | qt_config->value("gameListHeaderState").toByteArray(); | ||
| 503 | UISettings::values.microprofile_geometry = | 497 | UISettings::values.microprofile_geometry = |
| 504 | qt_config->value("microProfileDialogGeometry").toByteArray(); | 498 | ReadSetting("microProfileDialogGeometry").toByteArray(); |
| 505 | UISettings::values.microprofile_visible = | 499 | UISettings::values.microprofile_visible = |
| 506 | qt_config->value("microProfileDialogVisible", false).toBool(); | 500 | ReadSetting("microProfileDialogVisible", false).toBool(); |
| 507 | qt_config->endGroup(); | 501 | qt_config->endGroup(); |
| 508 | 502 | ||
| 509 | qt_config->beginGroup("Paths"); | 503 | qt_config->beginGroup("Paths"); |
| 510 | UISettings::values.roms_path = qt_config->value("romsPath").toString(); | 504 | UISettings::values.roms_path = ReadSetting("romsPath").toString(); |
| 511 | UISettings::values.symbols_path = qt_config->value("symbolsPath").toString(); | 505 | UISettings::values.symbols_path = ReadSetting("symbolsPath").toString(); |
| 512 | UISettings::values.gamedir = qt_config->value("gameListRootDir", ".").toString(); | 506 | UISettings::values.gamedir = ReadSetting("gameListRootDir", ".").toString(); |
| 513 | UISettings::values.gamedir_deepscan = qt_config->value("gameListDeepScan", false).toBool(); | 507 | UISettings::values.gamedir_deepscan = ReadSetting("gameListDeepScan", false).toBool(); |
| 514 | UISettings::values.recent_files = qt_config->value("recentFiles").toStringList(); | 508 | UISettings::values.recent_files = ReadSetting("recentFiles").toStringList(); |
| 515 | qt_config->endGroup(); | 509 | qt_config->endGroup(); |
| 516 | 510 | ||
| 517 | qt_config->beginGroup("Shortcuts"); | 511 | qt_config->beginGroup("Shortcuts"); |
| @@ -524,8 +518,8 @@ void Config::ReadValues() { | |||
| 524 | qt_config->beginGroup(hotkey); | 518 | qt_config->beginGroup(hotkey); |
| 525 | UISettings::values.shortcuts.emplace_back(UISettings::Shortcut( | 519 | UISettings::values.shortcuts.emplace_back(UISettings::Shortcut( |
| 526 | group + "/" + hotkey, | 520 | group + "/" + hotkey, |
| 527 | UISettings::ContextualShortcut(qt_config->value("KeySeq").toString(), | 521 | UISettings::ContextualShortcut(ReadSetting("KeySeq").toString(), |
| 528 | qt_config->value("Context").toInt()))); | 522 | ReadSetting("Context").toInt()))); |
| 529 | qt_config->endGroup(); | 523 | qt_config->endGroup(); |
| 530 | } | 524 | } |
| 531 | 525 | ||
| @@ -533,16 +527,16 @@ void Config::ReadValues() { | |||
| 533 | } | 527 | } |
| 534 | qt_config->endGroup(); | 528 | qt_config->endGroup(); |
| 535 | 529 | ||
| 536 | UISettings::values.single_window_mode = qt_config->value("singleWindowMode", true).toBool(); | 530 | UISettings::values.single_window_mode = ReadSetting("singleWindowMode", true).toBool(); |
| 537 | UISettings::values.fullscreen = qt_config->value("fullscreen", false).toBool(); | 531 | UISettings::values.fullscreen = ReadSetting("fullscreen", false).toBool(); |
| 538 | UISettings::values.display_titlebar = qt_config->value("displayTitleBars", true).toBool(); | 532 | UISettings::values.display_titlebar = ReadSetting("displayTitleBars", true).toBool(); |
| 539 | UISettings::values.show_filter_bar = qt_config->value("showFilterBar", true).toBool(); | 533 | UISettings::values.show_filter_bar = ReadSetting("showFilterBar", true).toBool(); |
| 540 | UISettings::values.show_status_bar = qt_config->value("showStatusBar", true).toBool(); | 534 | UISettings::values.show_status_bar = ReadSetting("showStatusBar", true).toBool(); |
| 541 | UISettings::values.confirm_before_closing = qt_config->value("confirmClose", true).toBool(); | 535 | UISettings::values.confirm_before_closing = ReadSetting("confirmClose", true).toBool(); |
| 542 | UISettings::values.first_start = qt_config->value("firstStart", true).toBool(); | 536 | UISettings::values.first_start = ReadSetting("firstStart", true).toBool(); |
| 543 | UISettings::values.callout_flags = qt_config->value("calloutFlags", 0).toUInt(); | 537 | UISettings::values.callout_flags = ReadSetting("calloutFlags", 0).toUInt(); |
| 544 | UISettings::values.show_console = qt_config->value("showConsole", false).toBool(); | 538 | UISettings::values.show_console = ReadSetting("showConsole", false).toBool(); |
| 545 | UISettings::values.profile_index = qt_config->value("profileIndex", 0).toUInt(); | 539 | UISettings::values.profile_index = ReadSetting("profileIndex", 0).toUInt(); |
| 546 | 540 | ||
| 547 | ApplyDefaultProfileIfInputInvalid(); | 541 | ApplyDefaultProfileIfInputInvalid(); |
| 548 | 542 | ||
| @@ -553,62 +547,79 @@ void Config::SavePlayerValues() { | |||
| 553 | for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { | 547 | for (std::size_t p = 0; p < Settings::values.players.size(); ++p) { |
| 554 | const auto& player = Settings::values.players[p]; | 548 | const auto& player = Settings::values.players[p]; |
| 555 | 549 | ||
| 556 | qt_config->setValue(QString("player_%1_connected").arg(p), player.connected); | 550 | WriteSetting(QString("player_%1_connected").arg(p), player.connected, false); |
| 557 | qt_config->setValue(QString("player_%1_type").arg(p), static_cast<u8>(player.type)); | 551 | WriteSetting(QString("player_%1_type").arg(p), static_cast<u8>(player.type), |
| 552 | static_cast<u8>(Settings::ControllerType::DualJoycon)); | ||
| 558 | 553 | ||
| 559 | qt_config->setValue(QString("player_%1_body_color_left").arg(p), player.body_color_left); | 554 | WriteSetting(QString("player_%1_body_color_left").arg(p), player.body_color_left, |
| 560 | qt_config->setValue(QString("player_%1_body_color_right").arg(p), player.body_color_right); | 555 | Settings::JOYCON_BODY_NEON_BLUE); |
| 561 | qt_config->setValue(QString("player_%1_button_color_left").arg(p), | 556 | WriteSetting(QString("player_%1_body_color_right").arg(p), player.body_color_right, |
| 562 | player.button_color_left); | 557 | Settings::JOYCON_BODY_NEON_RED); |
| 563 | qt_config->setValue(QString("player_%1_button_color_right").arg(p), | 558 | WriteSetting(QString("player_%1_button_color_left").arg(p), player.button_color_left, |
| 564 | player.button_color_right); | 559 | Settings::JOYCON_BUTTONS_NEON_BLUE); |
| 560 | WriteSetting(QString("player_%1_button_color_right").arg(p), player.button_color_right, | ||
| 561 | Settings::JOYCON_BUTTONS_NEON_RED); | ||
| 565 | 562 | ||
| 566 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { | 563 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { |
| 567 | qt_config->setValue(QString("player_%1_").arg(p) + | 564 | std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); |
| 568 | QString::fromStdString(Settings::NativeButton::mapping[i]), | 565 | WriteSetting(QString("player_%1_").arg(p) + |
| 569 | QString::fromStdString(player.buttons[i])); | 566 | QString::fromStdString(Settings::NativeButton::mapping[i]), |
| 567 | QString::fromStdString(player.buttons[i]), | ||
| 568 | QString::fromStdString(default_param)); | ||
| 570 | } | 569 | } |
| 571 | for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { | 570 | for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { |
| 572 | qt_config->setValue(QString("player_%1_").arg(p) + | 571 | std::string default_param = InputCommon::GenerateAnalogParamFromKeys( |
| 573 | QString::fromStdString(Settings::NativeAnalog::mapping[i]), | 572 | default_analogs[i][0], default_analogs[i][1], default_analogs[i][2], |
| 574 | QString::fromStdString(player.analogs[i])); | 573 | default_analogs[i][3], default_analogs[i][4], 0.5f); |
| 574 | WriteSetting(QString("player_%1_").arg(p) + | ||
| 575 | QString::fromStdString(Settings::NativeAnalog::mapping[i]), | ||
| 576 | QString::fromStdString(player.analogs[i]), | ||
| 577 | QString::fromStdString(default_param)); | ||
| 575 | } | 578 | } |
| 576 | } | 579 | } |
| 577 | } | 580 | } |
| 578 | 581 | ||
| 579 | void Config::SaveDebugValues() { | 582 | void Config::SaveDebugValues() { |
| 580 | qt_config->setValue("debug_pad_enabled", Settings::values.debug_pad_enabled); | 583 | WriteSetting("debug_pad_enabled", Settings::values.debug_pad_enabled, false); |
| 581 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { | 584 | for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) { |
| 582 | qt_config->setValue(QString("debug_pad_") + | 585 | std::string default_param = InputCommon::GenerateKeyboardParam(default_buttons[i]); |
| 583 | QString::fromStdString(Settings::NativeButton::mapping[i]), | 586 | WriteSetting(QString("debug_pad_") + |
| 584 | QString::fromStdString(Settings::values.debug_pad_buttons[i])); | 587 | QString::fromStdString(Settings::NativeButton::mapping[i]), |
| 588 | QString::fromStdString(Settings::values.debug_pad_buttons[i]), | ||
| 589 | QString::fromStdString(default_param)); | ||
| 585 | } | 590 | } |
| 586 | for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { | 591 | for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) { |
| 587 | qt_config->setValue(QString("debug_pad_") + | 592 | std::string default_param = InputCommon::GenerateAnalogParamFromKeys( |
| 588 | QString::fromStdString(Settings::NativeAnalog::mapping[i]), | 593 | default_analogs[i][0], default_analogs[i][1], default_analogs[i][2], |
| 589 | QString::fromStdString(Settings::values.debug_pad_analogs[i])); | 594 | default_analogs[i][3], default_analogs[i][4], 0.5f); |
| 595 | WriteSetting(QString("debug_pad_") + | ||
| 596 | QString::fromStdString(Settings::NativeAnalog::mapping[i]), | ||
| 597 | QString::fromStdString(Settings::values.debug_pad_analogs[i]), | ||
| 598 | QString::fromStdString(default_param)); | ||
| 590 | } | 599 | } |
| 591 | } | 600 | } |
| 592 | 601 | ||
| 593 | void Config::SaveMouseValues() { | 602 | void Config::SaveMouseValues() { |
| 594 | qt_config->setValue("mouse_enabled", Settings::values.mouse_enabled); | 603 | WriteSetting("mouse_enabled", Settings::values.mouse_enabled, false); |
| 595 | 604 | ||
| 596 | for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { | 605 | for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) { |
| 597 | qt_config->setValue(QString("mouse_") + | 606 | std::string default_param = InputCommon::GenerateKeyboardParam(default_mouse_buttons[i]); |
| 598 | QString::fromStdString(Settings::NativeMouseButton::mapping[i]), | 607 | WriteSetting(QString("mouse_") + |
| 599 | QString::fromStdString(Settings::values.mouse_buttons[i])); | 608 | QString::fromStdString(Settings::NativeMouseButton::mapping[i]), |
| 609 | QString::fromStdString(Settings::values.mouse_buttons[i]), | ||
| 610 | QString::fromStdString(default_param)); | ||
| 600 | } | 611 | } |
| 601 | } | 612 | } |
| 602 | 613 | ||
| 603 | void Config::SaveTouchscreenValues() { | 614 | void Config::SaveTouchscreenValues() { |
| 604 | qt_config->setValue("touchscreen_enabled", Settings::values.touchscreen.enabled); | 615 | WriteSetting("touchscreen_enabled", Settings::values.touchscreen.enabled, true); |
| 605 | qt_config->setValue("touchscreen_device", | 616 | WriteSetting("touchscreen_device", QString::fromStdString(Settings::values.touchscreen.device), |
| 606 | QString::fromStdString(Settings::values.touchscreen.device)); | 617 | "engine:emu_window"); |
| 607 | 618 | ||
| 608 | qt_config->setValue("touchscreen_finger", Settings::values.touchscreen.finger); | 619 | WriteSetting("touchscreen_finger", Settings::values.touchscreen.finger, 0); |
| 609 | qt_config->setValue("touchscreen_angle", Settings::values.touchscreen.rotation_angle); | 620 | WriteSetting("touchscreen_angle", Settings::values.touchscreen.rotation_angle, 0); |
| 610 | qt_config->setValue("touchscreen_diameter_x", Settings::values.touchscreen.diameter_x); | 621 | WriteSetting("touchscreen_diameter_x", Settings::values.touchscreen.diameter_x, 15); |
| 611 | qt_config->setValue("touchscreen_diameter_y", Settings::values.touchscreen.diameter_y); | 622 | WriteSetting("touchscreen_diameter_y", Settings::values.touchscreen.diameter_y, 15); |
| 612 | } | 623 | } |
| 613 | 624 | ||
| 614 | void Config::SaveValues() { | 625 | void Config::SaveValues() { |
| @@ -619,91 +630,96 @@ void Config::SaveValues() { | |||
| 619 | SaveMouseValues(); | 630 | SaveMouseValues(); |
| 620 | SaveTouchscreenValues(); | 631 | SaveTouchscreenValues(); |
| 621 | 632 | ||
| 622 | qt_config->setValue("motion_device", QString::fromStdString(Settings::values.motion_device)); | 633 | WriteSetting("motion_device", QString::fromStdString(Settings::values.motion_device), |
| 623 | qt_config->setValue("keyboard_enabled", Settings::values.keyboard_enabled); | 634 | "engine:motion_emu,update_period:100,sensitivity:0.01"); |
| 635 | WriteSetting("keyboard_enabled", Settings::values.keyboard_enabled, false); | ||
| 624 | 636 | ||
| 625 | qt_config->endGroup(); | 637 | qt_config->endGroup(); |
| 626 | 638 | ||
| 627 | qt_config->beginGroup("Core"); | 639 | qt_config->beginGroup("Core"); |
| 628 | qt_config->setValue("use_cpu_jit", Settings::values.use_cpu_jit); | 640 | WriteSetting("use_cpu_jit", Settings::values.use_cpu_jit, true); |
| 629 | qt_config->setValue("use_multi_core", Settings::values.use_multi_core); | 641 | WriteSetting("use_multi_core", Settings::values.use_multi_core, false); |
| 630 | qt_config->endGroup(); | 642 | qt_config->endGroup(); |
| 631 | 643 | ||
| 632 | qt_config->beginGroup("Renderer"); | 644 | qt_config->beginGroup("Renderer"); |
| 633 | qt_config->setValue("resolution_factor", (double)Settings::values.resolution_factor); | 645 | WriteSetting("resolution_factor", (double)Settings::values.resolution_factor, 1.0); |
| 634 | qt_config->setValue("use_frame_limit", Settings::values.use_frame_limit); | 646 | WriteSetting("use_frame_limit", Settings::values.use_frame_limit, true); |
| 635 | qt_config->setValue("frame_limit", Settings::values.frame_limit); | 647 | WriteSetting("frame_limit", Settings::values.frame_limit, 100); |
| 636 | qt_config->setValue("use_disk_shader_cache", Settings::values.use_disk_shader_cache); | 648 | WriteSetting("use_disk_shader_cache", Settings::values.use_disk_shader_cache, true); |
| 637 | qt_config->setValue("use_accurate_gpu_emulation", Settings::values.use_accurate_gpu_emulation); | 649 | WriteSetting("use_accurate_gpu_emulation", Settings::values.use_accurate_gpu_emulation, false); |
| 638 | qt_config->setValue("use_asynchronous_gpu_emulation", | 650 | WriteSetting("use_asynchronous_gpu_emulation", Settings::values.use_asynchronous_gpu_emulation, |
| 639 | Settings::values.use_asynchronous_gpu_emulation); | 651 | false); |
| 640 | 652 | ||
| 641 | // Cast to double because Qt's written float values are not human-readable | 653 | // Cast to double because Qt's written float values are not human-readable |
| 642 | qt_config->setValue("bg_red", (double)Settings::values.bg_red); | 654 | WriteSetting("bg_red", (double)Settings::values.bg_red, 0.0); |
| 643 | qt_config->setValue("bg_green", (double)Settings::values.bg_green); | 655 | WriteSetting("bg_green", (double)Settings::values.bg_green, 0.0); |
| 644 | qt_config->setValue("bg_blue", (double)Settings::values.bg_blue); | 656 | WriteSetting("bg_blue", (double)Settings::values.bg_blue, 0.0); |
| 645 | qt_config->endGroup(); | 657 | qt_config->endGroup(); |
| 646 | 658 | ||
| 647 | qt_config->beginGroup("Audio"); | 659 | qt_config->beginGroup("Audio"); |
| 648 | qt_config->setValue("output_engine", QString::fromStdString(Settings::values.sink_id)); | 660 | WriteSetting("output_engine", QString::fromStdString(Settings::values.sink_id), "auto"); |
| 649 | qt_config->setValue("enable_audio_stretching", Settings::values.enable_audio_stretching); | 661 | WriteSetting("enable_audio_stretching", Settings::values.enable_audio_stretching, true); |
| 650 | qt_config->setValue("output_device", QString::fromStdString(Settings::values.audio_device_id)); | 662 | WriteSetting("output_device", QString::fromStdString(Settings::values.audio_device_id), "auto"); |
| 651 | qt_config->setValue("volume", Settings::values.volume); | 663 | WriteSetting("volume", Settings::values.volume, 1.0f); |
| 652 | qt_config->endGroup(); | 664 | qt_config->endGroup(); |
| 653 | 665 | ||
| 654 | qt_config->beginGroup("Data Storage"); | 666 | qt_config->beginGroup("Data Storage"); |
| 655 | qt_config->setValue("use_virtual_sd", Settings::values.use_virtual_sd); | 667 | WriteSetting("use_virtual_sd", Settings::values.use_virtual_sd, true); |
| 656 | qt_config->setValue("nand_directory", | 668 | WriteSetting("nand_directory", |
| 657 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir))); | 669 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir)), |
| 658 | qt_config->setValue("sdmc_directory", | 670 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::NANDDir))); |
| 659 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir))); | 671 | WriteSetting("sdmc_directory", |
| 672 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir)), | ||
| 673 | QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::SDMCDir))); | ||
| 660 | qt_config->endGroup(); | 674 | qt_config->endGroup(); |
| 661 | 675 | ||
| 662 | qt_config->beginGroup("System"); | 676 | qt_config->beginGroup("System"); |
| 663 | qt_config->setValue("use_docked_mode", Settings::values.use_docked_mode); | 677 | WriteSetting("use_docked_mode", Settings::values.use_docked_mode, false); |
| 664 | qt_config->setValue("enable_nfc", Settings::values.enable_nfc); | 678 | WriteSetting("enable_nfc", Settings::values.enable_nfc, true); |
| 665 | qt_config->setValue("current_user", Settings::values.current_user); | 679 | WriteSetting("current_user", Settings::values.current_user, 0); |
| 666 | qt_config->setValue("language_index", Settings::values.language_index); | 680 | WriteSetting("language_index", Settings::values.language_index, 1); |
| 667 | 681 | ||
| 668 | qt_config->setValue("rng_seed_enabled", Settings::values.rng_seed.has_value()); | 682 | WriteSetting("rng_seed_enabled", Settings::values.rng_seed.has_value(), false); |
| 669 | qt_config->setValue("rng_seed", Settings::values.rng_seed.value_or(0)); | 683 | WriteSetting("rng_seed", Settings::values.rng_seed.value_or(0), 0); |
| 670 | 684 | ||
| 671 | qt_config->setValue("custom_rtc_enabled", Settings::values.custom_rtc.has_value()); | 685 | WriteSetting("custom_rtc_enabled", Settings::values.custom_rtc.has_value(), false); |
| 672 | qt_config->setValue("custom_rtc", | 686 | WriteSetting("custom_rtc", |
| 673 | QVariant::fromValue<long long>( | 687 | QVariant::fromValue<long long>( |
| 674 | Settings::values.custom_rtc.value_or(std::chrono::seconds{}).count())); | 688 | Settings::values.custom_rtc.value_or(std::chrono::seconds{}).count()), |
| 689 | 0); | ||
| 675 | 690 | ||
| 676 | qt_config->endGroup(); | 691 | qt_config->endGroup(); |
| 677 | 692 | ||
| 678 | qt_config->beginGroup("Miscellaneous"); | 693 | qt_config->beginGroup("Miscellaneous"); |
| 679 | qt_config->setValue("log_filter", QString::fromStdString(Settings::values.log_filter)); | 694 | WriteSetting("log_filter", QString::fromStdString(Settings::values.log_filter), "*:Info"); |
| 680 | qt_config->setValue("use_dev_keys", Settings::values.use_dev_keys); | 695 | WriteSetting("use_dev_keys", Settings::values.use_dev_keys, false); |
| 681 | qt_config->endGroup(); | 696 | qt_config->endGroup(); |
| 682 | 697 | ||
| 683 | qt_config->beginGroup("Debugging"); | 698 | qt_config->beginGroup("Debugging"); |
| 684 | qt_config->setValue("use_gdbstub", Settings::values.use_gdbstub); | 699 | WriteSetting("use_gdbstub", Settings::values.use_gdbstub, false); |
| 685 | qt_config->setValue("gdbstub_port", Settings::values.gdbstub_port); | 700 | WriteSetting("gdbstub_port", Settings::values.gdbstub_port, 24689); |
| 686 | qt_config->setValue("program_args", QString::fromStdString(Settings::values.program_args)); | 701 | WriteSetting("program_args", QString::fromStdString(Settings::values.program_args), ""); |
| 687 | qt_config->setValue("dump_exefs", Settings::values.dump_exefs); | 702 | WriteSetting("dump_exefs", Settings::values.dump_exefs, false); |
| 688 | qt_config->setValue("dump_nso", Settings::values.dump_nso); | 703 | WriteSetting("dump_nso", Settings::values.dump_nso, false); |
| 689 | qt_config->endGroup(); | 704 | qt_config->endGroup(); |
| 690 | 705 | ||
| 691 | qt_config->beginGroup("WebService"); | 706 | qt_config->beginGroup("WebService"); |
| 692 | qt_config->setValue("enable_telemetry", Settings::values.enable_telemetry); | 707 | WriteSetting("enable_telemetry", Settings::values.enable_telemetry, true); |
| 693 | qt_config->setValue("web_api_url", QString::fromStdString(Settings::values.web_api_url)); | 708 | WriteSetting("web_api_url", QString::fromStdString(Settings::values.web_api_url), |
| 694 | qt_config->setValue("yuzu_username", QString::fromStdString(Settings::values.yuzu_username)); | 709 | "https://api.yuzu-emu.org"); |
| 695 | qt_config->setValue("yuzu_token", QString::fromStdString(Settings::values.yuzu_token)); | 710 | WriteSetting("yuzu_username", QString::fromStdString(Settings::values.yuzu_username)); |
| 711 | WriteSetting("yuzu_token", QString::fromStdString(Settings::values.yuzu_token)); | ||
| 696 | qt_config->endGroup(); | 712 | qt_config->endGroup(); |
| 697 | 713 | ||
| 698 | qt_config->beginWriteArray("DisabledAddOns"); | 714 | qt_config->beginWriteArray("DisabledAddOns"); |
| 699 | int i = 0; | 715 | int i = 0; |
| 700 | for (const auto& elem : Settings::values.disabled_addons) { | 716 | for (const auto& elem : Settings::values.disabled_addons) { |
| 701 | qt_config->setArrayIndex(i); | 717 | qt_config->setArrayIndex(i); |
| 702 | qt_config->setValue("title_id", QVariant::fromValue<u64>(elem.first)); | 718 | WriteSetting("title_id", QVariant::fromValue<u64>(elem.first), 0); |
| 703 | qt_config->beginWriteArray("disabled"); | 719 | qt_config->beginWriteArray("disabled"); |
| 704 | for (std::size_t j = 0; j < elem.second.size(); ++j) { | 720 | for (std::size_t j = 0; j < elem.second.size(); ++j) { |
| 705 | qt_config->setArrayIndex(static_cast<int>(j)); | 721 | qt_config->setArrayIndex(static_cast<int>(j)); |
| 706 | qt_config->setValue("d", QString::fromStdString(elem.second[j])); | 722 | WriteSetting("d", QString::fromStdString(elem.second[j]), ""); |
| 707 | } | 723 | } |
| 708 | qt_config->endArray(); | 724 | qt_config->endArray(); |
| 709 | ++i; | 725 | ++i; |
| @@ -711,60 +727,86 @@ void Config::SaveValues() { | |||
| 711 | qt_config->endArray(); | 727 | qt_config->endArray(); |
| 712 | 728 | ||
| 713 | qt_config->beginGroup("UI"); | 729 | qt_config->beginGroup("UI"); |
| 714 | qt_config->setValue("theme", UISettings::values.theme); | 730 | WriteSetting("theme", UISettings::values.theme, UISettings::themes[0].second); |
| 715 | qt_config->setValue("enable_discord_presence", UISettings::values.enable_discord_presence); | 731 | WriteSetting("enable_discord_presence", UISettings::values.enable_discord_presence, true); |
| 716 | qt_config->setValue("screenshot_resolution_factor", | 732 | WriteSetting("screenshot_resolution_factor", UISettings::values.screenshot_resolution_factor, |
| 717 | UISettings::values.screenshot_resolution_factor); | 733 | 0); |
| 718 | qt_config->setValue("select_user_on_boot", UISettings::values.select_user_on_boot); | 734 | WriteSetting("select_user_on_boot", UISettings::values.select_user_on_boot, false); |
| 719 | 735 | ||
| 720 | qt_config->beginGroup("UIGameList"); | 736 | qt_config->beginGroup("UIGameList"); |
| 721 | qt_config->setValue("show_unknown", UISettings::values.show_unknown); | 737 | WriteSetting("show_unknown", UISettings::values.show_unknown, true); |
| 722 | qt_config->setValue("show_add_ons", UISettings::values.show_add_ons); | 738 | WriteSetting("show_add_ons", UISettings::values.show_add_ons, true); |
| 723 | qt_config->setValue("icon_size", UISettings::values.icon_size); | 739 | WriteSetting("icon_size", UISettings::values.icon_size, 64); |
| 724 | qt_config->setValue("row_1_text_id", UISettings::values.row_1_text_id); | 740 | WriteSetting("row_1_text_id", UISettings::values.row_1_text_id, 3); |
| 725 | qt_config->setValue("row_2_text_id", UISettings::values.row_2_text_id); | 741 | WriteSetting("row_2_text_id", UISettings::values.row_2_text_id, 2); |
| 726 | qt_config->endGroup(); | 742 | qt_config->endGroup(); |
| 727 | 743 | ||
| 728 | qt_config->beginGroup("UILayout"); | 744 | qt_config->beginGroup("UILayout"); |
| 729 | qt_config->setValue("geometry", UISettings::values.geometry); | 745 | WriteSetting("geometry", UISettings::values.geometry); |
| 730 | qt_config->setValue("state", UISettings::values.state); | 746 | WriteSetting("state", UISettings::values.state); |
| 731 | qt_config->setValue("geometryRenderWindow", UISettings::values.renderwindow_geometry); | 747 | WriteSetting("geometryRenderWindow", UISettings::values.renderwindow_geometry); |
| 732 | qt_config->setValue("gameListHeaderState", UISettings::values.gamelist_header_state); | 748 | WriteSetting("gameListHeaderState", UISettings::values.gamelist_header_state); |
| 733 | qt_config->setValue("microProfileDialogGeometry", UISettings::values.microprofile_geometry); | 749 | WriteSetting("microProfileDialogGeometry", UISettings::values.microprofile_geometry); |
| 734 | qt_config->setValue("microProfileDialogVisible", UISettings::values.microprofile_visible); | 750 | WriteSetting("microProfileDialogVisible", UISettings::values.microprofile_visible, false); |
| 735 | qt_config->endGroup(); | 751 | qt_config->endGroup(); |
| 736 | 752 | ||
| 737 | qt_config->beginGroup("Paths"); | 753 | qt_config->beginGroup("Paths"); |
| 738 | qt_config->setValue("romsPath", UISettings::values.roms_path); | 754 | WriteSetting("romsPath", UISettings::values.roms_path); |
| 739 | qt_config->setValue("symbolsPath", UISettings::values.symbols_path); | 755 | WriteSetting("symbolsPath", UISettings::values.symbols_path); |
| 740 | qt_config->setValue("screenshotPath", UISettings::values.screenshot_path); | 756 | WriteSetting("screenshotPath", UISettings::values.screenshot_path); |
| 741 | qt_config->setValue("gameListRootDir", UISettings::values.gamedir); | 757 | WriteSetting("gameListRootDir", UISettings::values.gamedir, "."); |
| 742 | qt_config->setValue("gameListDeepScan", UISettings::values.gamedir_deepscan); | 758 | WriteSetting("gameListDeepScan", UISettings::values.gamedir_deepscan, false); |
| 743 | qt_config->setValue("recentFiles", UISettings::values.recent_files); | 759 | WriteSetting("recentFiles", UISettings::values.recent_files); |
| 744 | qt_config->endGroup(); | 760 | qt_config->endGroup(); |
| 745 | 761 | ||
| 746 | qt_config->beginGroup("Shortcuts"); | 762 | qt_config->beginGroup("Shortcuts"); |
| 747 | for (auto shortcut : UISettings::values.shortcuts) { | 763 | for (auto shortcut : UISettings::values.shortcuts) { |
| 748 | qt_config->setValue(shortcut.first + "/KeySeq", shortcut.second.first); | 764 | WriteSetting(shortcut.first + "/KeySeq", shortcut.second.first); |
| 749 | qt_config->setValue(shortcut.first + "/Context", shortcut.second.second); | 765 | WriteSetting(shortcut.first + "/Context", shortcut.second.second); |
| 750 | } | 766 | } |
| 751 | qt_config->endGroup(); | 767 | qt_config->endGroup(); |
| 752 | 768 | ||
| 753 | qt_config->setValue("singleWindowMode", UISettings::values.single_window_mode); | 769 | WriteSetting("singleWindowMode", UISettings::values.single_window_mode, true); |
| 754 | qt_config->setValue("fullscreen", UISettings::values.fullscreen); | 770 | WriteSetting("fullscreen", UISettings::values.fullscreen, false); |
| 755 | qt_config->setValue("displayTitleBars", UISettings::values.display_titlebar); | 771 | WriteSetting("displayTitleBars", UISettings::values.display_titlebar, true); |
| 756 | qt_config->setValue("showFilterBar", UISettings::values.show_filter_bar); | 772 | WriteSetting("showFilterBar", UISettings::values.show_filter_bar, true); |
| 757 | qt_config->setValue("showStatusBar", UISettings::values.show_status_bar); | 773 | WriteSetting("showStatusBar", UISettings::values.show_status_bar, true); |
| 758 | qt_config->setValue("confirmClose", UISettings::values.confirm_before_closing); | 774 | WriteSetting("confirmClose", UISettings::values.confirm_before_closing, true); |
| 759 | qt_config->setValue("firstStart", UISettings::values.first_start); | 775 | WriteSetting("firstStart", UISettings::values.first_start, true); |
| 760 | qt_config->setValue("calloutFlags", UISettings::values.callout_flags); | 776 | WriteSetting("calloutFlags", UISettings::values.callout_flags, 0); |
| 761 | qt_config->setValue("showConsole", UISettings::values.show_console); | 777 | WriteSetting("showConsole", UISettings::values.show_console, false); |
| 762 | qt_config->setValue("profileIndex", UISettings::values.profile_index); | 778 | WriteSetting("profileIndex", UISettings::values.profile_index, 0); |
| 763 | qt_config->endGroup(); | 779 | qt_config->endGroup(); |
| 764 | } | 780 | } |
| 765 | 781 | ||
| 782 | QVariant Config::ReadSetting(const QString& name) const { | ||
| 783 | return qt_config->value(name); | ||
| 784 | } | ||
| 785 | |||
| 786 | QVariant Config::ReadSetting(const QString& name, const QVariant& default_value) const { | ||
| 787 | QVariant result; | ||
| 788 | if (qt_config->value(name + "/default", false).toBool()) { | ||
| 789 | result = default_value; | ||
| 790 | } else { | ||
| 791 | result = qt_config->value(name, default_value); | ||
| 792 | } | ||
| 793 | return result; | ||
| 794 | } | ||
| 795 | |||
| 796 | void Config::WriteSetting(const QString& name, const QVariant& value) { | ||
| 797 | qt_config->setValue(name, value); | ||
| 798 | } | ||
| 799 | |||
| 800 | void Config::WriteSetting(const QString& name, const QVariant& value, | ||
| 801 | const QVariant& default_value) { | ||
| 802 | qt_config->setValue(name + "/default", value == default_value); | ||
| 803 | qt_config->setValue(name, value); | ||
| 804 | } | ||
| 805 | |||
| 766 | void Config::Reload() { | 806 | void Config::Reload() { |
| 767 | ReadValues(); | 807 | ReadValues(); |
| 808 | // To apply default value changes | ||
| 809 | SaveValues(); | ||
| 768 | Settings::Apply(); | 810 | Settings::Apply(); |
| 769 | } | 811 | } |
| 770 | 812 | ||
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h index e73ad19bb..f4185db18 100644 --- a/src/yuzu/configuration/config.h +++ b/src/yuzu/configuration/config.h | |||
| @@ -42,6 +42,11 @@ private: | |||
| 42 | void SaveMouseValues(); | 42 | void SaveMouseValues(); |
| 43 | void SaveTouchscreenValues(); | 43 | void SaveTouchscreenValues(); |
| 44 | 44 | ||
| 45 | QVariant ReadSetting(const QString& name) const; | ||
| 46 | QVariant ReadSetting(const QString& name, const QVariant& default_value) const; | ||
| 47 | void WriteSetting(const QString& name, const QVariant& value); | ||
| 48 | void WriteSetting(const QString& name, const QVariant& value, const QVariant& default_value); | ||
| 49 | |||
| 45 | std::unique_ptr<QSettings> qt_config; | 50 | std::unique_ptr<QSettings> qt_config; |
| 46 | std::string qt_config_loc; | 51 | std::string qt_config_loc; |
| 47 | }; | 52 | }; |
diff --git a/src/yuzu/debugger/graphics/graphics_surface.cpp b/src/yuzu/debugger/graphics/graphics_surface.cpp index 71683da8e..11023ed63 100644 --- a/src/yuzu/debugger/graphics/graphics_surface.cpp +++ b/src/yuzu/debugger/graphics/graphics_surface.cpp | |||
| @@ -261,7 +261,7 @@ void GraphicsSurfaceWidget::OnSurfaceSourceChanged(int new_value) { | |||
| 261 | 261 | ||
| 262 | void GraphicsSurfaceWidget::OnSurfaceAddressChanged(qint64 new_value) { | 262 | void GraphicsSurfaceWidget::OnSurfaceAddressChanged(qint64 new_value) { |
| 263 | if (surface_address != new_value) { | 263 | if (surface_address != new_value) { |
| 264 | surface_address = static_cast<Tegra::GPUVAddr>(new_value); | 264 | surface_address = static_cast<GPUVAddr>(new_value); |
| 265 | 265 | ||
| 266 | surface_source_list->setCurrentIndex(static_cast<int>(Source::Custom)); | 266 | surface_source_list->setCurrentIndex(static_cast<int>(Source::Custom)); |
| 267 | emit Update(); | 267 | emit Update(); |
| @@ -383,13 +383,12 @@ void GraphicsSurfaceWidget::OnUpdate() { | |||
| 383 | // TODO: Implement a good way to visualize alpha components! | 383 | // TODO: Implement a good way to visualize alpha components! |
| 384 | 384 | ||
| 385 | QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32); | 385 | QImage decoded_image(surface_width, surface_height, QImage::Format_ARGB32); |
| 386 | std::optional<VAddr> address = gpu.MemoryManager().GpuToCpuAddress(surface_address); | ||
| 387 | 386 | ||
| 388 | // TODO(bunnei): Will not work with BCn formats that swizzle 4x4 tiles. | 387 | // TODO(bunnei): Will not work with BCn formats that swizzle 4x4 tiles. |
| 389 | // Needs to be fixed if we plan to use this feature more, otherwise we may remove it. | 388 | // Needs to be fixed if we plan to use this feature more, otherwise we may remove it. |
| 390 | auto unswizzled_data = Tegra::Texture::UnswizzleTexture( | 389 | auto unswizzled_data = Tegra::Texture::UnswizzleTexture( |
| 391 | *address, 1, 1, Tegra::Texture::BytesPerPixel(surface_format), surface_width, | 390 | gpu.MemoryManager().GetPointer(surface_address), 1, 1, |
| 392 | surface_height, 1U); | 391 | Tegra::Texture::BytesPerPixel(surface_format), surface_width, surface_height, 1U); |
| 393 | 392 | ||
| 394 | auto texture_data = Tegra::Texture::DecodeTexture(unswizzled_data, surface_format, | 393 | auto texture_data = Tegra::Texture::DecodeTexture(unswizzled_data, surface_format, |
| 395 | surface_width, surface_height); | 394 | surface_width, surface_height); |
diff --git a/src/yuzu/debugger/graphics/graphics_surface.h b/src/yuzu/debugger/graphics/graphics_surface.h index 323e39d94..89445b18f 100644 --- a/src/yuzu/debugger/graphics/graphics_surface.h +++ b/src/yuzu/debugger/graphics/graphics_surface.h | |||
| @@ -87,7 +87,7 @@ private: | |||
| 87 | QPushButton* save_surface; | 87 | QPushButton* save_surface; |
| 88 | 88 | ||
| 89 | Source surface_source; | 89 | Source surface_source; |
| 90 | Tegra::GPUVAddr surface_address; | 90 | GPUVAddr surface_address; |
| 91 | unsigned surface_width; | 91 | unsigned surface_width; |
| 92 | unsigned surface_height; | 92 | unsigned surface_height; |
| 93 | Tegra::Texture::TextureFormat surface_format; | 93 | Tegra::Texture::TextureFormat surface_format; |
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp index 7df8eff53..de7a26e14 100644 --- a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp +++ b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp | |||
| @@ -135,16 +135,16 @@ bool EmuWindow_SDL2::SupportsRequiredGLExtensions() { | |||
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) { | 137 | EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) { |
| 138 | InputCommon::Init(); | ||
| 139 | |||
| 140 | SDL_SetMainReady(); | ||
| 141 | |||
| 142 | // Initialize the window | 138 | // Initialize the window |
| 143 | if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_JOYSTICK) < 0) { | 139 | if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_JOYSTICK) < 0) { |
| 144 | LOG_CRITICAL(Frontend, "Failed to initialize SDL2! Exiting..."); | 140 | LOG_CRITICAL(Frontend, "Failed to initialize SDL2! Exiting..."); |
| 145 | exit(1); | 141 | exit(1); |
| 146 | } | 142 | } |
| 147 | 143 | ||
| 144 | InputCommon::Init(); | ||
| 145 | |||
| 146 | SDL_SetMainReady(); | ||
| 147 | |||
| 148 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4); | 148 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4); |
| 149 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3); | 149 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3); |
| 150 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE); | 150 | SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE); |
| @@ -201,11 +201,9 @@ EmuWindow_SDL2::EmuWindow_SDL2(bool fullscreen) { | |||
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | EmuWindow_SDL2::~EmuWindow_SDL2() { | 203 | EmuWindow_SDL2::~EmuWindow_SDL2() { |
| 204 | InputCommon::SDL::CloseSDLJoysticks(); | 204 | InputCommon::Shutdown(); |
| 205 | SDL_GL_DeleteContext(gl_context); | 205 | SDL_GL_DeleteContext(gl_context); |
| 206 | SDL_Quit(); | 206 | SDL_Quit(); |
| 207 | |||
| 208 | InputCommon::Shutdown(); | ||
| 209 | } | 207 | } |
| 210 | 208 | ||
| 211 | void EmuWindow_SDL2::SwapBuffers() { | 209 | void EmuWindow_SDL2::SwapBuffers() { |
| @@ -262,7 +260,6 @@ void EmuWindow_SDL2::PollEvents() { | |||
| 262 | is_open = false; | 260 | is_open = false; |
| 263 | break; | 261 | break; |
| 264 | default: | 262 | default: |
| 265 | InputCommon::SDL::HandleGameControllerEvent(event); | ||
| 266 | break; | 263 | break; |
| 267 | } | 264 | } |
| 268 | } | 265 | } |
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp index c6c66a787..245f25847 100644 --- a/src/yuzu_cmd/yuzu.cpp +++ b/src/yuzu_cmd/yuzu.cpp | |||
| @@ -114,9 +114,9 @@ int main(int argc, char** argv) { | |||
| 114 | }; | 114 | }; |
| 115 | 115 | ||
| 116 | while (optind < argc) { | 116 | while (optind < argc) { |
| 117 | char arg = getopt_long(argc, argv, "g:fhvp::", long_options, &option_index); | 117 | int arg = getopt_long(argc, argv, "g:fhvp::", long_options, &option_index); |
| 118 | if (arg != -1) { | 118 | if (arg != -1) { |
| 119 | switch (arg) { | 119 | switch (static_cast<char>(arg)) { |
| 120 | case 'g': | 120 | case 'g': |
| 121 | errno = 0; | 121 | errno = 0; |
| 122 | gdb_port = strtoul(optarg, &endarg, 0); | 122 | gdb_port = strtoul(optarg, &endarg, 0); |