Diffstat (limited to 'src/common')
36 files changed, 2664 insertions, 88 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 566695fde..a6dc31b53 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | # SPDX-FileCopyrightText: 2018 yuzu Emulator Project | ||
| 2 | # SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 1 | if (DEFINED ENV{AZURECIREPO}) | 4 | if (DEFINED ENV{AZURECIREPO}) |
| 2 | set(BUILD_REPOSITORY $ENV{AZURECIREPO}) | 5 | set(BUILD_REPOSITORY $ENV{AZURECIREPO}) |
| 3 | endif() | 6 | endif() |
| @@ -41,8 +44,10 @@ add_custom_command(OUTPUT scm_rev.cpp | |||
| 41 | add_library(common STATIC | 44 | add_library(common STATIC |
| 42 | algorithm.h | 45 | algorithm.h |
| 43 | alignment.h | 46 | alignment.h |
| 47 | announce_multiplayer_room.h | ||
| 44 | assert.cpp | 48 | assert.cpp |
| 45 | assert.h | 49 | assert.h |
| 50 | atomic_helpers.h | ||
| 46 | atomic_ops.h | 51 | atomic_ops.h |
| 47 | detached_tasks.cpp | 52 | detached_tasks.cpp |
| 48 | detached_tasks.h | 53 | detached_tasks.h |
| @@ -64,6 +69,7 @@ add_library(common STATIC | |||
| 64 | expected.h | 69 | expected.h |
| 65 | fiber.cpp | 70 | fiber.cpp |
| 66 | fiber.h | 71 | fiber.h |
| 72 | fixed_point.h | ||
| 67 | fs/file.cpp | 73 | fs/file.cpp |
| 68 | fs/file.h | 74 | fs/file.h |
| 69 | fs/fs.cpp | 75 | fs/fs.cpp |
| @@ -109,6 +115,7 @@ add_library(common STATIC | |||
| 109 | parent_of_member.h | 115 | parent_of_member.h |
| 110 | point.h | 116 | point.h |
| 111 | quaternion.h | 117 | quaternion.h |
| 118 | reader_writer_queue.h | ||
| 112 | ring_buffer.h | 119 | ring_buffer.h |
| 113 | scm_rev.cpp | 120 | scm_rev.cpp |
| 114 | scm_rev.h | 121 | scm_rev.h |
diff --git a/src/common/alignment.h b/src/common/alignment.h index 8570c7d3c..7e897334b 100644 --- a/src/common/alignment.h +++ b/src/common/alignment.h | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | // This file is under the public domain. | 1 | // SPDX-FileCopyrightText: 2014 Jannik Vogel <email@jannikvogel.de> |
| 2 | // SPDX-License-Identifier: CC0-1.0 | ||
| 2 | 3 | ||
| 3 | #pragma once | 4 | #pragma once |
| 4 | 5 | ||
diff --git a/src/common/announce_multiplayer_room.h b/src/common/announce_multiplayer_room.h new file mode 100644 index 000000000..0ad9da2be --- /dev/null +++ b/src/common/announce_multiplayer_room.h | |||
| @@ -0,0 +1,143 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2017 Citra Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <functional> | ||
| 8 | #include <string> | ||
| 9 | #include <vector> | ||
| 10 | #include "common/common_types.h" | ||
| 11 | #include "web_service/web_result.h" | ||
| 12 | |||
| 13 | namespace AnnounceMultiplayerRoom { | ||
| 14 | |||
| 15 | using MacAddress = std::array<u8, 6>; | ||
| 16 | |||
| 17 | struct GameInfo { | ||
| 18 | std::string name{""}; | ||
| 19 | u64 id{0}; | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct Member { | ||
| 23 | std::string username; | ||
| 24 | std::string nickname; | ||
| 25 | std::string display_name; | ||
| 26 | std::string avatar_url; | ||
| 27 | MacAddress mac_address; | ||
| 28 | GameInfo game; | ||
| 29 | }; | ||
| 30 | |||
| 31 | struct RoomInformation { | ||
| 32 | std::string name; ///< Name of the server | ||
| 33 | std::string description; ///< Server description | ||
| 34 | u32 member_slots; ///< Maximum number of members in this room | ||
| 35 | u16 port; ///< The port of this room | ||
| 36 | GameInfo preferred_game; ///< Game to advertise that you want to play | ||
| 37 | std::string host_username; ///< Forum username of the host | ||
| 38 | bool enable_yuzu_mods; ///< Allow yuzu Moderators to moderate this room | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct Room { | ||
| 42 | RoomInformation information; | ||
| 43 | |||
| 44 | std::string id; | ||
| 45 | std::string verify_uid; ///< UID used for verification | ||
| 46 | std::string ip; | ||
| 47 | u32 net_version; | ||
| 48 | bool has_password; | ||
| 49 | |||
| 50 | std::vector<Member> members; | ||
| 51 | }; | ||
| 52 | using RoomList = std::vector<Room>; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * An AnnounceMultiplayerRoom interface class. A backend that submits data to or retrieves data | ||
| 56 | * from a web service should implement this interface. | ||
| 57 | */ | ||
| 58 | class Backend { | ||
| 59 | public: | ||
| 60 | virtual ~Backend() = default; | ||
| 61 | |||
| 62 | /** | ||
| 63 | * Sets the room information that is sent to the announce service when the room is | ||
| 64 | * registered or updated | ||
| 65 | * @param name The name of the room | ||
| 66 | * @param description The room description | ||
| 67 | * @param port The port of the room | ||
| 68 | * @param max_player The maximum number of players allowed in the room | ||
| 69 | * @param net_version The version of the libNetwork that gets used | ||
| 70 | * @param has_password True if the room is password protected | ||
| 71 | * @param preferred_game The preferred game of the room, including its title id | ||
| 72 | */ | ||
| 73 | virtual void SetRoomInformation(const std::string& name, const std::string& description, | ||
| 74 | const u16 port, const u32 max_player, const u32 net_version, | ||
| 75 | const bool has_password, const GameInfo& preferred_game) = 0; | ||
| 76 | /** | ||
| 77 | * Adds a player's information to the data that gets announced | ||
| 78 | * @param member The member to add, containing in particular: | ||
| 79 | * - the nickname of the player | ||
| 80 | * - the MAC address of the player | ||
| 81 | * - the title id and name of the game the player is playing | ||
| 82 | */ | ||
| 83 | virtual void AddPlayer(const Member& member) = 0; | ||
| 84 | |||
| 85 | /** | ||
| 86 | * Updates the data in the announce service. Re-registers the room when required. | ||
| 87 | * @result The result of the update attempt | ||
| 88 | */ | ||
| 89 | virtual WebService::WebResult Update() = 0; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * Registers the data in the announce service | ||
| 93 | * @result The result of the register attempt. When the result code is Success, a global GUID | ||
| 94 | * of the room, which may be used for verification, will be in the result's returned_data. | ||
| 95 | */ | ||
| 96 | virtual WebService::WebResult Register() = 0; | ||
| 97 | |||
| 98 | /** | ||
| 99 | * Empties the stored players | ||
| 100 | */ | ||
| 101 | virtual void ClearPlayers() = 0; | ||
| 102 | |||
| 103 | /** | ||
| 104 | * Gets the room list from the announce service | ||
| 105 | * @result A list of all rooms the announce service has | ||
| 106 | */ | ||
| 107 | virtual RoomList GetRoomList() = 0; | ||
| 108 | |||
| 109 | /** | ||
| 110 | * Sends a delete message to the announce service | ||
| 111 | */ | ||
| 112 | virtual void Delete() = 0; | ||
| 113 | }; | ||
| 114 | |||
| 115 | /** | ||
| 116 | * Empty implementation of the AnnounceMultiplayerRoom interface that drops all data. Used when a | ||
| 117 | * functional backend implementation is not available. | ||
| 118 | */ | ||
| 119 | class NullBackend : public Backend { | ||
| 120 | public: | ||
| 121 | ~NullBackend() = default; | ||
| 122 | void SetRoomInformation(const std::string& /*name*/, const std::string& /*description*/, | ||
| 123 | const u16 /*port*/, const u32 /*max_player*/, const u32 /*net_version*/, | ||
| 124 | const bool /*has_password*/, | ||
| 125 | const GameInfo& /*preferred_game*/) override {} | ||
| 126 | void AddPlayer(const Member& /*member*/) override {} | ||
| 127 | WebService::WebResult Update() override { | ||
| 128 | return WebService::WebResult{WebService::WebResult::Code::NoWebservice, | ||
| 129 | "WebService is missing", ""}; | ||
| 130 | } | ||
| 131 | WebService::WebResult Register() override { | ||
| 132 | return WebService::WebResult{WebService::WebResult::Code::NoWebservice, | ||
| 133 | "WebService is missing", ""}; | ||
| 134 | } | ||
| 135 | void ClearPlayers() override {} | ||
| 136 | RoomList GetRoomList() override { | ||
| 137 | return RoomList{}; | ||
| 138 | } | ||
| 139 | |||
| 140 | void Delete() override {} | ||
| 141 | }; | ||
| 142 | |||
| 143 | } // namespace AnnounceMultiplayerRoom | ||
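For orientation, the following is a minimal usage sketch of the Backend interface added above. It is not part of the change: the function name AnnounceExample, the room values, and the player data are invented for illustration, and NullBackend simply discards everything it is given.

#include "common/announce_multiplayer_room.h"

// Hypothetical example: drive the announce backend with made-up room data.
void AnnounceExample() {
    using namespace AnnounceMultiplayerRoom;

    NullBackend backend; // a real web-service backend would implement Backend instead

    // Describe the room that should be advertised (illustrative values only).
    const GameInfo preferred_game{"Some Game", 0x0100000000000000ULL};
    backend.SetRoomInformation("My Room", "A test room", 24872, 8, 1,
                               /*has_password=*/false, preferred_game);

    // Register once; NullBackend reports Code::NoWebservice here, while a real backend
    // would return Success together with a verification GUID in its returned data.
    backend.Register();

    // Keep the member list and the announcement fresh while the room is open.
    Member player{};
    player.nickname = "player1";
    player.game = preferred_game;
    backend.ClearPlayers();
    backend.AddPlayer(player);
    backend.Update();

    // Tear the announcement down when the room closes.
    backend.Delete();
}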
diff --git a/src/common/atomic_helpers.h b/src/common/atomic_helpers.h new file mode 100644 index 000000000..bef5015c1 --- /dev/null +++ b/src/common/atomic_helpers.h | |||
| @@ -0,0 +1,775 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2013-2016 Cameron Desrochers | ||
| 2 | // SPDX-FileCopyrightText: 2015 Jeff Preshing | ||
| 3 | // SPDX-License-Identifier: BSD-2-Clause AND Zlib | ||
| 4 | |||
| 5 | // Distributed under the simplified BSD license (see the license file that | ||
| 6 | // should have come with this header). | ||
| 7 | // Uses Jeff Preshing's semaphore implementation (under the terms of its | ||
| 8 | // separate zlib license, embedded below). | ||
| 9 | |||
| 10 | #pragma once | ||
| 11 | |||
| 12 | // Provides a portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) | ||
| 13 | // implementation of low-level memory barriers, plus a few semi-portable utility macros (for | ||
| 14 | // inlining and alignment). Also has a basic atomic type (limited to hardware-supported atomics with | ||
| 15 | // no memory ordering guarantees). Uses the AE_* prefix for macros (for historical reasons); the | ||
| 16 | // symbols live in the "Common" namespace here (upstream uses "moodycamel"). | ||
| 17 | |||
| 18 | #include <cassert> | ||
| 19 | #include <cerrno> | ||
| 20 | #include <cstdint> | ||
| 21 | #include <ctime> | ||
| 22 | #include <type_traits> | ||
| 23 | |||
| 24 | // Platform detection | ||
| 25 | #if defined(__INTEL_COMPILER) | ||
| 26 | #define AE_ICC | ||
| 27 | #elif defined(_MSC_VER) | ||
| 28 | #define AE_VCPP | ||
| 29 | #elif defined(__GNUC__) | ||
| 30 | #define AE_GCC | ||
| 31 | #endif | ||
| 32 | |||
| 33 | #if defined(_M_IA64) || defined(__ia64__) | ||
| 34 | #define AE_ARCH_IA64 | ||
| 35 | #elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__) | ||
| 36 | #define AE_ARCH_X64 | ||
| 37 | #elif defined(_M_IX86) || defined(__i386__) | ||
| 38 | #define AE_ARCH_X86 | ||
| 39 | #elif defined(_M_PPC) || defined(__powerpc__) | ||
| 40 | #define AE_ARCH_PPC | ||
| 41 | #else | ||
| 42 | #define AE_ARCH_UNKNOWN | ||
| 43 | #endif | ||
| 44 | |||
| 45 | // AE_UNUSED | ||
| 46 | #define AE_UNUSED(x) ((void)x) | ||
| 47 | |||
| 48 | // AE_NO_TSAN/AE_TSAN_ANNOTATE_* | ||
| 49 | #if defined(__has_feature) | ||
| 50 | #if __has_feature(thread_sanitizer) | ||
| 51 | #if __cplusplus >= 201703L // inline variables require C++17 | ||
| 52 | namespace Common { | ||
| 53 | inline int ae_tsan_global; | ||
| 54 | } | ||
| 55 | #define AE_TSAN_ANNOTATE_RELEASE() \ | ||
| 56 | AnnotateHappensBefore(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global)) | ||
| 57 | #define AE_TSAN_ANNOTATE_ACQUIRE() \ | ||
| 58 | AnnotateHappensAfter(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global)) | ||
| 59 | extern "C" void AnnotateHappensBefore(const char*, int, void*); | ||
| 60 | extern "C" void AnnotateHappensAfter(const char*, int, void*); | ||
| 61 | #else // when we can't work with tsan, attempt to disable its warnings | ||
| 62 | #define AE_NO_TSAN __attribute__((no_sanitize("thread"))) | ||
| 63 | #endif | ||
| 64 | #endif | ||
| 65 | #endif | ||
| 66 | #ifndef AE_NO_TSAN | ||
| 67 | #define AE_NO_TSAN | ||
| 68 | #endif | ||
| 69 | #ifndef AE_TSAN_ANNOTATE_RELEASE | ||
| 70 | #define AE_TSAN_ANNOTATE_RELEASE() | ||
| 71 | #define AE_TSAN_ANNOTATE_ACQUIRE() | ||
| 72 | #endif | ||
| 73 | |||
| 74 | // AE_FORCEINLINE | ||
| 75 | #if defined(AE_VCPP) || defined(AE_ICC) | ||
| 76 | #define AE_FORCEINLINE __forceinline | ||
| 77 | #elif defined(AE_GCC) | ||
| 78 | //#define AE_FORCEINLINE __attribute__((always_inline)) | ||
| 79 | #define AE_FORCEINLINE inline | ||
| 80 | #else | ||
| 81 | #define AE_FORCEINLINE inline | ||
| 82 | #endif | ||
| 83 | |||
| 84 | // AE_ALIGN | ||
| 85 | #if defined(AE_VCPP) || defined(AE_ICC) | ||
| 86 | #define AE_ALIGN(x) __declspec(align(x)) | ||
| 87 | #elif defined(AE_GCC) | ||
| 88 | #define AE_ALIGN(x) __attribute__((aligned(x))) | ||
| 89 | #else | ||
| 90 | // Assume GCC compliant syntax... | ||
| 91 | #define AE_ALIGN(x) __attribute__((aligned(x))) | ||
| 92 | #endif | ||
| 93 | |||
| 94 | // Portable atomic fences implemented below: | ||
| 95 | |||
| 96 | namespace Common { | ||
| 97 | |||
| 98 | enum memory_order { | ||
| 99 | memory_order_relaxed, | ||
| 100 | memory_order_acquire, | ||
| 101 | memory_order_release, | ||
| 102 | memory_order_acq_rel, | ||
| 103 | memory_order_seq_cst, | ||
| 104 | |||
| 105 | // memory_order_sync: Forces a full sync: | ||
| 106 | // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad | ||
| 107 | memory_order_sync = memory_order_seq_cst | ||
| 108 | }; | ||
| 109 | |||
| 110 | } // namespace Common | ||
| 111 | |||
| 112 | #if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || \ | ||
| 113 | (defined(AE_ICC) && __INTEL_COMPILER < 1600) | ||
| 114 | // VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences | ||
| 115 | |||
| 116 | #include <intrin.h> | ||
| 117 | |||
| 118 | #if defined(AE_ARCH_X64) || defined(AE_ARCH_X86) | ||
| 119 | #define AeFullSync _mm_mfence | ||
| 120 | #define AeLiteSync _mm_mfence | ||
| 121 | #elif defined(AE_ARCH_IA64) | ||
| 122 | #define AeFullSync __mf | ||
| 123 | #define AeLiteSync __mf | ||
| 124 | #elif defined(AE_ARCH_PPC) | ||
| 125 | #include <ppcintrinsics.h> | ||
| 126 | #define AeFullSync __sync | ||
| 127 | #define AeLiteSync __lwsync | ||
| 128 | #endif | ||
| 129 | |||
| 130 | #ifdef AE_VCPP | ||
| 131 | #pragma warning(push) | ||
| 132 | #pragma warning(disable : 4365) // Disable erroneous 'conversion from long to unsigned int, | ||
| 133 | // signed/unsigned mismatch' error when using `assert` | ||
| 134 | #ifdef __cplusplus_cli | ||
| 135 | #pragma managed(push, off) | ||
| 136 | #endif | ||
| 137 | #endif | ||
| 138 | |||
| 139 | namespace Common { | ||
| 140 | |||
| 141 | AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN { | ||
| 142 | switch (order) { | ||
| 143 | case memory_order_relaxed: | ||
| 144 | break; | ||
| 145 | case memory_order_acquire: | ||
| 146 | _ReadBarrier(); | ||
| 147 | break; | ||
| 148 | case memory_order_release: | ||
| 149 | _WriteBarrier(); | ||
| 150 | break; | ||
| 151 | case memory_order_acq_rel: | ||
| 152 | _ReadWriteBarrier(); | ||
| 153 | break; | ||
| 154 | case memory_order_seq_cst: | ||
| 155 | _ReadWriteBarrier(); | ||
| 156 | break; | ||
| 157 | default: | ||
| 158 | assert(false); | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
| 162 | // x86/x64 have a strong memory model -- all loads and stores have | ||
| 163 | // acquire and release semantics automatically (so only need compiler | ||
| 164 | // barriers for those). | ||
| 165 | #if defined(AE_ARCH_X86) || defined(AE_ARCH_X64) | ||
| 166 | AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN { | ||
| 167 | switch (order) { | ||
| 168 | case memory_order_relaxed: | ||
| 169 | break; | ||
| 170 | case memory_order_acquire: | ||
| 171 | _ReadBarrier(); | ||
| 172 | break; | ||
| 173 | case memory_order_release: | ||
| 174 | _WriteBarrier(); | ||
| 175 | break; | ||
| 176 | case memory_order_acq_rel: | ||
| 177 | _ReadWriteBarrier(); | ||
| 178 | break; | ||
| 179 | case memory_order_seq_cst: | ||
| 180 | _ReadWriteBarrier(); | ||
| 181 | AeFullSync(); | ||
| 182 | _ReadWriteBarrier(); | ||
| 183 | break; | ||
| 184 | default: | ||
| 185 | assert(false); | ||
| 186 | } | ||
| 187 | } | ||
| 188 | #else | ||
| 189 | AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN { | ||
| 190 | // Non-specialized arch, use heavier memory barriers everywhere just in case :-( | ||
| 191 | switch (order) { | ||
| 192 | case memory_order_relaxed: | ||
| 193 | break; | ||
| 194 | case memory_order_acquire: | ||
| 195 | _ReadBarrier(); | ||
| 196 | AeLiteSync(); | ||
| 197 | _ReadBarrier(); | ||
| 198 | break; | ||
| 199 | case memory_order_release: | ||
| 200 | _WriteBarrier(); | ||
| 201 | AeLiteSync(); | ||
| 202 | _WriteBarrier(); | ||
| 203 | break; | ||
| 204 | case memory_order_acq_rel: | ||
| 205 | _ReadWriteBarrier(); | ||
| 206 | AeLiteSync(); | ||
| 207 | _ReadWriteBarrier(); | ||
| 208 | break; | ||
| 209 | case memory_order_seq_cst: | ||
| 210 | _ReadWriteBarrier(); | ||
| 211 | AeFullSync(); | ||
| 212 | _ReadWriteBarrier(); | ||
| 213 | break; | ||
| 214 | default: | ||
| 215 | assert(false); | ||
| 216 | } | ||
| 217 | } | ||
| 218 | #endif | ||
| 219 | } // namespace Common | ||
| 220 | #else | ||
| 221 | // Use the standard library's atomics | ||
| 222 | #include <atomic> | ||
| 223 | |||
| 224 | namespace Common { | ||
| 225 | |||
| 226 | AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN { | ||
| 227 | switch (order) { | ||
| 228 | case memory_order_relaxed: | ||
| 229 | break; | ||
| 230 | case memory_order_acquire: | ||
| 231 | std::atomic_signal_fence(std::memory_order_acquire); | ||
| 232 | break; | ||
| 233 | case memory_order_release: | ||
| 234 | std::atomic_signal_fence(std::memory_order_release); | ||
| 235 | break; | ||
| 236 | case memory_order_acq_rel: | ||
| 237 | std::atomic_signal_fence(std::memory_order_acq_rel); | ||
| 238 | break; | ||
| 239 | case memory_order_seq_cst: | ||
| 240 | std::atomic_signal_fence(std::memory_order_seq_cst); | ||
| 241 | break; | ||
| 242 | default: | ||
| 243 | assert(false); | ||
| 244 | } | ||
| 245 | } | ||
| 246 | |||
| 247 | AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN { | ||
| 248 | switch (order) { | ||
| 249 | case memory_order_relaxed: | ||
| 250 | break; | ||
| 251 | case memory_order_acquire: | ||
| 252 | AE_TSAN_ANNOTATE_ACQUIRE(); | ||
| 253 | std::atomic_thread_fence(std::memory_order_acquire); | ||
| 254 | break; | ||
| 255 | case memory_order_release: | ||
| 256 | AE_TSAN_ANNOTATE_RELEASE(); | ||
| 257 | std::atomic_thread_fence(std::memory_order_release); | ||
| 258 | break; | ||
| 259 | case memory_order_acq_rel: | ||
| 260 | AE_TSAN_ANNOTATE_ACQUIRE(); | ||
| 261 | AE_TSAN_ANNOTATE_RELEASE(); | ||
| 262 | std::atomic_thread_fence(std::memory_order_acq_rel); | ||
| 263 | break; | ||
| 264 | case memory_order_seq_cst: | ||
| 265 | AE_TSAN_ANNOTATE_ACQUIRE(); | ||
| 266 | AE_TSAN_ANNOTATE_RELEASE(); | ||
| 267 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 268 | break; | ||
| 269 | default: | ||
| 270 | assert(false); | ||
| 271 | } | ||
| 272 | } | ||
| 273 | |||
| 274 | } // namespace Common | ||
| 275 | |||
| 276 | #endif | ||
| 277 | |||
| 278 | #if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli)) | ||
| 279 | #define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC | ||
| 280 | #endif | ||
| 281 | |||
| 282 | #ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC | ||
| 283 | #include <atomic> | ||
| 284 | #endif | ||
| 285 | #include <utility> | ||
| 286 | |||
| 287 | // WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY: | ||
| 288 | // Provides basic support for atomic variables -- no memory ordering guarantees are provided. | ||
| 289 | // The guarantee of atomicity is only made for types that already have atomic load and store | ||
| 290 | // guarantees at the hardware level -- on most platforms this generally means aligned pointers and | ||
| 291 | // integers (only). | ||
| 292 | namespace Common { | ||
| 293 | template <typename T> | ||
| 294 | class weak_atomic { | ||
| 295 | public: | ||
| 296 | AE_NO_TSAN weak_atomic() : value() {} | ||
| 297 | #ifdef AE_VCPP | ||
| 298 | #pragma warning(push) | ||
| 299 | #pragma warning(disable : 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning | ||
| 300 | #endif | ||
| 301 | template <typename U> | ||
| 302 | AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) {} | ||
| 303 | #ifdef __cplusplus_cli | ||
| 304 | // Work around bug with universal reference/nullptr combination that only appears when /clr is | ||
| 305 | // on | ||
| 306 | AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) {} | ||
| 307 | #endif | ||
| 308 | AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) {} | ||
| 309 | AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) {} | ||
| 310 | #ifdef AE_VCPP | ||
| 311 | #pragma warning(pop) | ||
| 312 | #endif | ||
| 313 | |||
| 314 | AE_FORCEINLINE operator T() const AE_NO_TSAN { | ||
| 315 | return load(); | ||
| 316 | } | ||
| 317 | |||
| 318 | #ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC | ||
| 319 | template <typename U> | ||
| 320 | AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { | ||
| 321 | value = std::forward<U>(x); | ||
| 322 | return *this; | ||
| 323 | } | ||
| 324 | AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { | ||
| 325 | value = other.value; | ||
| 326 | return *this; | ||
| 327 | } | ||
| 328 | |||
| 329 | AE_FORCEINLINE T load() const AE_NO_TSAN { | ||
| 330 | return value; | ||
| 331 | } | ||
| 332 | |||
| 333 | AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN { | ||
| 334 | #if defined(AE_ARCH_X64) || defined(AE_ARCH_X86) | ||
| 335 | if (sizeof(T) == 4) | ||
| 336 | return _InterlockedExchangeAdd((long volatile*)&value, (long)increment); | ||
| 337 | #if defined(_M_AMD64) | ||
| 338 | else if (sizeof(T) == 8) | ||
| 339 | return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment); | ||
| 340 | #endif | ||
| 341 | #else | ||
| 342 | #error Unsupported platform | ||
| 343 | #endif | ||
| 344 | assert(false && "T must be either a 32 or 64 bit type"); | ||
| 345 | return value; | ||
| 346 | } | ||
| 347 | |||
| 348 | AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN { | ||
| 349 | #if defined(AE_ARCH_X64) || defined(AE_ARCH_X86) | ||
| 350 | if (sizeof(T) == 4) | ||
| 351 | return _InterlockedExchangeAdd((long volatile*)&value, (long)increment); | ||
| 352 | #if defined(_M_AMD64) | ||
| 353 | else if (sizeof(T) == 8) | ||
| 354 | return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment); | ||
| 355 | #endif | ||
| 356 | #else | ||
| 357 | #error Unsupported platform | ||
| 358 | #endif | ||
| 359 | assert(false && "T must be either a 32 or 64 bit type"); | ||
| 360 | return value; | ||
| 361 | } | ||
| 362 | #else | ||
| 363 | template <typename U> | ||
| 364 | AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { | ||
| 365 | value.store(std::forward<U>(x), std::memory_order_relaxed); | ||
| 366 | return *this; | ||
| 367 | } | ||
| 368 | |||
| 369 | AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { | ||
| 370 | value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed); | ||
| 371 | return *this; | ||
| 372 | } | ||
| 373 | |||
| 374 | AE_FORCEINLINE T load() const AE_NO_TSAN { | ||
| 375 | return value.load(std::memory_order_relaxed); | ||
| 376 | } | ||
| 377 | |||
| 378 | AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN { | ||
| 379 | return value.fetch_add(increment, std::memory_order_acquire); | ||
| 380 | } | ||
| 381 | |||
| 382 | AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN { | ||
| 383 | return value.fetch_add(increment, std::memory_order_release); | ||
| 384 | } | ||
| 385 | #endif | ||
| 386 | |||
| 387 | private: | ||
| 388 | #ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC | ||
| 389 | // No std::atomic support, but still need to circumvent compiler optimizations. | ||
| 390 | // `volatile` will make memory access slow, but is guaranteed to be reliable. | ||
| 391 | volatile T value; | ||
| 392 | #else | ||
| 393 | std::atomic<T> value; | ||
| 394 | #endif | ||
| 395 | }; | ||
| 396 | |||
| 397 | } // namespace Common | ||
| 398 | |||
| 399 | // Portable single-producer, single-consumer semaphore below: | ||
| 400 | |||
| 401 | #if defined(_WIN32) | ||
| 402 | // Avoid including windows.h in a header; we only need a handful of | ||
| 403 | // items, so we'll redeclare them here (this is relatively safe since | ||
| 404 | // the API generally has to remain stable between Windows versions). | ||
| 405 | // I know this is an ugly hack but it still beats polluting the global | ||
| 406 | // namespace with thousands of generic names or adding a .cpp for nothing. | ||
| 407 | extern "C" { | ||
| 408 | struct _SECURITY_ATTRIBUTES; | ||
| 409 | __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, | ||
| 410 | long lInitialCount, long lMaximumCount, | ||
| 411 | const wchar_t* lpName); | ||
| 412 | __declspec(dllimport) int __stdcall CloseHandle(void* hObject); | ||
| 413 | __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, | ||
| 414 | unsigned long dwMilliseconds); | ||
| 415 | __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, | ||
| 416 | long* lpPreviousCount); | ||
| 417 | } | ||
| 418 | #elif defined(__MACH__) | ||
| 419 | #include <mach/mach.h> | ||
| 420 | #elif defined(__unix__) | ||
| 421 | #include <semaphore.h> | ||
| 422 | #elif defined(FREERTOS) | ||
| 423 | #include <FreeRTOS.h> | ||
| 424 | #include <semphr.h> | ||
| 425 | #include <task.h> | ||
| 426 | #endif | ||
| 427 | |||
| 428 | namespace Common { | ||
| 429 | // Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's | ||
| 430 | // portable + lightweight semaphore implementations, originally from | ||
| 431 | // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h | ||
| 432 | // LICENSE: | ||
| 433 | // Copyright (c) 2015 Jeff Preshing | ||
| 434 | // | ||
| 435 | // This software is provided 'as-is', without any express or implied | ||
| 436 | // warranty. In no event will the authors be held liable for any damages | ||
| 437 | // arising from the use of this software. | ||
| 438 | // | ||
| 439 | // Permission is granted to anyone to use this software for any purpose, | ||
| 440 | // including commercial applications, and to alter it and redistribute it | ||
| 441 | // freely, subject to the following restrictions: | ||
| 442 | // | ||
| 443 | // 1. The origin of this software must not be misrepresented; you must not | ||
| 444 | // claim that you wrote the original software. If you use this software | ||
| 445 | // in a product, an acknowledgement in the product documentation would be | ||
| 446 | // appreciated but is not required. | ||
| 447 | // 2. Altered source versions must be plainly marked as such, and must not be | ||
| 448 | // misrepresented as being the original software. | ||
| 449 | // 3. This notice may not be removed or altered from any source distribution. | ||
| 450 | namespace spsc_sema { | ||
| 451 | #if defined(_WIN32) | ||
| 452 | class Semaphore { | ||
| 453 | private: | ||
| 454 | void* m_hSema; | ||
| 455 | |||
| 456 | Semaphore(const Semaphore& other); | ||
| 457 | Semaphore& operator=(const Semaphore& other); | ||
| 458 | |||
| 459 | public: | ||
| 460 | AE_NO_TSAN Semaphore(int initialCount = 0) : m_hSema() { | ||
| 461 | assert(initialCount >= 0); | ||
| 462 | const long maxLong = 0x7fffffff; | ||
| 463 | m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr); | ||
| 464 | assert(m_hSema); | ||
| 465 | } | ||
| 466 | |||
| 467 | AE_NO_TSAN ~Semaphore() { | ||
| 468 | CloseHandle(m_hSema); | ||
| 469 | } | ||
| 470 | |||
| 471 | bool wait() AE_NO_TSAN { | ||
| 472 | const unsigned long infinite = 0xffffffff; | ||
| 473 | return WaitForSingleObject(m_hSema, infinite) == 0; | ||
| 474 | } | ||
| 475 | |||
| 476 | bool try_wait() AE_NO_TSAN { | ||
| 477 | return WaitForSingleObject(m_hSema, 0) == 0; | ||
| 478 | } | ||
| 479 | |||
| 480 | bool timed_wait(std::uint64_t usecs) AE_NO_TSAN { | ||
| 481 | return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0; | ||
| 482 | } | ||
| 483 | |||
| 484 | void signal(int count = 1) AE_NO_TSAN { | ||
| 485 | while (!ReleaseSemaphore(m_hSema, count, nullptr)) | ||
| 486 | ; | ||
| 487 | } | ||
| 488 | }; | ||
| 489 | #elif defined(__MACH__) | ||
| 490 | //--------------------------------------------------------- | ||
| 491 | // Semaphore (Apple iOS and OSX) | ||
| 492 | // Can't use POSIX semaphores due to | ||
| 493 | // http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html | ||
| 494 | //--------------------------------------------------------- | ||
| 495 | class Semaphore { | ||
| 496 | private: | ||
| 497 | semaphore_t m_sema; | ||
| 498 | |||
| 499 | Semaphore(const Semaphore& other); | ||
| 500 | Semaphore& operator=(const Semaphore& other); | ||
| 501 | |||
| 502 | public: | ||
| 503 | AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() { | ||
| 504 | assert(initialCount >= 0); | ||
| 505 | kern_return_t rc = | ||
| 506 | semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount); | ||
| 507 | assert(rc == KERN_SUCCESS); | ||
| 508 | AE_UNUSED(rc); | ||
| 509 | } | ||
| 510 | |||
| 511 | AE_NO_TSAN ~Semaphore() { | ||
| 512 | semaphore_destroy(mach_task_self(), m_sema); | ||
| 513 | } | ||
| 514 | |||
| 515 | bool wait() AE_NO_TSAN { | ||
| 516 | return semaphore_wait(m_sema) == KERN_SUCCESS; | ||
| 517 | } | ||
| 518 | |||
| 519 | bool try_wait() AE_NO_TSAN { | ||
| 520 | return timed_wait(0); | ||
| 521 | } | ||
| 522 | |||
| 523 | bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN { | ||
| 524 | mach_timespec_t ts; | ||
| 525 | ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000); | ||
| 526 | ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000); | ||
| 527 | |||
| 528 | // added in OSX 10.10: | ||
| 529 | // https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html | ||
| 530 | kern_return_t rc = semaphore_timedwait(m_sema, ts); | ||
| 531 | return rc == KERN_SUCCESS; | ||
| 532 | } | ||
| 533 | |||
| 534 | void signal() AE_NO_TSAN { | ||
| 535 | while (semaphore_signal(m_sema) != KERN_SUCCESS) | ||
| 536 | ; | ||
| 537 | } | ||
| 538 | |||
| 539 | void signal(int count) AE_NO_TSAN { | ||
| 540 | while (count-- > 0) { | ||
| 541 | while (semaphore_signal(m_sema) != KERN_SUCCESS) | ||
| 542 | ; | ||
| 543 | } | ||
| 544 | } | ||
| 545 | }; | ||
| 546 | #elif defined(__unix__) | ||
| 547 | //--------------------------------------------------------- | ||
| 548 | // Semaphore (POSIX, Linux) | ||
| 549 | //--------------------------------------------------------- | ||
| 550 | class Semaphore { | ||
| 551 | private: | ||
| 552 | sem_t m_sema; | ||
| 553 | |||
| 554 | Semaphore(const Semaphore& other); | ||
| 555 | Semaphore& operator=(const Semaphore& other); | ||
| 556 | |||
| 557 | public: | ||
| 558 | AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() { | ||
| 559 | assert(initialCount >= 0); | ||
| 560 | int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount)); | ||
| 561 | assert(rc == 0); | ||
| 562 | AE_UNUSED(rc); | ||
| 563 | } | ||
| 564 | |||
| 565 | AE_NO_TSAN ~Semaphore() { | ||
| 566 | sem_destroy(&m_sema); | ||
| 567 | } | ||
| 568 | |||
| 569 | bool wait() AE_NO_TSAN { | ||
| 570 | // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error | ||
| 571 | int rc; | ||
| 572 | do { | ||
| 573 | rc = sem_wait(&m_sema); | ||
| 574 | } while (rc == -1 && errno == EINTR); | ||
| 575 | return rc == 0; | ||
| 576 | } | ||
| 577 | |||
| 578 | bool try_wait() AE_NO_TSAN { | ||
| 579 | int rc; | ||
| 580 | do { | ||
| 581 | rc = sem_trywait(&m_sema); | ||
| 582 | } while (rc == -1 && errno == EINTR); | ||
| 583 | return rc == 0; | ||
| 584 | } | ||
| 585 | |||
| 586 | bool timed_wait(std::uint64_t usecs) AE_NO_TSAN { | ||
| 587 | struct timespec ts; | ||
| 588 | const int usecs_in_1_sec = 1000000; | ||
| 589 | const int nsecs_in_1_sec = 1000000000; | ||
| 590 | clock_gettime(CLOCK_REALTIME, &ts); | ||
| 591 | ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec); | ||
| 592 | ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000; | ||
| 593 | // sem_timedwait bombs if you have more than 1e9 in tv_nsec | ||
| 594 | // so we have to clean things up before passing it in | ||
| 595 | if (ts.tv_nsec >= nsecs_in_1_sec) { | ||
| 596 | ts.tv_nsec -= nsecs_in_1_sec; | ||
| 597 | ++ts.tv_sec; | ||
| 598 | } | ||
| 599 | |||
| 600 | int rc; | ||
| 601 | do { | ||
| 602 | rc = sem_timedwait(&m_sema, &ts); | ||
| 603 | } while (rc == -1 && errno == EINTR); | ||
| 604 | return rc == 0; | ||
| 605 | } | ||
| 606 | |||
| 607 | void signal() AE_NO_TSAN { | ||
| 608 | while (sem_post(&m_sema) == -1) | ||
| 609 | ; | ||
| 610 | } | ||
| 611 | |||
| 612 | void signal(int count) AE_NO_TSAN { | ||
| 613 | while (count-- > 0) { | ||
| 614 | while (sem_post(&m_sema) == -1) | ||
| 615 | ; | ||
| 616 | } | ||
| 617 | } | ||
| 618 | }; | ||
| 619 | #elif defined(FREERTOS) | ||
| 620 | //--------------------------------------------------------- | ||
| 621 | // Semaphore (FreeRTOS) | ||
| 622 | //--------------------------------------------------------- | ||
| 623 | class Semaphore { | ||
| 624 | private: | ||
| 625 | SemaphoreHandle_t m_sema; | ||
| 626 | |||
| 627 | Semaphore(const Semaphore& other); | ||
| 628 | Semaphore& operator=(const Semaphore& other); | ||
| 629 | |||
| 630 | public: | ||
| 631 | AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() { | ||
| 632 | assert(initialCount >= 0); | ||
| 633 | m_sema = xSemaphoreCreateCounting(static_cast<UBaseType_t>(~0ull), | ||
| 634 | static_cast<UBaseType_t>(initialCount)); | ||
| 635 | assert(m_sema); | ||
| 636 | } | ||
| 637 | |||
| 638 | AE_NO_TSAN ~Semaphore() { | ||
| 639 | vSemaphoreDelete(m_sema); | ||
| 640 | } | ||
| 641 | |||
| 642 | bool wait() AE_NO_TSAN { | ||
| 643 | return xSemaphoreTake(m_sema, portMAX_DELAY) == pdTRUE; | ||
| 644 | } | ||
| 645 | |||
| 646 | bool try_wait() AE_NO_TSAN { | ||
| 647 | // Note: In an ISR context, if this causes a task to unblock, | ||
| 648 | // the caller won't know about it | ||
| 649 | if (xPortIsInsideInterrupt()) | ||
| 650 | return xSemaphoreTakeFromISR(m_sema, NULL) == pdTRUE; | ||
| 651 | return xSemaphoreTake(m_sema, 0) == pdTRUE; | ||
| 652 | } | ||
| 653 | |||
| 654 | bool timed_wait(std::uint64_t usecs) AE_NO_TSAN { | ||
| 655 | std::uint64_t msecs = usecs / 1000; | ||
| 656 | TickType_t ticks = static_cast<TickType_t>(msecs / portTICK_PERIOD_MS); | ||
| 657 | if (ticks == 0) | ||
| 658 | return try_wait(); | ||
| 659 | return xSemaphoreTake(m_sema, ticks) == pdTRUE; | ||
| 660 | } | ||
| 661 | |||
| 662 | void signal() AE_NO_TSAN { | ||
| 663 | // Note: In an ISR context, if this causes a task to unblock, | ||
| 664 | // the caller won't know about it | ||
| 665 | BaseType_t rc; | ||
| 666 | if (xPortIsInsideInterrupt()) | ||
| 667 | rc = xSemaphoreGiveFromISR(m_sema, NULL); | ||
| 668 | else | ||
| 669 | rc = xSemaphoreGive(m_sema); | ||
| 670 | assert(rc == pdTRUE); | ||
| 671 | AE_UNUSED(rc); | ||
| 672 | } | ||
| 673 | |||
| 674 | void signal(int count) AE_NO_TSAN { | ||
| 675 | while (count-- > 0) | ||
| 676 | signal(); | ||
| 677 | } | ||
| 678 | }; | ||
| 679 | #else | ||
| 680 | #error Unsupported platform! (No semaphore wrapper available) | ||
| 681 | #endif | ||
| 682 | |||
| 683 | //--------------------------------------------------------- | ||
| 684 | // LightweightSemaphore | ||
| 685 | //--------------------------------------------------------- | ||
| 686 | class LightweightSemaphore { | ||
| 687 | public: | ||
| 688 | typedef std::make_signed<std::size_t>::type ssize_t; | ||
| 689 | |||
| 690 | private: | ||
| 691 | weak_atomic<ssize_t> m_count; | ||
| 692 | Semaphore m_sema; | ||
| 693 | |||
| 694 | bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN { | ||
| 695 | ssize_t oldCount; | ||
| 696 | // Is there a better way to set the initial spin count? | ||
| 697 | // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC, | ||
| 698 | // as threads start hitting the kernel semaphore. | ||
| 699 | int spin = 1024; | ||
| 700 | while (--spin >= 0) { | ||
| 701 | if (m_count.load() > 0) { | ||
| 702 | m_count.fetch_add_acquire(-1); | ||
| 703 | return true; | ||
| 704 | } | ||
| 705 | compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop. | ||
| 706 | } | ||
| 707 | oldCount = m_count.fetch_add_acquire(-1); | ||
| 708 | if (oldCount > 0) | ||
| 709 | return true; | ||
| 710 | if (timeout_usecs < 0) { | ||
| 711 | if (m_sema.wait()) | ||
| 712 | return true; | ||
| 713 | } | ||
| 714 | if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs))) | ||
| 715 | return true; | ||
| 716 | // At this point, we've timed out waiting for the semaphore, but the | ||
| 717 | // count is still decremented indicating we may still be waiting on | ||
| 718 | // it. So we have to re-adjust the count, but only if the semaphore | ||
| 719 | // wasn't signaled enough times for us too since then. If it was, we | ||
| 720 | // need to release the semaphore too. | ||
| 721 | while (true) { | ||
| 722 | oldCount = m_count.fetch_add_release(1); | ||
| 723 | if (oldCount < 0) | ||
| 724 | return false; // successfully restored things to the way they were | ||
| 725 | // Oh, the producer thread just signaled the semaphore after all. Try again: | ||
| 726 | oldCount = m_count.fetch_add_acquire(-1); | ||
| 727 | if (oldCount > 0 && m_sema.try_wait()) | ||
| 728 | return true; | ||
| 729 | } | ||
| 730 | } | ||
| 731 | |||
| 732 | public: | ||
| 733 | AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema() { | ||
| 734 | assert(initialCount >= 0); | ||
| 735 | } | ||
| 736 | |||
| 737 | bool tryWait() AE_NO_TSAN { | ||
| 738 | if (m_count.load() > 0) { | ||
| 739 | m_count.fetch_add_acquire(-1); | ||
| 740 | return true; | ||
| 741 | } | ||
| 742 | return false; | ||
| 743 | } | ||
| 744 | |||
| 745 | bool wait() AE_NO_TSAN { | ||
| 746 | return tryWait() || waitWithPartialSpinning(); | ||
| 747 | } | ||
| 748 | |||
| 749 | bool wait(std::int64_t timeout_usecs) AE_NO_TSAN { | ||
| 750 | return tryWait() || waitWithPartialSpinning(timeout_usecs); | ||
| 751 | } | ||
| 752 | |||
| 753 | void signal(ssize_t count = 1) AE_NO_TSAN { | ||
| 754 | assert(count >= 0); | ||
| 755 | ssize_t oldCount = m_count.fetch_add_release(count); | ||
| 756 | assert(oldCount >= -1); | ||
| 757 | if (oldCount < 0) { | ||
| 758 | m_sema.signal(1); | ||
| 759 | } | ||
| 760 | } | ||
| 761 | |||
| 762 | std::size_t availableApprox() const AE_NO_TSAN { | ||
| 763 | ssize_t count = m_count.load(); | ||
| 764 | return count > 0 ? static_cast<std::size_t>(count) : 0; | ||
| 765 | } | ||
| 766 | }; | ||
| 767 | } // namespace spsc_sema | ||
| 768 | } // namespace Common | ||
| 769 | |||
| 770 | #if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli)) | ||
| 771 | #pragma warning(pop) | ||
| 772 | #ifdef __cplusplus_cli | ||
| 773 | #pragma managed(pop) | ||
| 774 | #endif | ||
| 775 | #endif | ||
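As a rough illustration of how the pieces in this header fit together, here is a hypothetical single-producer/single-consumer handoff. It is only a sketch of the intended usage, not code from this change; the Producer/Consumer names and the int payload are invented.

#include "common/atomic_helpers.h"

// Hypothetical SPSC handoff: one thread publishes values, another consumes them.
namespace {
Common::weak_atomic<int> latest_value;              // atomic, but provides no ordering on its own
Common::spsc_sema::LightweightSemaphore items;      // counts values that are ready
} // namespace

void Producer(int value) {
    latest_value = value;                           // relaxed store
    Common::fence(Common::memory_order_release);    // publish the store before signalling
    items.signal();                                 // wake the consumer
}

int Consumer() {
    items.wait();                                   // spins briefly, then blocks on the semaphore
    Common::fence(Common::memory_order_acquire);    // pairs with the producer's release fence
    return latest_value.load();                     // relaxed load
}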
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp index ec31d0b88..da64848da 100644 --- a/src/common/detached_tasks.cpp +++ b/src/common/detached_tasks.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2018 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2018 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <thread> | 4 | #include <thread> |
| 6 | #include "common/assert.h" | 5 | #include "common/assert.h" |
diff --git a/src/common/detached_tasks.h b/src/common/detached_tasks.h index 5dd8fc27b..416a2d7f3 100644 --- a/src/common/detached_tasks.h +++ b/src/common/detached_tasks.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2018 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2018 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/error.cpp b/src/common/error.cpp index d4455e310..ddb03bd45 100644 --- a/src/common/error.cpp +++ b/src/common/error.cpp | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | // Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2013 Dolphin Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 3 | // Refer to the license.txt file included. | 3 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 4 | 4 | ||
| 5 | #include <cstddef> | 5 | #include <cstddef> |
| 6 | #ifdef _WIN32 | 6 | #ifdef _WIN32 |
diff --git a/src/common/error.h b/src/common/error.h index e084d4b0f..62a3bd835 100644 --- a/src/common/error.h +++ b/src/common/error.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | // Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2013 Dolphin Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 3 | // Refer to the license.txt file included. | 3 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h new file mode 100644 index 000000000..4a0f72cc9 --- /dev/null +++ b/src/common/fixed_point.h | |||
| @@ -0,0 +1,706 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2015 Evan Teran | ||
| 2 | // SPDX-License-Identifier: MIT | ||
| 3 | |||
| 4 | // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h | ||
| 5 | // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math | ||
| 6 | |||
| 7 | #ifndef FIXED_H_ | ||
| 8 | #define FIXED_H_ | ||
| 9 | |||
| 10 | #if __cplusplus >= 201402L | ||
| 11 | #define CONSTEXPR14 constexpr | ||
| 12 | #else | ||
| 13 | #define CONSTEXPR14 | ||
| 14 | #endif | ||
| 15 | |||
| 16 | #include <cstddef> // for size_t | ||
| 17 | #include <cstdint> | ||
| 18 | #include <exception> | ||
| 19 | #include <ostream> | ||
| 20 | #include <type_traits> | ||
| 21 | |||
| 22 | namespace Common { | ||
| 23 | |||
| 24 | template <size_t I, size_t F> | ||
| 25 | class FixedPoint; | ||
| 26 | |||
| 27 | namespace detail { | ||
| 28 | |||
| 29 | // helper templates to make magic with types :) | ||
| 30 | // these allow us to determine reasonable types from | ||
| 31 | // a desired size; they also let us infer the next largest type | ||
| 32 | // from a type, which is nice for the division op | ||
| 33 | template <size_t T> | ||
| 34 | struct type_from_size { | ||
| 35 | using value_type = void; | ||
| 36 | using unsigned_type = void; | ||
| 37 | using signed_type = void; | ||
| 38 | static constexpr bool is_specialized = false; | ||
| 39 | }; | ||
| 40 | |||
| 41 | #if defined(__GNUC__) && defined(__x86_64__) && !defined(__STRICT_ANSI__) | ||
| 42 | template <> | ||
| 43 | struct type_from_size<128> { | ||
| 44 | static constexpr bool is_specialized = true; | ||
| 45 | static constexpr size_t size = 128; | ||
| 46 | |||
| 47 | using value_type = __int128; | ||
| 48 | using unsigned_type = unsigned __int128; | ||
| 49 | using signed_type = __int128; | ||
| 50 | using next_size = type_from_size<256>; | ||
| 51 | }; | ||
| 52 | #endif | ||
| 53 | |||
| 54 | template <> | ||
| 55 | struct type_from_size<64> { | ||
| 56 | static constexpr bool is_specialized = true; | ||
| 57 | static constexpr size_t size = 64; | ||
| 58 | |||
| 59 | using value_type = int64_t; | ||
| 60 | using unsigned_type = std::make_unsigned<value_type>::type; | ||
| 61 | using signed_type = std::make_signed<value_type>::type; | ||
| 62 | using next_size = type_from_size<128>; | ||
| 63 | }; | ||
| 64 | |||
| 65 | template <> | ||
| 66 | struct type_from_size<32> { | ||
| 67 | static constexpr bool is_specialized = true; | ||
| 68 | static constexpr size_t size = 32; | ||
| 69 | |||
| 70 | using value_type = int32_t; | ||
| 71 | using unsigned_type = std::make_unsigned<value_type>::type; | ||
| 72 | using signed_type = std::make_signed<value_type>::type; | ||
| 73 | using next_size = type_from_size<64>; | ||
| 74 | }; | ||
| 75 | |||
| 76 | template <> | ||
| 77 | struct type_from_size<16> { | ||
| 78 | static constexpr bool is_specialized = true; | ||
| 79 | static constexpr size_t size = 16; | ||
| 80 | |||
| 81 | using value_type = int16_t; | ||
| 82 | using unsigned_type = std::make_unsigned<value_type>::type; | ||
| 83 | using signed_type = std::make_signed<value_type>::type; | ||
| 84 | using next_size = type_from_size<32>; | ||
| 85 | }; | ||
| 86 | |||
| 87 | template <> | ||
| 88 | struct type_from_size<8> { | ||
| 89 | static constexpr bool is_specialized = true; | ||
| 90 | static constexpr size_t size = 8; | ||
| 91 | |||
| 92 | using value_type = int8_t; | ||
| 93 | using unsigned_type = std::make_unsigned<value_type>::type; | ||
| 94 | using signed_type = std::make_signed<value_type>::type; | ||
| 95 | using next_size = type_from_size<16>; | ||
| 96 | }; | ||
| 97 | |||
| 98 | // this is to assist in adding support for non-native base | ||
| 99 | // types (for adding big-int support); this should be fine | ||
| 100 | // unless your big-int class doesn't nicely support casting | ||
| 101 | template <class B, class N> | ||
| 102 | constexpr B next_to_base(N rhs) { | ||
| 103 | return static_cast<B>(rhs); | ||
| 104 | } | ||
| 105 | |||
| 106 | struct divide_by_zero : std::exception {}; | ||
| 107 | |||
| 108 | template <size_t I, size_t F> | ||
| 109 | CONSTEXPR14 FixedPoint<I, F> divide( | ||
| 110 | FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder, | ||
| 111 | typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { | ||
| 112 | |||
| 113 | using next_type = typename FixedPoint<I, F>::next_type; | ||
| 114 | using base_type = typename FixedPoint<I, F>::base_type; | ||
| 115 | constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits; | ||
| 116 | |||
| 117 | next_type t(numerator.to_raw()); | ||
| 118 | t <<= fractional_bits; | ||
| 119 | |||
| 120 | FixedPoint<I, F> quotient; | ||
| 121 | |||
| 122 | quotient = FixedPoint<I, F>::from_base(next_to_base<base_type>(t / denominator.to_raw())); | ||
| 123 | remainder = FixedPoint<I, F>::from_base(next_to_base<base_type>(t % denominator.to_raw())); | ||
| 124 | |||
| 125 | return quotient; | ||
| 126 | } | ||
| 127 | |||
| 128 | template <size_t I, size_t F> | ||
| 129 | CONSTEXPR14 FixedPoint<I, F> divide( | ||
| 130 | FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder, | ||
| 131 | typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { | ||
| 132 | |||
| 133 | using unsigned_type = typename FixedPoint<I, F>::unsigned_type; | ||
| 134 | |||
| 135 | constexpr int bits = FixedPoint<I, F>::total_bits; | ||
| 136 | |||
| 137 | if (denominator == 0) { | ||
| 138 | throw divide_by_zero(); | ||
| 139 | } else { | ||
| 140 | |||
| 141 | int sign = 0; | ||
| 142 | |||
| 143 | FixedPoint<I, F> quotient; | ||
| 144 | |||
| 145 | if (numerator < 0) { | ||
| 146 | sign ^= 1; | ||
| 147 | numerator = -numerator; | ||
| 148 | } | ||
| 149 | |||
| 150 | if (denominator < 0) { | ||
| 151 | sign ^= 1; | ||
| 152 | denominator = -denominator; | ||
| 153 | } | ||
| 154 | |||
| 155 | unsigned_type n = numerator.to_raw(); | ||
| 156 | unsigned_type d = denominator.to_raw(); | ||
| 157 | unsigned_type x = 1; | ||
| 158 | unsigned_type answer = 0; | ||
| 159 | |||
| 160 | // egyptian division algorithm | ||
| 161 | while ((n >= d) && (((d >> (bits - 1)) & 1) == 0)) { | ||
| 162 | x <<= 1; | ||
| 163 | d <<= 1; | ||
| 164 | } | ||
| 165 | |||
| 166 | while (x != 0) { | ||
| 167 | if (n >= d) { | ||
| 168 | n -= d; | ||
| 169 | answer += x; | ||
| 170 | } | ||
| 171 | |||
| 172 | x >>= 1; | ||
| 173 | d >>= 1; | ||
| 174 | } | ||
| 175 | |||
| 176 | unsigned_type l1 = n; | ||
| 177 | unsigned_type l2 = denominator.to_raw(); | ||
| 178 | |||
| 179 | // calculate the lower bits (needs to be unsigned) | ||
| 180 | while (l1 >> (bits - F) > 0) { | ||
| 181 | l1 >>= 1; | ||
| 182 | l2 >>= 1; | ||
| 183 | } | ||
| 184 | const unsigned_type lo = (l1 << F) / l2; | ||
| 185 | |||
| 186 | quotient = FixedPoint<I, F>::from_base((answer << F) | lo); | ||
| 187 | remainder = n; | ||
| 188 | |||
| 189 | if (sign) { | ||
| 190 | quotient = -quotient; | ||
| 191 | } | ||
| 192 | |||
| 193 | return quotient; | ||
| 194 | } | ||
| 195 | } | ||
| 196 | |||
| 197 | // this is the usual implementation of multiplication | ||
| 198 | template <size_t I, size_t F> | ||
| 199 | CONSTEXPR14 FixedPoint<I, F> multiply( | ||
| 200 | FixedPoint<I, F> lhs, FixedPoint<I, F> rhs, | ||
| 201 | typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { | ||
| 202 | |||
| 203 | using next_type = typename FixedPoint<I, F>::next_type; | ||
| 204 | using base_type = typename FixedPoint<I, F>::base_type; | ||
| 205 | |||
| 206 | constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits; | ||
| 207 | |||
| 208 | next_type t(static_cast<next_type>(lhs.to_raw()) * static_cast<next_type>(rhs.to_raw())); | ||
| 209 | t >>= fractional_bits; | ||
| 210 | |||
| 211 | return FixedPoint<I, F>::from_base(next_to_base<base_type>(t)); | ||
| 212 | } | ||
| 213 | |||
| 214 | // this is the fallback version we use when we don't have a next size | ||
| 215 | // it is slightly slower, but is more robust since it doesn't | ||
| 216 | // require an upgraded type | ||
| 217 | template <size_t I, size_t F> | ||
| 218 | CONSTEXPR14 FixedPoint<I, F> multiply( | ||
| 219 | FixedPoint<I, F> lhs, FixedPoint<I, F> rhs, | ||
| 220 | typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) { | ||
| 221 | |||
| 222 | using base_type = typename FixedPoint<I, F>::base_type; | ||
| 223 | |||
| 224 | constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits; | ||
| 225 | constexpr base_type integer_mask = FixedPoint<I, F>::integer_mask; | ||
| 226 | constexpr base_type fractional_mask = FixedPoint<I, F>::fractional_mask; | ||
| 227 | |||
| 228 | // more costly but doesn't need a larger type | ||
| 229 | const base_type a_hi = (lhs.to_raw() & integer_mask) >> fractional_bits; | ||
| 230 | const base_type b_hi = (rhs.to_raw() & integer_mask) >> fractional_bits; | ||
| 231 | const base_type a_lo = (lhs.to_raw() & fractional_mask); | ||
| 232 | const base_type b_lo = (rhs.to_raw() & fractional_mask); | ||
| 233 | |||
| 234 | const base_type x1 = a_hi * b_hi; | ||
| 235 | const base_type x2 = a_hi * b_lo; | ||
| 236 | const base_type x3 = a_lo * b_hi; | ||
| 237 | const base_type x4 = a_lo * b_lo; | ||
| 238 | |||
| 239 | return FixedPoint<I, F>::from_base((x1 << fractional_bits) + (x3 + x2) + | ||
| 240 | (x4 >> fractional_bits)); | ||
| 241 | } | ||
| 242 | } // namespace detail | ||
| 243 | |||
| 244 | template <size_t I, size_t F> | ||
| 245 | class FixedPoint { | ||
| 246 | static_assert(detail::type_from_size<I + F>::is_specialized, "invalid combination of sizes"); | ||
| 247 | |||
| 248 | public: | ||
| 249 | static constexpr size_t fractional_bits = F; | ||
| 250 | static constexpr size_t integer_bits = I; | ||
| 251 | static constexpr size_t total_bits = I + F; | ||
| 252 | |||
| 253 | using base_type_info = detail::type_from_size<total_bits>; | ||
| 254 | |||
| 255 | using base_type = typename base_type_info::value_type; | ||
| 256 | using next_type = typename base_type_info::next_size::value_type; | ||
| 257 | using unsigned_type = typename base_type_info::unsigned_type; | ||
| 258 | |||
| 259 | public: | ||
| 260 | #ifdef __GNUC__ | ||
| 261 | #pragma GCC diagnostic push | ||
| 262 | #pragma GCC diagnostic ignored "-Woverflow" | ||
| 263 | #endif | ||
| 264 | static constexpr base_type fractional_mask = | ||
| 265 | ~(static_cast<unsigned_type>(~base_type(0)) << fractional_bits); | ||
| 266 | static constexpr base_type integer_mask = ~fractional_mask; | ||
| 267 | #ifdef __GNUC__ | ||
| 268 | #pragma GCC diagnostic pop | ||
| 269 | #endif | ||
| 270 | |||
| 271 | public: | ||
| 272 | static constexpr base_type one = base_type(1) << fractional_bits; | ||
| 273 | |||
| 274 | public: // constructors | ||
| 275 | FixedPoint() = default; | ||
| 276 | FixedPoint(const FixedPoint&) = default; | ||
| 277 | FixedPoint(FixedPoint&&) = default; | ||
| 278 | FixedPoint& operator=(const FixedPoint&) = default; | ||
| 279 | |||
| 280 | template <class Number> | ||
| 281 | constexpr FixedPoint( | ||
| 282 | Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr) | ||
| 283 | : data_(static_cast<base_type>(n * one)) {} | ||
| 284 | |||
| 285 | public: // conversion | ||
| 286 | template <size_t I2, size_t F2> | ||
| 287 | CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) { | ||
| 288 | static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types"); | ||
| 289 | using T = FixedPoint<I2, F2>; | ||
| 290 | |||
| 291 | const base_type fractional = (other.data_ & T::fractional_mask); | ||
| 292 | const base_type integer = (other.data_ & T::integer_mask) >> T::fractional_bits; | ||
| 293 | data_ = | ||
| 294 | (integer << fractional_bits) | (fractional << (fractional_bits - T::fractional_bits)); | ||
| 295 | } | ||
| 296 | |||
| 297 | private: | ||
| 298 | // this makes it simpler to create a FixedPoint object from | ||
| 299 | // a native type without scaling | ||
| 300 | // use "FixedPoint::from_base" in order to perform this. | ||
| 301 | struct NoScale {}; | ||
| 302 | |||
| 303 | constexpr FixedPoint(base_type n, const NoScale&) : data_(n) {} | ||
| 304 | |||
| 305 | public: | ||
| 306 | static constexpr FixedPoint from_base(base_type n) { | ||
| 307 | return FixedPoint(n, NoScale()); | ||
| 308 | } | ||
| 309 | |||
| 310 | public: // comparison operators | ||
| 311 | constexpr bool operator==(FixedPoint rhs) const { | ||
| 312 | return data_ == rhs.data_; | ||
| 313 | } | ||
| 314 | |||
| 315 | constexpr bool operator!=(FixedPoint rhs) const { | ||
| 316 | return data_ != rhs.data_; | ||
| 317 | } | ||
| 318 | |||
| 319 | constexpr bool operator<(FixedPoint rhs) const { | ||
| 320 | return data_ < rhs.data_; | ||
| 321 | } | ||
| 322 | |||
| 323 | constexpr bool operator>(FixedPoint rhs) const { | ||
| 324 | return data_ > rhs.data_; | ||
| 325 | } | ||
| 326 | |||
| 327 | constexpr bool operator<=(FixedPoint rhs) const { | ||
| 328 | return data_ <= rhs.data_; | ||
| 329 | } | ||
| 330 | |||
| 331 | constexpr bool operator>=(FixedPoint rhs) const { | ||
| 332 | return data_ >= rhs.data_; | ||
| 333 | } | ||
| 334 | |||
| 335 | public: // unary operators | ||
| 336 | constexpr bool operator!() const { | ||
| 337 | return !data_; | ||
| 338 | } | ||
| 339 | |||
| 340 | constexpr FixedPoint operator~() const { | ||
| 341 | // NOTE(eteran): this will often appear to "just negate" the value | ||
| 342 | // that is not an error; it is because -x == (~x+1) | ||
| 343 | // and that "+1" is adding an infinitesimally small fraction to the | ||
| 344 | // complemented value | ||
| 345 | return FixedPoint::from_base(~data_); | ||
| 346 | } | ||
| 347 | |||
| 348 | constexpr FixedPoint operator-() const { | ||
| 349 | return FixedPoint::from_base(-data_); | ||
| 350 | } | ||
| 351 | |||
| 352 | constexpr FixedPoint operator+() const { | ||
| 353 | return FixedPoint::from_base(+data_); | ||
| 354 | } | ||
| 355 | |||
| 356 | CONSTEXPR14 FixedPoint& operator++() { | ||
| 357 | data_ += one; | ||
| 358 | return *this; | ||
| 359 | } | ||
| 360 | |||
| 361 | CONSTEXPR14 FixedPoint& operator--() { | ||
| 362 | data_ -= one; | ||
| 363 | return *this; | ||
| 364 | } | ||
| 365 | |||
| 366 | CONSTEXPR14 FixedPoint operator++(int) { | ||
| 367 | FixedPoint tmp(*this); | ||
| 368 | data_ += one; | ||
| 369 | return tmp; | ||
| 370 | } | ||
| 371 | |||
| 372 | CONSTEXPR14 FixedPoint operator--(int) { | ||
| 373 | FixedPoint tmp(*this); | ||
| 374 | data_ -= one; | ||
| 375 | return tmp; | ||
| 376 | } | ||
| 377 | |||
| 378 | public: // basic math operators | ||
| 379 | CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) { | ||
| 380 | data_ += n.data_; | ||
| 381 | return *this; | ||
| 382 | } | ||
| 383 | |||
| 384 | CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) { | ||
| 385 | data_ -= n.data_; | ||
| 386 | return *this; | ||
| 387 | } | ||
| 388 | |||
| 389 | CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) { | ||
| 390 | return assign(detail::multiply(*this, n)); | ||
| 391 | } | ||
| 392 | |||
| 393 | CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) { | ||
| 394 | FixedPoint temp; | ||
| 395 | return assign(detail::divide(*this, n, temp)); | ||
| 396 | } | ||
| 397 | |||
| 398 | private: | ||
| 399 | CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) { | ||
| 400 | data_ = rhs.data_; | ||
| 401 | return *this; | ||
| 402 | } | ||
| 403 | |||
| 404 | public: // binary math operators; these affect the underlying bit pattern, since such | ||
| 405 | // operations don't typically make sense for non-integer values | ||
| 406 | CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) { | ||
| 407 | data_ &= n.data_; | ||
| 408 | return *this; | ||
| 409 | } | ||
| 410 | |||
| 411 | CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) { | ||
| 412 | data_ |= n.data_; | ||
| 413 | return *this; | ||
| 414 | } | ||
| 415 | |||
| 416 | CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) { | ||
| 417 | data_ ^= n.data_; | ||
| 418 | return *this; | ||
| 419 | } | ||
| 420 | |||
| 421 | template <class Integer, | ||
| 422 | class = typename std::enable_if<std::is_integral<Integer>::value>::type> | ||
| 423 | CONSTEXPR14 FixedPoint& operator>>=(Integer n) { | ||
| 424 | data_ >>= n; | ||
| 425 | return *this; | ||
| 426 | } | ||
| 427 | |||
| 428 | template <class Integer, | ||
| 429 | class = typename std::enable_if<std::is_integral<Integer>::value>::type> | ||
| 430 | CONSTEXPR14 FixedPoint& operator<<=(Integer n) { | ||
| 431 | data_ <<= n; | ||
| 432 | return *this; | ||
| 433 | } | ||
| 434 | |||
| 435 | public: // conversion to basic types | ||
| 436 | constexpr void round_up() { | ||
| 437 | data_ += (data_ & fractional_mask) >> 1; | ||
| 438 | } | ||
| 439 | |||
| 440 | constexpr int to_int() { | ||
| 441 | round_up(); | ||
| 442 | return static_cast<int>((data_ & integer_mask) >> fractional_bits); | ||
| 443 | } | ||
| 444 | |||
| 445 | constexpr unsigned int to_uint() { | ||
| 446 | round_up(); | ||
| 447 | return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits); | ||
| 448 | } | ||
| 449 | |||
| 450 | constexpr int64_t to_long() { | ||
| 451 | round_up(); | ||
| 452 | return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits); | ||
| 453 | } | ||
| 454 | |||
| 455 | constexpr int to_int_floor() const { | ||
| 456 | return static_cast<int>((data_ & integer_mask) >> fractional_bits); | ||
| 457 | } | ||
| 458 | |||
| 459 | constexpr int64_t to_long_floor() { | ||
| 460 | return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits); | ||
| 461 | } | ||
| 462 | |||
| 463 | constexpr unsigned int to_uint_floor() const { | ||
| 464 | return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits); | ||
| 465 | } | ||
| 466 | |||
| 467 | constexpr float to_float() const { | ||
| 468 | return static_cast<float>(data_) / FixedPoint::one; | ||
| 469 | } | ||
| 470 | |||
| 471 | constexpr double to_double() const { | ||
| 472 | return static_cast<double>(data_) / FixedPoint::one; | ||
| 473 | } | ||
| 474 | |||
| 475 | constexpr base_type to_raw() const { | ||
| 476 | return data_; | ||
| 477 | } | ||
| 478 | |||
| 479 | constexpr void clear_int() { | ||
| 480 | data_ &= fractional_mask; | ||
| 481 | } | ||
| 482 | |||
| 483 | constexpr base_type get_frac() const { | ||
| 484 | return data_ & fractional_mask; | ||
| 485 | } | ||
| 486 | |||
| 487 | public: | ||
| 488 | CONSTEXPR14 void swap(FixedPoint& rhs) { | ||
| 489 | using std::swap; | ||
| 490 | swap(data_, rhs.data_); | ||
| 491 | } | ||
| 492 | |||
| 493 | public: | ||
| 494 | base_type data_; | ||
| 495 | }; | ||
| 496 | |||
| 497 | // if we have the same fractional portion, but differing integer portions, we trivially upgrade the | ||
| 498 | // smaller type | ||
| 499 | template <size_t I1, size_t I2, size_t F> | ||
| 500 | CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type | ||
| 501 | operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { | ||
| 502 | |||
| 503 | using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; | ||
| 504 | |||
| 505 | const T l = T::from_base(lhs.to_raw()); | ||
| 506 | const T r = T::from_base(rhs.to_raw()); | ||
| 507 | return l + r; | ||
| 508 | } | ||
| 509 | |||
| 510 | template <size_t I1, size_t I2, size_t F> | ||
| 511 | CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type | ||
| 512 | operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { | ||
| 513 | |||
| 514 | using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; | ||
| 515 | |||
| 516 | const T l = T::from_base(lhs.to_raw()); | ||
| 517 | const T r = T::from_base(rhs.to_raw()); | ||
| 518 | return l - r; | ||
| 519 | } | ||
| 520 | |||
| 521 | template <size_t I1, size_t I2, size_t F> | ||
| 522 | CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type | ||
| 523 | operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { | ||
| 524 | |||
| 525 | using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; | ||
| 526 | |||
| 527 | const T l = T::from_base(lhs.to_raw()); | ||
| 528 | const T r = T::from_base(rhs.to_raw()); | ||
| 529 | return l * r; | ||
| 530 | } | ||
| 531 | |||
| 532 | template <size_t I1, size_t I2, size_t F> | ||
| 533 | CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type | ||
| 534 | operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) { | ||
| 535 | |||
| 536 | using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type; | ||
| 537 | |||
| 538 | const T l = T::from_base(lhs.to_raw()); | ||
| 539 | const T r = T::from_base(rhs.to_raw()); | ||
| 540 | return l / r; | ||
| 541 | } | ||
| 542 | |||
| 543 | template <size_t I, size_t F> | ||
| 544 | std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) { | ||
| 545 | os << f.to_double(); | ||
| 546 | return os; | ||
| 547 | } | ||
| 548 | |||
| 549 | // basic math operators | ||
| 550 | template <size_t I, size_t F> | ||
| 551 | CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { | ||
| 552 | lhs += rhs; | ||
| 553 | return lhs; | ||
| 554 | } | ||
| 555 | template <size_t I, size_t F> | ||
| 556 | CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { | ||
| 557 | lhs -= rhs; | ||
| 558 | return lhs; | ||
| 559 | } | ||
| 560 | template <size_t I, size_t F> | ||
| 561 | CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { | ||
| 562 | lhs *= rhs; | ||
| 563 | return lhs; | ||
| 564 | } | ||
| 565 | template <size_t I, size_t F> | ||
| 566 | CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) { | ||
| 567 | lhs /= rhs; | ||
| 568 | return lhs; | ||
| 569 | } | ||
| 570 | |||
| 571 | template <size_t I, size_t F, class Number, | ||
| 572 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 573 | CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) { | ||
| 574 | lhs += FixedPoint<I, F>(rhs); | ||
| 575 | return lhs; | ||
| 576 | } | ||
| 577 | template <size_t I, size_t F, class Number, | ||
| 578 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 579 | CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) { | ||
| 580 | lhs -= FixedPoint<I, F>(rhs); | ||
| 581 | return lhs; | ||
| 582 | } | ||
| 583 | template <size_t I, size_t F, class Number, | ||
| 584 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 585 | CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) { | ||
| 586 | lhs *= FixedPoint<I, F>(rhs); | ||
| 587 | return lhs; | ||
| 588 | } | ||
| 589 | template <size_t I, size_t F, class Number, | ||
| 590 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 591 | CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) { | ||
| 592 | lhs /= FixedPoint<I, F>(rhs); | ||
| 593 | return lhs; | ||
| 594 | } | ||
| 595 | |||
| 596 | template <size_t I, size_t F, class Number, | ||
| 597 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 598 | CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) { | ||
| 599 | FixedPoint<I, F> tmp(lhs); | ||
| 600 | tmp += rhs; | ||
| 601 | return tmp; | ||
| 602 | } | ||
| 603 | template <size_t I, size_t F, class Number, | ||
| 604 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 605 | CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) { | ||
| 606 | FixedPoint<I, F> tmp(lhs); | ||
| 607 | tmp -= rhs; | ||
| 608 | return tmp; | ||
| 609 | } | ||
| 610 | template <size_t I, size_t F, class Number, | ||
| 611 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 612 | CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) { | ||
| 613 | FixedPoint<I, F> tmp(lhs); | ||
| 614 | tmp *= rhs; | ||
| 615 | return tmp; | ||
| 616 | } | ||
| 617 | template <size_t I, size_t F, class Number, | ||
| 618 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 619 | CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) { | ||
| 620 | FixedPoint<I, F> tmp(lhs); | ||
| 621 | tmp /= rhs; | ||
| 622 | return tmp; | ||
| 623 | } | ||
| 624 | |||
| 625 | // shift operators | ||
| 626 | template <size_t I, size_t F, class Integer, | ||
| 627 | class = typename std::enable_if<std::is_integral<Integer>::value>::type> | ||
| 628 | CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) { | ||
| 629 | lhs <<= rhs; | ||
| 630 | return lhs; | ||
| 631 | } | ||
| 632 | template <size_t I, size_t F, class Integer, | ||
| 633 | class = typename std::enable_if<std::is_integral<Integer>::value>::type> | ||
| 634 | CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) { | ||
| 635 | lhs >>= rhs; | ||
| 636 | return lhs; | ||
| 637 | } | ||
| 638 | |||
| 639 | // comparison operators | ||
| 640 | template <size_t I, size_t F, class Number, | ||
| 641 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 642 | constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) { | ||
| 643 | return lhs > FixedPoint<I, F>(rhs); | ||
| 644 | } | ||
| 645 | template <size_t I, size_t F, class Number, | ||
| 646 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 647 | constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) { | ||
| 648 | return lhs < FixedPoint<I, F>(rhs); | ||
| 649 | } | ||
| 650 | template <size_t I, size_t F, class Number, | ||
| 651 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 652 | constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) { | ||
| 653 | return lhs >= FixedPoint<I, F>(rhs); | ||
| 654 | } | ||
| 655 | template <size_t I, size_t F, class Number, | ||
| 656 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 657 | constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) { | ||
| 658 | return lhs <= FixedPoint<I, F>(rhs); | ||
| 659 | } | ||
| 660 | template <size_t I, size_t F, class Number, | ||
| 661 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 662 | constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) { | ||
| 663 | return lhs == FixedPoint<I, F>(rhs); | ||
| 664 | } | ||
| 665 | template <size_t I, size_t F, class Number, | ||
| 666 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 667 | constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) { | ||
| 668 | return lhs != FixedPoint<I, F>(rhs); | ||
| 669 | } | ||
| 670 | |||
| 671 | template <size_t I, size_t F, class Number, | ||
| 672 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 673 | constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) { | ||
| 674 | return FixedPoint<I, F>(lhs) > rhs; | ||
| 675 | } | ||
| 676 | template <size_t I, size_t F, class Number, | ||
| 677 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 678 | constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) { | ||
| 679 | return FixedPoint<I, F>(lhs) < rhs; | ||
| 680 | } | ||
| 681 | template <size_t I, size_t F, class Number, | ||
| 682 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 683 | constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) { | ||
| 684 | return FixedPoint<I, F>(lhs) >= rhs; | ||
| 685 | } | ||
| 686 | template <size_t I, size_t F, class Number, | ||
| 687 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 688 | constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) { | ||
| 689 | return FixedPoint<I, F>(lhs) <= rhs; | ||
| 690 | } | ||
| 691 | template <size_t I, size_t F, class Number, | ||
| 692 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 693 | constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) { | ||
| 694 | return FixedPoint<I, F>(lhs) == rhs; | ||
| 695 | } | ||
| 696 | template <size_t I, size_t F, class Number, | ||
| 697 | class = typename std::enable_if<std::is_arithmetic<Number>::value>::type> | ||
| 698 | constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) { | ||
| 699 | return FixedPoint<I, F>(lhs) != rhs; | ||
| 700 | } | ||
| 701 | |||
| 702 | } // namespace Common | ||
| 703 | |||
| 704 | #undef CONSTEXPR14 | ||
| 705 | |||
| 706 | #endif | ||
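Below is a minimal usage sketch of the Common::FixedPoint template added above. The FixedPoint<16, 16> instantiation, the converting constructors used for initialization (declared earlier in the header, outside this excerpt), and the demo program itself are illustrative assumptions rather than part of the diff.

    #include <cassert>
    #include "common/fixed_point.h"

    int main() {
        // Hypothetical instantiation: 16 integer bits, 16 fractional bits.
        using Fixed16 = Common::FixedPoint<16, 16>;

        Fixed16 a(1.5); // scaled on construction: data_ == 1.5 * 2^16
        Fixed16 b(2);
        Fixed16 c = a * b; // routed through detail::multiply via operator*=

        assert(c.to_int_floor() == 3);
        assert(c.to_raw() == (3 << 16));          // integer part sits above the 16 fraction bits
        assert(Fixed16::from_base(1 << 16) == 1); // from_base() stores the raw value unscaled

        // Operands with the same fraction width but different integer widths
        // promote to the wider type (see the mixed-width operator+ above).
        Common::FixedPoint<24, 16> wide(10);
        const auto sum = wide + c; // yields FixedPoint<24, 16>
        assert(sum.to_int_floor() == 13);
        return 0;
    }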
diff --git a/src/common/hash.h b/src/common/hash.h index 298930702..b6f3e6d6f 100644 --- a/src/common/hash.h +++ b/src/common/hash.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2015 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/input.h b/src/common/input.h index bb42aaacc..213aa2384 100644 --- a/src/common/input.h +++ b/src/common/input.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2017 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
| @@ -28,7 +27,7 @@ enum class InputType { | |||
| 28 | Color, | 27 | Color, |
| 29 | Vibration, | 28 | Vibration, |
| 30 | Nfc, | 29 | Nfc, |
| 31 | Ir, | 30 | IrSensor, |
| 32 | }; | 31 | }; |
| 33 | 32 | ||
| 34 | // Internal battery charge level | 33 | // Internal battery charge level |
| @@ -53,6 +52,15 @@ enum class PollingMode { | |||
| 53 | IR, | 52 | IR, |
| 54 | }; | 53 | }; |
| 55 | 54 | ||
| 55 | enum class CameraFormat { | ||
| 56 | Size320x240, | ||
| 57 | Size160x120, | ||
| 58 | Size80x60, | ||
| 59 | Size40x30, | ||
| 60 | Size20x15, | ||
| 61 | None, | ||
| 62 | }; | ||
| 63 | |||
| 56 | // Vibration reply from the controller | 64 | // Vibration reply from the controller |
| 57 | enum class VibrationError { | 65 | enum class VibrationError { |
| 58 | None, | 66 | None, |
| @@ -68,6 +76,13 @@ enum class PollingError { | |||
| 68 | Unknown, | 76 | Unknown, |
| 69 | }; | 77 | }; |
| 70 | 78 | ||
| 79 | // IR camera reply from the controller | ||
| 80 | enum class CameraError { | ||
| 81 | None, | ||
| 82 | NotSupported, | ||
| 83 | Unknown, | ||
| 84 | }; | ||
| 85 | |||
| 71 | // Hint for amplification curve to be used | 86 | // Hint for amplification curve to be used |
| 72 | enum class VibrationAmplificationType { | 87 | enum class VibrationAmplificationType { |
| 73 | Linear, | 88 | Linear, |
| @@ -176,6 +191,12 @@ struct LedStatus { | |||
| 176 | bool led_4{}; | 191 | bool led_4{}; |
| 177 | }; | 192 | }; |
| 178 | 193 | ||
| 194 | // Raw data from the camera | ||
| 195 | struct CameraStatus { | ||
| 196 | CameraFormat format{CameraFormat::None}; | ||
| 197 | std::vector<u8> data{}; | ||
| 198 | }; | ||
| 199 | |||
| 179 | // List of buttons to be passed to Qt that can be translated | 200 | // List of buttons to be passed to Qt that can be translated |
| 180 | enum class ButtonNames { | 201 | enum class ButtonNames { |
| 181 | Undefined, | 202 | Undefined, |
| @@ -233,6 +254,7 @@ struct CallbackStatus { | |||
| 233 | BodyColorStatus color_status{}; | 254 | BodyColorStatus color_status{}; |
| 234 | BatteryStatus battery_status{}; | 255 | BatteryStatus battery_status{}; |
| 235 | VibrationStatus vibration_status{}; | 256 | VibrationStatus vibration_status{}; |
| 257 | CameraStatus camera_status{}; | ||
| 236 | }; | 258 | }; |
| 237 | 259 | ||
| 238 | // Triggered once every input change | 260 | // Triggered once every input change |
| @@ -281,6 +303,10 @@ public: | |||
| 281 | virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) { | 303 | virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) { |
| 282 | return PollingError::NotSupported; | 304 | return PollingError::NotSupported; |
| 283 | } | 305 | } |
| 306 | |||
| 307 | virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) { | ||
| 308 | return CameraError::NotSupported; | ||
| 309 | } | ||
| 284 | }; | 310 | }; |
| 285 | 311 | ||
| 286 | /// An abstract class template for a factory that can create input devices. | 312 | /// An abstract class template for a factory that can create input devices. |
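As a usage sketch for the camera additions above: the helper below packages a raw frame into the new CameraStatus and hands it off inside a CallbackStatus, which is how a backend could surface IR data through common/input.h. The helper name, the frame source, and the Common::Input namespace qualification are assumptions for illustration; only the CameraFormat/CameraStatus/CallbackStatus types come from the diff.

    #include <utility>
    #include <vector>

    #include "common/common_types.h"
    #include "common/input.h"

    namespace {

    // Hypothetical helper: wrap one 320x240 frame so it can travel through the
    // existing input callback path using the camera_status field added above.
    Common::Input::CallbackStatus MakeCameraCallback(std::vector<u8> frame) {
        Common::Input::CameraStatus camera{};
        camera.format = Common::Input::CameraFormat::Size320x240;
        camera.data = std::move(frame);

        Common::Input::CallbackStatus status{};
        status.camera_status = std::move(camera);
        return status;
    }

    } // namespace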
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp index b3793106d..8ce1c2fd1 100644 --- a/src/common/logging/backend.cpp +++ b/src/common/logging/backend.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <atomic> | 4 | #include <atomic> |
| 6 | #include <chrono> | 5 | #include <chrono> |
diff --git a/src/common/logging/backend.h b/src/common/logging/backend.h index a0e80fe3c..12e5e2498 100644 --- a/src/common/logging/backend.h +++ b/src/common/logging/backend.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/logging/filter.cpp b/src/common/logging/filter.cpp index 6de9bacbf..a959acb74 100644 --- a/src/common/logging/filter.cpp +++ b/src/common/logging/filter.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <algorithm> | 4 | #include <algorithm> |
| 6 | #include "common/logging/filter.h" | 5 | #include "common/logging/filter.h" |
diff --git a/src/common/logging/filter.h b/src/common/logging/filter.h index 29419f051..54d172cc0 100644 --- a/src/common/logging/filter.h +++ b/src/common/logging/filter.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/logging/log.h b/src/common/logging/log.h index 0c80d01ee..c00c01a9e 100644 --- a/src/common/logging/log.h +++ b/src/common/logging/log.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/logging/text_formatter.cpp b/src/common/logging/text_formatter.cpp index b2cad58d8..09398ea64 100644 --- a/src/common/logging/text_formatter.cpp +++ b/src/common/logging/text_formatter.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <array> | 4 | #include <array> |
| 6 | #include <cstdio> | 5 | #include <cstdio> |
diff --git a/src/common/logging/text_formatter.h b/src/common/logging/text_formatter.h index 92c0bf0c5..0d0ec4370 100644 --- a/src/common/logging/text_formatter.h +++ b/src/common/logging/text_formatter.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/microprofile.cpp b/src/common/microprofile.cpp index ee25dd37f..e6657c82f 100644 --- a/src/common/microprofile.cpp +++ b/src/common/microprofile.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2015 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | // Includes the MicroProfile implementation in this file for compilation | 4 | // Includes the MicroProfile implementation in this file for compilation |
| 6 | #define MICROPROFILE_IMPL 1 | 5 | #define MICROPROFILE_IMPL 1 |
diff --git a/src/common/microprofile.h b/src/common/microprofile.h index 54e7f3cc4..91d14d5e1 100644 --- a/src/common/microprofile.h +++ b/src/common/microprofile.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2015 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/microprofileui.h b/src/common/microprofileui.h index 41abe6b75..39ed18ffa 100644 --- a/src/common/microprofileui.h +++ b/src/common/microprofileui.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2015 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/param_package.cpp b/src/common/param_package.cpp index 462502e34..629babb81 100644 --- a/src/common/param_package.cpp +++ b/src/common/param_package.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2017 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <array> | 4 | #include <array> |
| 6 | #include <stdexcept> | 5 | #include <stdexcept> |
diff --git a/src/common/param_package.h b/src/common/param_package.h index c13e45479..d7c13cb1f 100644 --- a/src/common/param_package.h +++ b/src/common/param_package.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2017 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/quaternion.h b/src/common/quaternion.h index 4d0871eb4..5bb5f2af0 100644 --- a/src/common/quaternion.h +++ b/src/common/quaternion.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/reader_writer_queue.h b/src/common/reader_writer_queue.h new file mode 100644 index 000000000..60c41a8cb --- /dev/null +++ b/src/common/reader_writer_queue.h | |||
| @@ -0,0 +1,940 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2013-2020 Cameron Desrochers | ||
| 2 | // SPDX-License-Identifier: BSD-2-Clause | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <cassert> | ||
| 7 | #include <cstdint> | ||
| 8 | #include <cstdlib> // For malloc/free/abort & size_t | ||
| 9 | #include <memory> | ||
| 10 | #include <new> | ||
| 11 | #include <stdexcept> | ||
| 12 | #include <type_traits> | ||
| 13 | #include <utility> | ||
| 14 | |||
| 15 | #include "common/atomic_helpers.h" | ||
| 16 | |||
| 17 | #if __cplusplus > 199711L || _MSC_VER >= 1700 // C++11 or VS2012 | ||
| 18 | #include <chrono> | ||
| 19 | #endif | ||
| 20 | |||
| 21 | // A lock-free queue for a single-consumer, single-producer architecture. | ||
| 22 | // The queue is also wait-free in the common path (except if more memory | ||
| 23 | // needs to be allocated, in which case malloc is called). | ||
| 24 | // Allocates memory sparingly, and only once if the original maximum size | ||
| 25 | // estimate is never exceeded. | ||
| 26 | // Tested on x86/x64 processors, but semantics should be correct for all | ||
| 27 | // architectures (given the right implementations in atomic_helpers.h), provided | ||
| 28 | // that aligned integer and pointer accesses are naturally atomic. | ||
| 29 | // Note that there should only be one consumer thread and one producer thread. | ||
| 30 | // Switching roles of the threads, or using multiple consecutive threads for | ||
| 31 | // one role, is not safe unless properly synchronized. | ||
| 32 | // Using the queue exclusively from one thread is fine, though a bit silly. | ||
| 33 | |||
| 34 | #ifndef MOODYCAMEL_CACHE_LINE_SIZE | ||
| 35 | #define MOODYCAMEL_CACHE_LINE_SIZE 64 | ||
| 36 | #endif | ||
| 37 | |||
| 38 | #ifndef MOODYCAMEL_EXCEPTIONS_ENABLED | ||
| 39 | #if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || \ | ||
| 40 | (!defined(_MSC_VER) && !defined(__GNUC__)) | ||
| 41 | #define MOODYCAMEL_EXCEPTIONS_ENABLED | ||
| 42 | #endif | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #ifndef MOODYCAMEL_HAS_EMPLACE | ||
| 46 | #if !defined(_MSC_VER) || \ | ||
| 47 | _MSC_VER >= 1800 // variadic templates: either a non-MS compiler or VS >= 2013 | ||
| 48 | #define MOODYCAMEL_HAS_EMPLACE 1 | ||
| 49 | #endif | ||
| 50 | #endif | ||
| 51 | |||
| 52 | #ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE | ||
| 53 | #if defined(__APPLE__) && defined(__MACH__) && __cplusplus >= 201703L | ||
| 54 | // This is required to find out what deployment target we are using | ||
| 55 | #include <CoreFoundation/CoreFoundation.h> | ||
| 56 | #if !defined(MAC_OS_X_VERSION_MIN_REQUIRED) || \ | ||
| 57 | MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14 | ||
| 58 | // C++17 new(size_t, align_val_t) is not backwards-compatible with older versions of macOS, so we | ||
| 59 | // can't support over-alignment in this case | ||
| 60 | #define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE | ||
| 61 | #endif | ||
| 62 | #endif | ||
| 63 | #endif | ||
| 64 | |||
| 65 | #ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE | ||
| 66 | #define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE AE_ALIGN(MOODYCAMEL_CACHE_LINE_SIZE) | ||
| 67 | #endif | ||
| 68 | |||
| 69 | #ifdef AE_VCPP | ||
| 70 | #pragma warning(push) | ||
| 71 | #pragma warning(disable : 4324) // structure was padded due to __declspec(align()) | ||
| 72 | #pragma warning(disable : 4820) // padding was added | ||
| 73 | #pragma warning(disable : 4127) // conditional expression is constant | ||
| 74 | #endif | ||
| 75 | |||
| 76 | namespace Common { | ||
| 77 | |||
| 78 | template <typename T, size_t MAX_BLOCK_SIZE = 512> | ||
| 79 | class MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE ReaderWriterQueue { | ||
| 80 | // Design: Based on a queue-of-queues. The low-level queues are just | ||
| 81 | // circular buffers with front and tail indices indicating where the | ||
| 82 | // next element to dequeue is and where the next element can be enqueued, | ||
| 83 | // respectively. Each low-level queue is called a "block". Each block | ||
| 84 | // wastes exactly one element's worth of space to keep the design simple | ||
| 85 | // (if front == tail then the queue is empty, and can't be full). | ||
| 86 | // The high-level queue is a circular linked list of blocks; again there | ||
| 87 | // is a front and tail, but this time they are pointers to the blocks. | ||
| 88 | // The front block is where the next element to be dequeued is, provided | ||
| 89 | // the block is not empty. The back block is where elements are to be | ||
| 90 | // enqueued, provided the block is not full. | ||
| 91 | // The producer thread owns all the tail indices/pointers. The consumer | ||
| 92 | // thread owns all the front indices/pointers. Both threads read each | ||
| 93 | // other's variables, but only the owning thread updates them. E.g. After | ||
| 94 | // the consumer reads the producer's tail, the tail may change before the | ||
| 95 | // consumer is done dequeuing an object, but the consumer knows the tail | ||
| 96 | // will never go backwards, only forwards. | ||
| 97 | // If there is no room to enqueue an object, an additional block (of | ||
| 98 | // equal size to the last block) is added. Blocks are never removed. | ||
| 99 | |||
| 100 | public: | ||
| 101 | typedef T value_type; | ||
| 102 | |||
| 103 | // Constructs a queue that can hold at least `size` elements without further | ||
| 104 | // allocations. If more than MAX_BLOCK_SIZE elements are requested, | ||
| 105 | // then several blocks of MAX_BLOCK_SIZE each are reserved (including | ||
| 106 | // at least one extra buffer block). | ||
| 107 | AE_NO_TSAN explicit ReaderWriterQueue(size_t size = 15) | ||
| 108 | #ifndef NDEBUG | ||
| 109 | : enqueuing(false), dequeuing(false) | ||
| 110 | #endif | ||
| 111 | { | ||
| 112 | assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) && | ||
| 113 | "MAX_BLOCK_SIZE must be a power of 2"); | ||
| 114 | assert(MAX_BLOCK_SIZE >= 2 && "MAX_BLOCK_SIZE must be at least 2"); | ||
| 115 | |||
| 116 | Block* firstBlock = nullptr; | ||
| 117 | |||
| 118 | largestBlockSize = | ||
| 119 | ceilToPow2(size + 1); // We need a spare slot to fit size elements in the block | ||
| 120 | if (largestBlockSize > MAX_BLOCK_SIZE * 2) { | ||
| 121 | // We need a spare block in case the producer is writing to a different block than the | ||
| 122 | // one the consumer is reading from and wants to enqueue the maximum number of elements. We | ||
| 123 | // also need a spare element in each block to avoid the ambiguity between front == tail | ||
| 124 | // meaning "empty" and "full". So the effective number of slots that are guaranteed to | ||
| 125 | // be usable at any time is the block size - 1 times the number of blocks - 1. Solving | ||
| 126 | // for size and applying a ceiling to the division gives us (after simplifying): | ||
| 127 | size_t initialBlockCount = (size + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1); | ||
| 128 | largestBlockSize = MAX_BLOCK_SIZE; | ||
| 129 | Block* lastBlock = nullptr; | ||
| 130 | for (size_t i = 0; i != initialBlockCount; ++i) { | ||
| 131 | auto block = make_block(largestBlockSize); | ||
| 132 | if (block == nullptr) { | ||
| 133 | #ifdef MOODYCAMEL_EXCEPTIONS_ENABLED | ||
| 134 | throw std::bad_alloc(); | ||
| 135 | #else | ||
| 136 | abort(); | ||
| 137 | #endif | ||
| 138 | } | ||
| 139 | if (firstBlock == nullptr) { | ||
| 140 | firstBlock = block; | ||
| 141 | } else { | ||
| 142 | lastBlock->next = block; | ||
| 143 | } | ||
| 144 | lastBlock = block; | ||
| 145 | block->next = firstBlock; | ||
| 146 | } | ||
| 147 | } else { | ||
| 148 | firstBlock = make_block(largestBlockSize); | ||
| 149 | if (firstBlock == nullptr) { | ||
| 150 | #ifdef MOODYCAMEL_EXCEPTIONS_ENABLED | ||
| 151 | throw std::bad_alloc(); | ||
| 152 | #else | ||
| 153 | abort(); | ||
| 154 | #endif | ||
| 155 | } | ||
| 156 | firstBlock->next = firstBlock; | ||
| 157 | } | ||
| 158 | frontBlock = firstBlock; | ||
| 159 | tailBlock = firstBlock; | ||
| 160 | |||
| 161 | // Make sure the reader/writer threads will have the initialized memory setup above: | ||
| 162 | fence(memory_order_sync); | ||
| 163 | } | ||
| 164 | |||
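As a concrete check of the block-count formula in the constructor above (the numbers are illustrative, not taken from any caller in this change):

    #include <cstddef>

    // Requesting 2000 elements with the default MAX_BLOCK_SIZE of 512:
    constexpr std::size_t max_block_size = 512;
    constexpr std::size_t requested = 2000;
    constexpr std::size_t blocks = (requested + max_block_size * 2 - 3) / (max_block_size - 1);
    static_assert(blocks == 5, "ceil(2000 / 511) full blocks plus one spare block");
    // (block size - 1) usable slots per block, times (blocks - 1) always-usable blocks:
    static_assert((max_block_size - 1) * (blocks - 1) >= requested, "2044 guaranteed slots >= 2000");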
| 165 | // Note: The queue should not be accessed concurrently while it's | ||
| 166 | // being moved. It's up to the user to synchronize this. | ||
| 167 | AE_NO_TSAN ReaderWriterQueue(ReaderWriterQueue&& other) | ||
| 168 | : frontBlock(other.frontBlock.load()), tailBlock(other.tailBlock.load()), | ||
| 169 | largestBlockSize(other.largestBlockSize) | ||
| 170 | #ifndef NDEBUG | ||
| 171 | , | ||
| 172 | enqueuing(false), dequeuing(false) | ||
| 173 | #endif | ||
| 174 | { | ||
| 175 | other.largestBlockSize = 32; | ||
| 176 | Block* b = other.make_block(other.largestBlockSize); | ||
| 177 | if (b == nullptr) { | ||
| 178 | #ifdef MOODYCAMEL_EXCEPTIONS_ENABLED | ||
| 179 | throw std::bad_alloc(); | ||
| 180 | #else | ||
| 181 | abort(); | ||
| 182 | #endif | ||
| 183 | } | ||
| 184 | b->next = b; | ||
| 185 | other.frontBlock = b; | ||
| 186 | other.tailBlock = b; | ||
| 187 | } | ||
| 188 | |||
| 189 | // Note: The queue should not be accessed concurrently while it's | ||
| 190 | // being moved. It's up to the user to synchronize this. | ||
| 191 | ReaderWriterQueue& operator=(ReaderWriterQueue&& other) AE_NO_TSAN { | ||
| 192 | Block* b = frontBlock.load(); | ||
| 193 | frontBlock = other.frontBlock.load(); | ||
| 194 | other.frontBlock = b; | ||
| 195 | b = tailBlock.load(); | ||
| 196 | tailBlock = other.tailBlock.load(); | ||
| 197 | other.tailBlock = b; | ||
| 198 | std::swap(largestBlockSize, other.largestBlockSize); | ||
| 199 | return *this; | ||
| 200 | } | ||
| 201 | |||
| 202 | // Note: The queue should not be accessed concurrently while it's | ||
| 203 | // being deleted. It's up to the user to synchronize this. | ||
| 204 | AE_NO_TSAN ~ReaderWriterQueue() { | ||
| 205 | // Make sure we get the latest version of all variables from other CPUs: | ||
| 206 | fence(memory_order_sync); | ||
| 207 | |||
| 208 | // Destroy any remaining objects in queue and free memory | ||
| 209 | Block* frontBlock_ = frontBlock; | ||
| 210 | Block* block = frontBlock_; | ||
| 211 | do { | ||
| 212 | Block* nextBlock = block->next; | ||
| 213 | size_t blockFront = block->front; | ||
| 214 | size_t blockTail = block->tail; | ||
| 215 | |||
| 216 | for (size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) { | ||
| 217 | auto element = reinterpret_cast<T*>(block->data + i * sizeof(T)); | ||
| 218 | element->~T(); | ||
| 219 | (void)element; | ||
| 220 | } | ||
| 221 | |||
| 222 | auto rawBlock = block->rawThis; | ||
| 223 | block->~Block(); | ||
| 224 | std::free(rawBlock); | ||
| 225 | block = nextBlock; | ||
| 226 | } while (block != frontBlock_); | ||
| 227 | } | ||
| 228 | |||
| 229 | // Enqueues a copy of element if there is room in the queue. | ||
| 230 | // Returns true if the element was enqueued, false otherwise. | ||
| 231 | // Does not allocate memory. | ||
| 232 | AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN { | ||
| 233 | return inner_enqueue<CannotAlloc>(element); | ||
| 234 | } | ||
| 235 | |||
| 236 | // Enqueues a moved copy of element if there is room in the queue. | ||
| 237 | // Returns true if the element was enqueued, false otherwise. | ||
| 238 | // Does not allocate memory. | ||
| 239 | AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN { | ||
| 240 | return inner_enqueue<CannotAlloc>(std::forward<T>(element)); | ||
| 241 | } | ||
| 242 | |||
| 243 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 244 | // Like try_enqueue() but with emplace semantics (i.e. construct-in-place). | ||
| 245 | template <typename... Args> | ||
| 246 | AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN { | ||
| 247 | return inner_enqueue<CannotAlloc>(std::forward<Args>(args)...); | ||
| 248 | } | ||
| 249 | #endif | ||
| 250 | |||
| 251 | // Enqueues a copy of element on the queue. | ||
| 252 | // Allocates an additional block of memory if needed. | ||
| 253 | // Only fails (returns false) if memory allocation fails. | ||
| 254 | AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN { | ||
| 255 | return inner_enqueue<CanAlloc>(element); | ||
| 256 | } | ||
| 257 | |||
| 258 | // Enqueues a moved copy of element on the queue. | ||
| 259 | // Allocates an additional block of memory if needed. | ||
| 260 | // Only fails (returns false) if memory allocation fails. | ||
| 261 | AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN { | ||
| 262 | return inner_enqueue<CanAlloc>(std::forward<T>(element)); | ||
| 263 | } | ||
| 264 | |||
| 265 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 266 | // Like enqueue() but with emplace semantics (i.e. construct-in-place). | ||
| 267 | template <typename... Args> | ||
| 268 | AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN { | ||
| 269 | return inner_enqueue<CanAlloc>(std::forward<Args>(args)...); | ||
| 270 | } | ||
| 271 | #endif | ||
| 272 | |||
| 273 | // Attempts to dequeue an element; if the queue is empty, | ||
| 274 | // returns false instead. If the queue has at least one element, | ||
| 275 | // moves front to result using operator=, then returns true. | ||
| 276 | template <typename U> | ||
| 277 | bool try_dequeue(U& result) AE_NO_TSAN { | ||
| 278 | #ifndef NDEBUG | ||
| 279 | ReentrantGuard guard(this->dequeuing); | ||
| 280 | #endif | ||
| 281 | |||
| 282 | // High-level pseudocode: | ||
| 283 | // Remember where the tail block is | ||
| 284 | // If the front block has an element in it, dequeue it | ||
| 285 | // Else | ||
| 286 | // If front block was the tail block when we entered the function, return false | ||
| 287 | // Else advance to next block and dequeue the item there | ||
| 288 | |||
| 289 | // Note that we have to use the value of the tail block from before we check if the front | ||
| 290 | // block is full or not, in case the front block is empty and then, before we check if the | ||
| 291 | // tail block is at the front block or not, the producer fills up the front block *and | ||
| 292 | // moves on*, which would make us skip a filled block. Seems unlikely, but was consistently | ||
| 293 | // reproducible in practice. | ||
| 294 | // In order to avoid overhead in the common case, though, we do a double-checked pattern | ||
| 295 | // where we have the fast path if the front block is not empty, then read the tail block, | ||
| 296 | // then re-read the front block and check if it's not empty again, then check if the tail | ||
| 297 | // block has advanced. | ||
| 298 | |||
| 299 | Block* frontBlock_ = frontBlock.load(); | ||
| 300 | size_t blockTail = frontBlock_->localTail; | ||
| 301 | size_t blockFront = frontBlock_->front.load(); | ||
| 302 | |||
| 303 | if (blockFront != blockTail || | ||
| 304 | blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) { | ||
| 305 | fence(memory_order_acquire); | ||
| 306 | |||
| 307 | non_empty_front_block: | ||
| 308 | // Front block not empty, dequeue from here | ||
| 309 | auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T)); | ||
| 310 | result = std::move(*element); | ||
| 311 | element->~T(); | ||
| 312 | |||
| 313 | blockFront = (blockFront + 1) & frontBlock_->sizeMask; | ||
| 314 | |||
| 315 | fence(memory_order_release); | ||
| 316 | frontBlock_->front = blockFront; | ||
| 317 | } else if (frontBlock_ != tailBlock.load()) { | ||
| 318 | fence(memory_order_acquire); | ||
| 319 | |||
| 320 | frontBlock_ = frontBlock.load(); | ||
| 321 | blockTail = frontBlock_->localTail = frontBlock_->tail.load(); | ||
| 322 | blockFront = frontBlock_->front.load(); | ||
| 323 | fence(memory_order_acquire); | ||
| 324 | |||
| 325 | if (blockFront != blockTail) { | ||
| 326 | // Oh look, the front block isn't empty after all | ||
| 327 | goto non_empty_front_block; | ||
| 328 | } | ||
| 329 | |||
| 330 | // Front block is empty but there's another block ahead, advance to it | ||
| 331 | Block* nextBlock = frontBlock_->next; | ||
| 332 | // Don't need an acquire fence here since next can only ever be set on the tailBlock, | ||
| 333 | // and we're not the tailBlock, and we did an acquire earlier after reading tailBlock | ||
| 334 | // which ensures next is up-to-date on this CPU in case we recently were at tailBlock. | ||
| 335 | |||
| 336 | size_t nextBlockFront = nextBlock->front.load(); | ||
| 337 | size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load(); | ||
| 338 | fence(memory_order_acquire); | ||
| 339 | |||
| 340 | // Since the tailBlock is only ever advanced after being written to, | ||
| 341 | // we know there's for sure an element to dequeue on it | ||
| 342 | assert(nextBlockFront != nextBlockTail); | ||
| 343 | AE_UNUSED(nextBlockTail); | ||
| 344 | |||
| 345 | // We're done with this block, let the producer use it if it needs | ||
| 346 | fence(memory_order_release); // Expose possibly pending changes to frontBlock->front | ||
| 347 | // from last dequeue | ||
| 348 | frontBlock = frontBlock_ = nextBlock; | ||
| 349 | |||
| 350 | compiler_fence(memory_order_release); // Not strictly needed | ||
| 351 | |||
| 352 | auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T)); | ||
| 353 | |||
| 354 | result = std::move(*element); | ||
| 355 | element->~T(); | ||
| 356 | |||
| 357 | nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask; | ||
| 358 | |||
| 359 | fence(memory_order_release); | ||
| 360 | frontBlock_->front = nextBlockFront; | ||
| 361 | } else { | ||
| 362 | // No elements in current block and no other block to advance to | ||
| 363 | return false; | ||
| 364 | } | ||
| 365 | |||
| 366 | return true; | ||
| 367 | } | ||
| 368 | |||
| 369 | // Returns a pointer to the front element in the queue (the one that | ||
| 370 | // would be removed next by a call to `try_dequeue` or `pop`). If the | ||
| 371 | // queue appears empty at the time the method is called, nullptr is | ||
| 372 | // returned instead. | ||
| 373 | // Must be called only from the consumer thread. | ||
| 374 | T* peek() const AE_NO_TSAN { | ||
| 375 | #ifndef NDEBUG | ||
| 376 | ReentrantGuard guard(this->dequeuing); | ||
| 377 | #endif | ||
| 378 | // See try_dequeue() for reasoning | ||
| 379 | |||
| 380 | Block* frontBlock_ = frontBlock.load(); | ||
| 381 | size_t blockTail = frontBlock_->localTail; | ||
| 382 | size_t blockFront = frontBlock_->front.load(); | ||
| 383 | |||
| 384 | if (blockFront != blockTail || | ||
| 385 | blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) { | ||
| 386 | fence(memory_order_acquire); | ||
| 387 | non_empty_front_block: | ||
| 388 | return reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T)); | ||
| 389 | } else if (frontBlock_ != tailBlock.load()) { | ||
| 390 | fence(memory_order_acquire); | ||
| 391 | frontBlock_ = frontBlock.load(); | ||
| 392 | blockTail = frontBlock_->localTail = frontBlock_->tail.load(); | ||
| 393 | blockFront = frontBlock_->front.load(); | ||
| 394 | fence(memory_order_acquire); | ||
| 395 | |||
| 396 | if (blockFront != blockTail) { | ||
| 397 | goto non_empty_front_block; | ||
| 398 | } | ||
| 399 | |||
| 400 | Block* nextBlock = frontBlock_->next; | ||
| 401 | |||
| 402 | size_t nextBlockFront = nextBlock->front.load(); | ||
| 403 | fence(memory_order_acquire); | ||
| 404 | |||
| 405 | assert(nextBlockFront != nextBlock->tail.load()); | ||
| 406 | return reinterpret_cast<T*>(nextBlock->data + nextBlockFront * sizeof(T)); | ||
| 407 | } | ||
| 408 | |||
| 409 | return nullptr; | ||
| 410 | } | ||
| 411 | |||
| 412 | // Removes the front element from the queue, if any, without returning it. | ||
| 413 | // Returns true on success, or false if the queue appeared empty at the time | ||
| 414 | // `pop` was called. | ||
| 415 | bool pop() AE_NO_TSAN { | ||
| 416 | #ifndef NDEBUG | ||
| 417 | ReentrantGuard guard(this->dequeuing); | ||
| 418 | #endif | ||
| 419 | // See try_dequeue() for reasoning | ||
| 420 | |||
| 421 | Block* frontBlock_ = frontBlock.load(); | ||
| 422 | size_t blockTail = frontBlock_->localTail; | ||
| 423 | size_t blockFront = frontBlock_->front.load(); | ||
| 424 | |||
| 425 | if (blockFront != blockTail || | ||
| 426 | blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) { | ||
| 427 | fence(memory_order_acquire); | ||
| 428 | |||
| 429 | non_empty_front_block: | ||
| 430 | auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T)); | ||
| 431 | element->~T(); | ||
| 432 | |||
| 433 | blockFront = (blockFront + 1) & frontBlock_->sizeMask; | ||
| 434 | |||
| 435 | fence(memory_order_release); | ||
| 436 | frontBlock_->front = blockFront; | ||
| 437 | } else if (frontBlock_ != tailBlock.load()) { | ||
| 438 | fence(memory_order_acquire); | ||
| 439 | frontBlock_ = frontBlock.load(); | ||
| 440 | blockTail = frontBlock_->localTail = frontBlock_->tail.load(); | ||
| 441 | blockFront = frontBlock_->front.load(); | ||
| 442 | fence(memory_order_acquire); | ||
| 443 | |||
| 444 | if (blockFront != blockTail) { | ||
| 445 | goto non_empty_front_block; | ||
| 446 | } | ||
| 447 | |||
| 448 | // Front block is empty but there's another block ahead, advance to it | ||
| 449 | Block* nextBlock = frontBlock_->next; | ||
| 450 | |||
| 451 | size_t nextBlockFront = nextBlock->front.load(); | ||
| 452 | size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load(); | ||
| 453 | fence(memory_order_acquire); | ||
| 454 | |||
| 455 | assert(nextBlockFront != nextBlockTail); | ||
| 456 | AE_UNUSED(nextBlockTail); | ||
| 457 | |||
| 458 | fence(memory_order_release); | ||
| 459 | frontBlock = frontBlock_ = nextBlock; | ||
| 460 | |||
| 461 | compiler_fence(memory_order_release); | ||
| 462 | |||
| 463 | auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T)); | ||
| 464 | element->~T(); | ||
| 465 | |||
| 466 | nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask; | ||
| 467 | |||
| 468 | fence(memory_order_release); | ||
| 469 | frontBlock_->front = nextBlockFront; | ||
| 470 | } else { | ||
| 471 | // No elements in current block and no other block to advance to | ||
| 472 | return false; | ||
| 473 | } | ||
| 474 | |||
| 475 | return true; | ||
| 476 | } | ||
| 477 | |||
| 478 | // Returns the approximate number of items currently in the queue. | ||
| 479 | // Safe to call from both the producer and consumer threads. | ||
| 480 | inline size_t size_approx() const AE_NO_TSAN { | ||
| 481 | size_t result = 0; | ||
| 482 | Block* frontBlock_ = frontBlock.load(); | ||
| 483 | Block* block = frontBlock_; | ||
| 484 | do { | ||
| 485 | fence(memory_order_acquire); | ||
| 486 | size_t blockFront = block->front.load(); | ||
| 487 | size_t blockTail = block->tail.load(); | ||
| 488 | result += (blockTail - blockFront) & block->sizeMask; | ||
| 489 | block = block->next.load(); | ||
| 490 | } while (block != frontBlock_); | ||
| 491 | return result; | ||
| 492 | } | ||
| 493 | |||
| 494 | // Returns the total number of items that could be enqueued without incurring | ||
| 495 | // an allocation when this queue is empty. | ||
| 496 | // Safe to call from both the producer and consumer threads. | ||
| 497 | // | ||
| 498 | // NOTE: The actual capacity during usage may be different depending on the consumer. | ||
| 499 | // If the consumer is removing elements concurrently, the producer cannot add to | ||
| 500 | // the block the consumer is removing from until it's completely empty, except in | ||
| 501 | // the case where the producer was writing to the same block the consumer was | ||
| 502 | // reading from the whole time. | ||
| 503 | inline size_t max_capacity() const { | ||
| 504 | size_t result = 0; | ||
| 505 | Block* frontBlock_ = frontBlock.load(); | ||
| 506 | Block* block = frontBlock_; | ||
| 507 | do { | ||
| 508 | fence(memory_order_acquire); | ||
| 509 | result += block->sizeMask; | ||
| 510 | block = block->next.load(); | ||
| 511 | } while (block != frontBlock_); | ||
| 512 | return result; | ||
| 513 | } | ||
| 514 | |||
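A small sketch of what these size helpers report for a freshly constructed queue (the element type and values are arbitrary):

    #include <cassert>
    #include "common/reader_writer_queue.h"

    void capacity_demo() {
        // The default size of 15 rounds up to one 16-slot block (ceilToPow2(15 + 1)),
        // of which sizeMask == 15 slots are usable before an allocation is needed.
        Common::ReaderWriterQueue<int> q;
        assert(q.size_approx() == 0);
        assert(q.max_capacity() == 15);

        q.enqueue(42);
        assert(q.size_approx() == 1);
    }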
| 515 | private: | ||
| 516 | enum AllocationMode { CanAlloc, CannotAlloc }; | ||
| 517 | |||
| 518 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 519 | template <AllocationMode canAlloc, typename... Args> | ||
| 520 | bool inner_enqueue(Args&&... args) AE_NO_TSAN | ||
| 521 | #else | ||
| 522 | template <AllocationMode canAlloc, typename U> | ||
| 523 | bool inner_enqueue(U&& element) AE_NO_TSAN | ||
| 524 | #endif | ||
| 525 | { | ||
| 526 | #ifndef NDEBUG | ||
| 527 | ReentrantGuard guard(this->enqueuing); | ||
| 528 | #endif | ||
| 529 | |||
| 530 | // High-level pseudocode (assuming we're allowed to alloc a new block): | ||
| 531 | // If room in tail block, add to tail | ||
| 532 | // Else check next block | ||
| 533 | // If next block is not the head block, enqueue on next block | ||
| 534 | // Else create a new block and enqueue there | ||
| 535 | // Advance tail to the block we just enqueued to | ||
| 536 | |||
| 537 | Block* tailBlock_ = tailBlock.load(); | ||
| 538 | size_t blockFront = tailBlock_->localFront; | ||
| 539 | size_t blockTail = tailBlock_->tail.load(); | ||
| 540 | |||
| 541 | size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask; | ||
| 542 | if (nextBlockTail != blockFront || | ||
| 543 | nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) { | ||
| 544 | fence(memory_order_acquire); | ||
| 545 | // This block has room for at least one more element | ||
| 546 | char* location = tailBlock_->data + blockTail * sizeof(T); | ||
| 547 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 548 | new (location) T(std::forward<Args>(args)...); | ||
| 549 | #else | ||
| 550 | new (location) T(std::forward<U>(element)); | ||
| 551 | #endif | ||
| 552 | |||
| 553 | fence(memory_order_release); | ||
| 554 | tailBlock_->tail = nextBlockTail; | ||
| 555 | } else { | ||
| 556 | fence(memory_order_acquire); | ||
| 557 | if (tailBlock_->next.load() != frontBlock) { | ||
| 558 | // Note that the reason we can't advance to the frontBlock and start adding new | ||
| 559 | // entries there is because if we did, then dequeue would stay in that block, | ||
| 560 | // eventually reading the new values, instead of advancing to the next full block | ||
| 561 | // (whose values were enqueued first and so should be consumed first). | ||
| 562 | |||
| 563 | fence(memory_order_acquire); // Ensure we get latest writes if we got the latest | ||
| 564 | // frontBlock | ||
| 565 | |||
| 566 | // tailBlock is full, but there's a free block ahead, use it | ||
| 567 | Block* tailBlockNext = tailBlock_->next.load(); | ||
| 568 | size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load(); | ||
| 569 | nextBlockTail = tailBlockNext->tail.load(); | ||
| 570 | fence(memory_order_acquire); | ||
| 571 | |||
| 572 | // This block must be empty since it's not the head block and we | ||
| 573 | // go through the blocks in a circle | ||
| 574 | assert(nextBlockFront == nextBlockTail); | ||
| 575 | tailBlockNext->localFront = nextBlockFront; | ||
| 576 | |||
| 577 | char* location = tailBlockNext->data + nextBlockTail * sizeof(T); | ||
| 578 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 579 | new (location) T(std::forward<Args>(args)...); | ||
| 580 | #else | ||
| 581 | new (location) T(std::forward<U>(element)); | ||
| 582 | #endif | ||
| 583 | |||
| 584 | tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask; | ||
| 585 | |||
| 586 | fence(memory_order_release); | ||
| 587 | tailBlock = tailBlockNext; | ||
| 588 | } else if (canAlloc == CanAlloc) { | ||
| 589 | // tailBlock is full and there's no free block ahead; create a new block | ||
| 590 | auto newBlockSize = | ||
| 591 | largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2; | ||
| 592 | auto newBlock = make_block(newBlockSize); | ||
| 593 | if (newBlock == nullptr) { | ||
| 594 | // Could not allocate a block! | ||
| 595 | return false; | ||
| 596 | } | ||
| 597 | largestBlockSize = newBlockSize; | ||
| 598 | |||
| 599 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 600 | new (newBlock->data) T(std::forward<Args>(args)...); | ||
| 601 | #else | ||
| 602 | new (newBlock->data) T(std::forward<U>(element)); | ||
| 603 | #endif | ||
| 604 | assert(newBlock->front == 0); | ||
| 605 | newBlock->tail = newBlock->localTail = 1; | ||
| 606 | |||
| 607 | newBlock->next = tailBlock_->next.load(); | ||
| 608 | tailBlock_->next = newBlock; | ||
| 609 | |||
| 610 | // Might be possible for the dequeue thread to see the new tailBlock->next | ||
| 611 | // *without* seeing the new tailBlock value, but this is OK since it can't | ||
| 612 | // advance to the next block until tailBlock is set anyway (because the only | ||
| 613 | // case where it could try to read the next is if it's already at the tailBlock, | ||
| 614 | // and it won't advance past tailBlock in any circumstance). | ||
| 615 | |||
| 616 | fence(memory_order_release); | ||
| 617 | tailBlock = newBlock; | ||
| 618 | } else if (canAlloc == CannotAlloc) { | ||
| 619 | // Would have had to allocate a new block to enqueue, but not allowed | ||
| 620 | return false; | ||
| 621 | } else { | ||
| 622 | assert(false && "Should be unreachable code"); | ||
| 623 | return false; | ||
| 624 | } | ||
| 625 | } | ||
| 626 | |||
| 627 | return true; | ||
| 628 | } | ||
| 629 | |||
| 630 | // Disable copying | ||
| 631 | ReaderWriterQueue(ReaderWriterQueue const&) {} | ||
| 632 | |||
| 633 | // Disable assignment | ||
| 634 | ReaderWriterQueue& operator=(ReaderWriterQueue const&) {} | ||
| 635 | |||
| 636 | AE_FORCEINLINE static size_t ceilToPow2(size_t x) { | ||
| 637 | // From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 | ||
| 638 | --x; | ||
| 639 | x |= x >> 1; | ||
| 640 | x |= x >> 2; | ||
| 641 | x |= x >> 4; | ||
| 642 | for (size_t i = 1; i < sizeof(size_t); i <<= 1) { | ||
| 643 | x |= x >> (i << 3); | ||
| 644 | } | ||
| 645 | ++x; | ||
| 646 | return x; | ||
| 647 | } | ||
| 648 | |||
| 649 | template <typename U> | ||
| 650 | static AE_FORCEINLINE char* align_for(char* ptr) AE_NO_TSAN { | ||
| 651 | const std::size_t alignment = std::alignment_of<U>::value; | ||
| 652 | return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment; | ||
| 653 | } | ||
| 654 | |||
| 655 | private: | ||
| 656 | #ifndef NDEBUG | ||
| 657 | struct ReentrantGuard { | ||
| 658 | AE_NO_TSAN ReentrantGuard(weak_atomic<bool>& _inSection) : inSection(_inSection) { | ||
| 659 | assert(!inSection && | ||
| 660 | "Concurrent (or re-entrant) enqueue or dequeue operation detected (only one " | ||
| 661 | "thread at a time may hold the producer or consumer role)"); | ||
| 662 | inSection = true; | ||
| 663 | } | ||
| 664 | |||
| 665 | AE_NO_TSAN ~ReentrantGuard() { | ||
| 666 | inSection = false; | ||
| 667 | } | ||
| 668 | |||
| 669 | private: | ||
| 670 | ReentrantGuard& operator=(ReentrantGuard const&); | ||
| 671 | |||
| 672 | private: | ||
| 673 | weak_atomic<bool>& inSection; | ||
| 674 | }; | ||
| 675 | #endif | ||
| 676 | |||
| 677 | struct Block { | ||
| 678 | // Avoid false-sharing by putting highly contended variables on their own cache lines | ||
| 679 | weak_atomic<size_t> front; // (Atomic) Elements are read from here | ||
| 680 | size_t localTail; // An uncontended shadow copy of tail, owned by the consumer | ||
| 681 | |||
| 682 | char cachelineFiller0[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - | ||
| 683 | sizeof(size_t)]; | ||
| 684 | weak_atomic<size_t> tail; // (Atomic) Elements are enqueued here | ||
| 685 | size_t localFront; | ||
| 686 | |||
| 687 | char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) - | ||
| 688 | sizeof(size_t)]; // next isn't very contended, but we don't want it on | ||
| 689 | // the same cache line as tail (which is) | ||
| 690 | weak_atomic<Block*> next; // (Atomic) | ||
| 691 | |||
| 692 | char* data; // Contents (on heap) are aligned to T's alignment | ||
| 693 | |||
| 694 | const size_t sizeMask; | ||
| 695 | |||
| 696 | // size must be a power of two (and greater than 0) | ||
| 697 | AE_NO_TSAN Block(size_t const& _size, char* _rawThis, char* _data) | ||
| 698 | : front(0UL), localTail(0), tail(0UL), localFront(0), next(nullptr), data(_data), | ||
| 699 | sizeMask(_size - 1), rawThis(_rawThis) {} | ||
| 700 | |||
| 701 | private: | ||
| 702 | // C4512 - Assignment operator could not be generated | ||
| 703 | Block& operator=(Block const&); | ||
| 704 | |||
| 705 | public: | ||
| 706 | char* rawThis; | ||
| 707 | }; | ||
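The cachelineFiller members exist only to keep the consumer-owned fields (front, localTail), the producer-owned fields (tail, localFront) and the rarely written next/data pointers on separate cache lines, so the two threads do not repeatedly invalidate each other's lines. The same idea in standard C++, using alignas rather than explicit filler arrays and assuming the usual 64-byte line size that MOODYCAMEL_CACHE_LINE_SIZE stands for:

    #include <atomic>
    #include <cstddef>

    constexpr std::size_t kCacheLine = 64; // assumed cache-line size

    // Consumer-side and producer-side indices forced onto different cache lines,
    // analogous to Block's front/tail plus the filler members above.
    struct PaddedIndices {
        alignas(kCacheLine) std::atomic<std::size_t> front{0}; // consumer reads elements here
        std::size_t localTail{0};                              // consumer's shadow of tail
        alignas(kCacheLine) std::atomic<std::size_t> tail{0};  // producer writes elements here
        std::size_t localFront{0};                             // producer's shadow of front
    };

    // The producer fields start at least one full cache line after the consumer fields.
    static_assert(sizeof(PaddedIndices) >= 2 * kCacheLine,
                  "front and tail should not share a cache line");

    int main() {}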
| 708 | |||
| 709 | static Block* make_block(size_t capacity) AE_NO_TSAN { | ||
| 710 | // Allocate enough memory for the block itself, as well as all the elements it will contain | ||
| 711 | auto size = sizeof(Block) + std::alignment_of<Block>::value - 1; | ||
| 712 | size += sizeof(T) * capacity + std::alignment_of<T>::value - 1; | ||
| 713 | auto newBlockRaw = static_cast<char*>(std::malloc(size)); | ||
| 714 | if (newBlockRaw == nullptr) { | ||
| 715 | return nullptr; | ||
| 716 | } | ||
| 717 | |||
| 718 | auto newBlockAligned = align_for<Block>(newBlockRaw); | ||
| 719 | auto newBlockData = align_for<T>(newBlockAligned + sizeof(Block)); | ||
| 720 | return new (newBlockAligned) Block(capacity, newBlockRaw, newBlockData); | ||
| 721 | } | ||
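make_block performs one malloc covering both the Block header and the element storage, then uses align_for twice: once to find a correctly aligned address for the Block itself and once, just past the header, for the array of T; the extra alignment_of<...>::value - 1 bytes added to the size guarantee both adjustments fit. A compilable sketch of the same single-allocation layout (Header, Elem and the function names are invented for the example):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <new>

    // Round ptr up to the next multiple of alignof(U), as align_for above does.
    template <typename U>
    char* AlignFor(char* ptr) {
        const std::size_t alignment = alignof(U);
        const std::uintptr_t p = reinterpret_cast<std::uintptr_t>(ptr);
        return ptr + (alignment - p % alignment) % alignment;
    }

    struct Header {
        std::size_t capacity;
        char* data;     // start of the element storage
        char* raw_this; // original malloc pointer, kept for std::free (like Block::rawThis)
    };

    // One allocation holding an aligned Header followed by aligned storage for
    // `capacity` elements of Elem, mirroring make_block above.
    template <typename Elem>
    Header* MakeBlock(std::size_t capacity) {
        std::size_t size = sizeof(Header) + alignof(Header) - 1;
        size += sizeof(Elem) * capacity + alignof(Elem) - 1;
        char* raw = static_cast<char*>(std::malloc(size));
        if (raw == nullptr) {
            return nullptr;
        }
        char* header_ptr = AlignFor<Header>(raw);
        char* data_ptr = AlignFor<Elem>(header_ptr + sizeof(Header));
        return new (header_ptr) Header{capacity, data_ptr, raw};
    }

    int main() {
        if (Header* block = MakeBlock<double>(16)) {
            std::free(block->raw_this); // elements would be constructed in block->data first
        }
    }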
| 722 | |||
| 723 | private: | ||
| 724 | weak_atomic<Block*> frontBlock; // (Atomic) Elements are dequeued from this block | ||
| 725 | |||
| 726 | char cachelineFiller[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<Block*>)]; | ||
| 727 | weak_atomic<Block*> tailBlock; // (Atomic) Elements are enqueued to this block | ||
| 728 | |||
| 729 | size_t largestBlockSize; | ||
| 730 | |||
| 731 | #ifndef NDEBUG | ||
| 732 | weak_atomic<bool> enqueuing; | ||
| 733 | mutable weak_atomic<bool> dequeuing; | ||
| 734 | #endif | ||
| 735 | }; | ||
| 736 | |||
| 737 | // Like ReaderWriterQueue, but also provides blocking operations | ||
| 738 | template <typename T, size_t MAX_BLOCK_SIZE = 512> | ||
| 739 | class BlockingReaderWriterQueue { | ||
| 740 | private: | ||
| 741 | typedef ::Common::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue; | ||
| 742 | |||
| 743 | public: | ||
| 744 | explicit BlockingReaderWriterQueue(size_t size = 15) AE_NO_TSAN | ||
| 745 | : inner(size), | ||
| 746 | sema(new spsc_sema::LightweightSemaphore()) {} | ||
| 747 | |||
| 748 | BlockingReaderWriterQueue(BlockingReaderWriterQueue&& other) AE_NO_TSAN | ||
| 749 | : inner(std::move(other.inner)), | ||
| 750 | sema(std::move(other.sema)) {} | ||
| 751 | |||
| 752 | BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue&& other) AE_NO_TSAN { | ||
| 753 | std::swap(sema, other.sema); | ||
| 754 | std::swap(inner, other.inner); | ||
| 755 | return *this; | ||
| 756 | } | ||
| 757 | |||
| 758 | // Enqueues a copy of element if there is room in the queue. | ||
| 759 | // Returns true if the element was enqueued, false otherwise. | ||
| 760 | // Does not allocate memory. | ||
| 761 | AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN { | ||
| 762 | if (inner.try_enqueue(element)) { | ||
| 763 | sema->signal(); | ||
| 764 | return true; | ||
| 765 | } | ||
| 766 | return false; | ||
| 767 | } | ||
| 768 | |||
| 769 | // Enqueues a moved copy of element if there is room in the queue. | ||
| 770 | // Returns true if the element was enqueued, false otherwise. | ||
| 771 | // Does not allocate memory. | ||
| 772 | AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN { | ||
| 773 | if (inner.try_enqueue(std::forward<T>(element))) { | ||
| 774 | sema->signal(); | ||
| 775 | return true; | ||
| 776 | } | ||
| 777 | return false; | ||
| 778 | } | ||
| 779 | |||
| 780 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 781 | // Like try_enqueue() but with emplace semantics (i.e. construct-in-place). | ||
| 782 | template <typename... Args> | ||
| 783 | AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN { | ||
| 784 | if (inner.try_emplace(std::forward<Args>(args)...)) { | ||
| 785 | sema->signal(); | ||
| 786 | return true; | ||
| 787 | } | ||
| 788 | return false; | ||
| 789 | } | ||
| 790 | #endif | ||
| 791 | |||
| 792 | // Enqueues a copy of element on the queue. | ||
| 793 | // Allocates an additional block of memory if needed. | ||
| 794 | // Only fails (returns false) if memory allocation fails. | ||
| 795 | AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN { | ||
| 796 | if (inner.enqueue(element)) { | ||
| 797 | sema->signal(); | ||
| 798 | return true; | ||
| 799 | } | ||
| 800 | return false; | ||
| 801 | } | ||
| 802 | |||
| 803 | // Enqueues a moved copy of element on the queue. | ||
| 804 | // Allocates an additional block of memory if needed. | ||
| 805 | // Only fails (returns false) if memory allocation fails. | ||
| 806 | AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN { | ||
| 807 | if (inner.enqueue(std::forward<T>(element))) { | ||
| 808 | sema->signal(); | ||
| 809 | return true; | ||
| 810 | } | ||
| 811 | return false; | ||
| 812 | } | ||
| 813 | |||
| 814 | #if MOODYCAMEL_HAS_EMPLACE | ||
| 815 | // Like enqueue() but with emplace semantics (i.e. construct-in-place). | ||
| 816 | template <typename... Args> | ||
| 817 | AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN { | ||
| 818 | if (inner.emplace(std::forward<Args>(args)...)) { | ||
| 819 | sema->signal(); | ||
| 820 | return true; | ||
| 821 | } | ||
| 822 | return false; | ||
| 823 | } | ||
| 824 | #endif | ||
| 825 | |||
| 826 | // Attempts to dequeue an element; if the queue is empty, | ||
| 827 | // returns false instead. If the queue has at least one element, | ||
| 828 | // moves front to result using operator=, then returns true. | ||
| 829 | template <typename U> | ||
| 830 | bool try_dequeue(U& result) AE_NO_TSAN { | ||
| 831 | if (sema->tryWait()) { | ||
| 832 | bool success = inner.try_dequeue(result); | ||
| 833 | assert(success); | ||
| 834 | AE_UNUSED(success); | ||
| 835 | return true; | ||
| 836 | } | ||
| 837 | return false; | ||
| 838 | } | ||
| 839 | |||
| 840 | // Attempts to dequeue an element; if the queue is empty, | ||
| 841 | // waits until an element is available, then dequeues it. | ||
| 842 | template <typename U> | ||
| 843 | void wait_dequeue(U& result) AE_NO_TSAN { | ||
| 844 | while (!sema->wait()) | ||
| 845 | ; | ||
| 846 | bool success = inner.try_dequeue(result); | ||
| 847 | AE_UNUSED(result); | ||
| 848 | assert(success); | ||
| 849 | AE_UNUSED(success); | ||
| 850 | } | ||
| 851 | |||
| 852 | // Attempts to dequeue an element; if the queue is empty, | ||
| 853 | // waits until an element is available up to the specified timeout, | ||
| 854 | // then dequeues it and returns true, or returns false if the timeout | ||
| 855 | // expires before an element can be dequeued. | ||
| 856 | // Using a negative timeout indicates an indefinite timeout, | ||
| 857 | // and is thus functionally equivalent to calling wait_dequeue. | ||
| 858 | template <typename U> | ||
| 859 | bool wait_dequeue_timed(U& result, std::int64_t timeout_usecs) AE_NO_TSAN { | ||
| 860 | if (!sema->wait(timeout_usecs)) { | ||
| 861 | return false; | ||
| 862 | } | ||
| 863 | bool success = inner.try_dequeue(result); | ||
| 864 | AE_UNUSED(result); | ||
| 865 | assert(success); | ||
| 866 | AE_UNUSED(success); | ||
| 867 | return true; | ||
| 868 | } | ||
| 869 | |||
| 870 | #if __cplusplus > 199711L || _MSC_VER >= 1700 | ||
| 871 | // Attempts to dequeue an element; if the queue is empty, | ||
| 872 | // waits until an element is available up to the specified timeout, | ||
| 873 | // then dequeues it and returns true, or returns false if the timeout | ||
| 874 | // expires before an element can be dequeued. | ||
| 875 | // Using a negative timeout indicates an indefinite timeout, | ||
| 876 | // and is thus functionally equivalent to calling wait_dequeue. | ||
| 877 | template <typename U, typename Rep, typename Period> | ||
| 878 | inline bool wait_dequeue_timed(U& result, | ||
| 879 | std::chrono::duration<Rep, Period> const& timeout) AE_NO_TSAN { | ||
| 880 | return wait_dequeue_timed( | ||
| 881 | result, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count()); | ||
| 882 | } | ||
| 883 | #endif | ||
| 884 | |||
| 885 | // Returns a pointer to the front element in the queue (the one that | ||
| 886 | // would be removed next by a call to `try_dequeue` or `pop`). If the | ||
| 887 | // queue appears empty at the time the method is called, nullptr is | ||
| 888 | // returned instead. | ||
| 889 | // Must be called only from the consumer thread. | ||
| 890 | AE_FORCEINLINE T* peek() const AE_NO_TSAN { | ||
| 891 | return inner.peek(); | ||
| 892 | } | ||
| 893 | |||
| 894 | // Removes the front element from the queue, if any, without returning it. | ||
| 895 | // Returns true on success, or false if the queue appeared empty at the time | ||
| 896 | // `pop` was called. | ||
| 897 | AE_FORCEINLINE bool pop() AE_NO_TSAN { | ||
| 898 | if (sema->tryWait()) { | ||
| 899 | bool result = inner.pop(); | ||
| 900 | assert(result); | ||
| 901 | AE_UNUSED(result); | ||
| 902 | return true; | ||
| 903 | } | ||
| 904 | return false; | ||
| 905 | } | ||
| 906 | |||
| 907 | // Returns the approximate number of items currently in the queue. | ||
| 908 | // Safe to call from both the producer and consumer threads. | ||
| 909 | AE_FORCEINLINE size_t size_approx() const AE_NO_TSAN { | ||
| 910 | return sema->availableApprox(); | ||
| 911 | } | ||
| 912 | |||
| 913 | // Returns the total number of items that could be enqueued without incurring | ||
| 914 | // an allocation when this queue is empty. | ||
| 915 | // Safe to call from both the producer and consumer threads. | ||
| 916 | // | ||
| 917 | // NOTE: The actual capacity during usage may be different depending on the consumer. | ||
| 918 | // If the consumer is removing elements concurrently, the producer cannot add to | ||
| 919 | // the block the consumer is removing from until it's completely empty, except in | ||
| 920 | // the case where the producer was writing to the same block the consumer was | ||
| 921 | // reading from the whole time. | ||
| 922 | AE_FORCEINLINE size_t max_capacity() const { | ||
| 923 | return inner.max_capacity(); | ||
| 924 | } | ||
| 925 | |||
| 926 | private: | ||
| 927 | // Disable copying & assignment | ||
| 928 | BlockingReaderWriterQueue(BlockingReaderWriterQueue const&) {} | ||
| 929 | BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue const&) {} | ||
| 930 | |||
| 931 | private: | ||
| 932 | ReaderWriterQueue inner; | ||
| 933 | std::unique_ptr<spsc_sema::LightweightSemaphore> sema; | ||
| 934 | }; | ||
| 935 | |||
| 936 | } // namespace Common | ||
| 937 | |||
| 938 | #ifdef AE_VCPP | ||
| 939 | #pragma warning(pop) | ||
| 940 | #endif | ||
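For reference, a small usage sketch of the blocking variant with one producer thread and one consumer thread. Only the queue API comes from the header above; the include path, the element type and the -1 shutdown sentinel are assumptions made for the example:

    #include <cstdio>
    #include <thread>

    #include "common/reader_writer_queue.h"

    int main() {
        Common::BlockingReaderWriterQueue<int> queue; // SPSC: exactly one producer, one consumer

        std::thread producer([&] {
            for (int i = 0; i < 100; ++i) {
                queue.enqueue(i); // may allocate a new block; only fails if allocation fails
            }
            queue.enqueue(-1);    // invented sentinel meaning "no more items"
        });

        std::thread consumer([&] {
            int value = 0;
            for (;;) {
                queue.wait_dequeue(value); // blocks until the producer signals an item
                if (value == -1) {
                    break;
                }
                std::printf("%d\n", value);
            }
        });

        producer.join();
        consumer.join();
    }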
diff --git a/src/common/scm_rev.cpp.in b/src/common/scm_rev.cpp.in index cc88994c6..f0c124d69 100644 --- a/src/common/scm_rev.cpp.in +++ b/src/common/scm_rev.cpp.in | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include "common/scm_rev.h" | 4 | #include "common/scm_rev.h" |
| 6 | 5 | ||
diff --git a/src/common/scm_rev.h b/src/common/scm_rev.h index 563015ec9..88404316a 100644 --- a/src/common/scm_rev.h +++ b/src/common/scm_rev.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/scope_exit.h b/src/common/scope_exit.h index 35dac3a8f..e9c789c88 100644 --- a/src/common/scope_exit.h +++ b/src/common/scope_exit.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/settings.cpp b/src/common/settings.cpp index d4c52989a..1c7b6dfae 100644 --- a/src/common/settings.cpp +++ b/src/common/settings.cpp | |||
| @@ -62,7 +62,8 @@ void LogSettings() { | |||
| 62 | log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue()); | 62 | log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue()); |
| 63 | log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue()); | 63 | log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue()); |
| 64 | log_setting("Audio_OutputEngine", values.sink_id.GetValue()); | 64 | log_setting("Audio_OutputEngine", values.sink_id.GetValue()); |
| 65 | log_setting("Audio_OutputDevice", values.audio_device_id.GetValue()); | 65 | log_setting("Audio_OutputDevice", values.audio_output_device_id.GetValue()); |
| 66 | log_setting("Audio_InputDevice", values.audio_input_device_id.GetValue()); | ||
| 66 | log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd.GetValue()); | 67 | log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd.GetValue()); |
| 67 | log_path("DataStorage_CacheDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir)); | 68 | log_path("DataStorage_CacheDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir)); |
| 68 | log_path("DataStorage_ConfigDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir)); | 69 | log_path("DataStorage_ConfigDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir)); |
diff --git a/src/common/settings.h b/src/common/settings.h index 2bccb8642..1079cf8cb 100644 --- a/src/common/settings.h +++ b/src/common/settings.h | |||
| @@ -370,10 +370,12 @@ struct TouchFromButtonMap { | |||
| 370 | 370 | ||
| 371 | struct Values { | 371 | struct Values { |
| 372 | // Audio | 372 | // Audio |
| 373 | Setting<std::string> audio_device_id{"auto", "output_device"}; | ||
| 374 | Setting<std::string> sink_id{"auto", "output_engine"}; | 373 | Setting<std::string> sink_id{"auto", "output_engine"}; |
| 374 | Setting<std::string> audio_output_device_id{"auto", "output_device"}; | ||
| 375 | Setting<std::string> audio_input_device_id{"auto", "input_device"}; | ||
| 375 | Setting<bool> audio_muted{false, "audio_muted"}; | 376 | Setting<bool> audio_muted{false, "audio_muted"}; |
| 376 | SwitchableSetting<u8, true> volume{100, 0, 100, "volume"}; | 377 | SwitchableSetting<u8, true> volume{100, 0, 100, "volume"}; |
| 378 | Setting<bool> dump_audio_commands{false, "dump_audio_commands"}; | ||
| 377 | 379 | ||
| 378 | // Core | 380 | // Core |
| 379 | SwitchableSetting<bool> use_multi_core{true, "use_multi_core"}; | 381 | SwitchableSetting<bool> use_multi_core{true, "use_multi_core"}; |
| @@ -501,6 +503,9 @@ struct Values { | |||
| 501 | Setting<bool> enable_ring_controller{true, "enable_ring_controller"}; | 503 | Setting<bool> enable_ring_controller{true, "enable_ring_controller"}; |
| 502 | RingconRaw ringcon_analogs; | 504 | RingconRaw ringcon_analogs; |
| 503 | 505 | ||
| 506 | Setting<bool> enable_ir_sensor{false, "enable_ir_sensor"}; | ||
| 507 | Setting<std::string> ir_sensor_device{"auto", "ir_sensor_device"}; | ||
| 508 | |||
| 504 | // Data Storage | 509 | // Data Storage |
| 505 | Setting<bool> use_virtual_sd{true, "use_virtual_sd"}; | 510 | Setting<bool> use_virtual_sd{true, "use_virtual_sd"}; |
| 506 | Setting<bool> gamecard_inserted{false, "gamecard_inserted"}; | 511 | Setting<bool> gamecard_inserted{false, "gamecard_inserted"}; |
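The renamed audio_output_device_id, the new audio_input_device_id and dump_audio_commands entries, and the enable_ir_sensor / ir_sensor_device pair are all ordinary Setting<...> members, so callers read them the same way the logging code in settings.cpp above does. A hedged sketch of consuming the new values, assuming the usual Settings::values global from settings.h (the helper function itself is hypothetical):

    #include <string>

    #include "common/settings.h"

    // Hypothetical consumer of the settings added in this change.
    void ApplyAudioAndIrSettings() {
        const std::string output_device = Settings::values.audio_output_device_id.GetValue();
        const std::string input_device = Settings::values.audio_input_device_id.GetValue();
        const bool dump_commands = Settings::values.dump_audio_commands.GetValue();

        const bool ir_enabled = Settings::values.enable_ir_sensor.GetValue();
        const std::string ir_device = Settings::values.ir_sensor_device.GetValue();

        // ... hand these to the audio sinks and the IR camera backend ...
        (void)output_device; (void)input_device; (void)dump_commands;
        (void)ir_enabled; (void)ir_device;
    }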
diff --git a/src/common/telemetry.cpp b/src/common/telemetry.cpp index 67261c55b..d26394359 100644 --- a/src/common/telemetry.cpp +++ b/src/common/telemetry.cpp | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2017 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #include <algorithm> | 4 | #include <algorithm> |
| 6 | #include <cstring> | 5 | #include <cstring> |
diff --git a/src/common/telemetry.h b/src/common/telemetry.h index f9a824a7d..ba633d5a5 100644 --- a/src/common/telemetry.h +++ b/src/common/telemetry.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2017 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/uint128.h b/src/common/uint128.h index 199d0f55e..f890ffec2 100644 --- a/src/common/uint128.h +++ b/src/common/uint128.h | |||
| @@ -31,17 +31,12 @@ namespace Common { | |||
| 31 | return _udiv128(r[1], r[0], d, &remainder); | 31 | return _udiv128(r[1], r[0], d, &remainder); |
| 32 | #endif | 32 | #endif |
| 33 | #else | 33 | #else |
| 34 | #ifdef __SIZEOF_INT128__ | ||
| 35 | const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b); | ||
| 36 | return static_cast<u64>(product / d); | ||
| 37 | #else | ||
| 38 | const u64 diva = a / d; | 34 | const u64 diva = a / d; |
| 39 | const u64 moda = a % d; | 35 | const u64 moda = a % d; |
| 40 | const u64 divb = b / d; | 36 | const u64 divb = b / d; |
| 41 | const u64 modb = b % d; | 37 | const u64 modb = b % d; |
| 42 | return diva * b + moda * divb + moda * modb / d; | 38 | return diva * b + moda * divb + moda * modb / d; |
| 43 | #endif | 39 | #endif |
| 44 | #endif | ||
| 45 | } | 40 | } |
| 46 | 41 | ||
| 47 | // This function multiplies 2 u64 values and produces a u128 value; | 42 | // This function multiplies 2 u64 values and produces a u128 value; |
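This hunk removes the unsigned __int128 fast path and keeps only the decomposition fallback. That fallback is exact rather than approximate (leaving aside possible overflow of the intermediate terms): writing a = (a/d)*d + a%d and b = (b/d)*d + b%d gives, under integer division, a*b/d = (a/d)*b + (a%d)*(b/d) + ((a%d)*(b%d))/d. A small self-contained check of the identity, with values chosen so no intermediate product overflows 64 bits:

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t a = 123456789012345ull;
        const std::uint64_t b = 100000ull;
        const std::uint64_t d = 977ull;

        const std::uint64_t diva = a / d, moda = a % d;
        const std::uint64_t divb = b / d, modb = b % d;

        // Same expression as the fallback kept by this change.
        assert(a * b / d == diva * b + moda * divb + moda * modb / d);
    }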
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp index 6aaa8cdf9..8b08332ab 100644 --- a/src/common/x64/native_clock.cpp +++ b/src/common/x64/native_clock.cpp | |||
| @@ -75,8 +75,8 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen | |||
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | u64 NativeClock::GetRTSC() { | 77 | u64 NativeClock::GetRTSC() { |
| 78 | TimePoint current_time_point{}; | ||
| 79 | TimePoint new_time_point{}; | 78 | TimePoint new_time_point{}; |
| 79 | TimePoint current_time_point{}; | ||
| 80 | 80 | ||
| 81 | current_time_point.pack = Common::AtomicLoad128(time_point.pack.data()); | 81 | current_time_point.pack = Common::AtomicLoad128(time_point.pack.data()); |
| 82 | do { | 82 | do { |
diff --git a/src/common/x64/xbyak_abi.h b/src/common/x64/xbyak_abi.h index 87b3d63a4..67e6e63c8 100644 --- a/src/common/x64/xbyak_abi.h +++ b/src/common/x64/xbyak_abi.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||
diff --git a/src/common/x64/xbyak_util.h b/src/common/x64/xbyak_util.h index 44d2558f1..250e5cddb 100644 --- a/src/common/x64/xbyak_util.h +++ b/src/common/x64/xbyak_util.h | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | // Refer to the license.txt file included. | ||
| 4 | 3 | ||
| 5 | #pragma once | 4 | #pragma once |
| 6 | 5 | ||