Diffstat (limited to 'src/common/atomic_helpers.h')
-rw-r--r-- | src/common/atomic_helpers.h | 775
1 file changed, 775 insertions(+), 0 deletions(-)
diff --git a/src/common/atomic_helpers.h b/src/common/atomic_helpers.h
new file mode 100644
index 000000000..bef5015c1
--- /dev/null
+++ b/src/common/atomic_helpers.h
@@ -0,0 +1,775 @@
// SPDX-FileCopyrightText: 2013-2016 Cameron Desrochers
// SPDX-FileCopyrightText: 2015 Jeff Preshing
// SPDX-License-Identifier: BSD-2-Clause AND Zlib

// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).

#pragma once

// Provides a portable (VC++ 2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant)
// implementation of low-level memory barriers, plus a few semi-portable utility macros (for
// inlining and alignment). Also has a basic atomic type (limited to hardware-supported atomics
// with no memory ordering guarantees). Uses the AE_* prefix for macros (for historical reasons)
// and the "Common" namespace for symbols.

#include <cassert>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <type_traits>

// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif

// AE_UNUSED
#define AE_UNUSED(x) ((void)x)

// AE_NO_TSAN/AE_TSAN_ANNOTATE_*
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#if __cplusplus >= 201703L // inline variables require C++17
namespace Common {
inline int ae_tsan_global;
}
#define AE_TSAN_ANNOTATE_RELEASE() \
    AnnotateHappensBefore(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
#define AE_TSAN_ANNOTATE_ACQUIRE() \
    AnnotateHappensAfter(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
extern "C" void AnnotateHappensBefore(const char*, int, void*);
extern "C" void AnnotateHappensAfter(const char*, int, void*);
#else // when we can't annotate for TSan, attempt to suppress its warnings instead
#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
#endif
#endif
#endif
#ifndef AE_NO_TSAN
#define AE_NO_TSAN
#endif
#ifndef AE_TSAN_ANNOTATE_RELEASE
#define AE_TSAN_ANNOTATE_RELEASE()
#define AE_TSAN_ANNOTATE_ACQUIRE()
#endif

// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif

// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC-compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif

// Portable atomic fences implemented below:

namespace Common {

enum memory_order {
    memory_order_relaxed,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst,

    // memory_order_sync: Forces a full sync:
    // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
    memory_order_sync = memory_order_seq_cst
};

} // namespace Common
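
// A minimal usage sketch (illustrative comment only, not part of this header's API):
// pairing a release fence in the producer with an acquire fence in the consumer
// publishes plain data through a weak_atomic flag (weak_atomic is defined further
// below). `data` and `flag` are hypothetical names:
//
//     int data;                      // plain, non-atomic payload
//     Common::weak_atomic<int> flag; // 0 = not ready, 1 = ready
//
//     // Producer:
//     data = 42;
//     Common::fence(Common::memory_order_release); // payload write can't sink below this
//     flag = 1;                                    // relaxed store of the flag
//
//     // Consumer:
//     while (flag.load() == 0) {} // relaxed spin until published
//     Common::fence(Common::memory_order_acquire); // flag read can't reorder past this
//     assert(data == 42); // guaranteed to observe the payload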
111
#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || \
    (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, so implement our own fences

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif

#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable : 4365) // Disable erroneous 'conversion from long to unsigned int,
                                // signed/unsigned mismatch' error when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif

namespace Common {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
    switch (order) {
    case memory_order_relaxed:
        break;
    case memory_order_acquire:
        _ReadBarrier();
        break;
    case memory_order_release:
        _WriteBarrier();
        break;
    case memory_order_acq_rel:
        _ReadWriteBarrier();
        break;
    case memory_order_seq_cst:
        _ReadWriteBarrier();
        break;
    default:
        assert(false);
    }
}

// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so only compiler
// barriers are needed for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
    switch (order) {
    case memory_order_relaxed:
        break;
    case memory_order_acquire:
        _ReadBarrier();
        break;
    case memory_order_release:
        _WriteBarrier();
        break;
    case memory_order_acq_rel:
        _ReadWriteBarrier();
        break;
    case memory_order_seq_cst:
        _ReadWriteBarrier();
        AeFullSync();
        _ReadWriteBarrier();
        break;
    default:
        assert(false);
    }
}
#else
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
    // Non-specialized arch, use heavier memory barriers everywhere just in case :-(
    switch (order) {
    case memory_order_relaxed:
        break;
    case memory_order_acquire:
        _ReadBarrier();
        AeLiteSync();
        _ReadBarrier();
        break;
    case memory_order_release:
        _WriteBarrier();
        AeLiteSync();
        _WriteBarrier();
        break;
    case memory_order_acq_rel:
        _ReadWriteBarrier();
        AeLiteSync();
        _ReadWriteBarrier();
        break;
    case memory_order_seq_cst:
        _ReadWriteBarrier();
        AeFullSync();
        _ReadWriteBarrier();
        break;
    default:
        assert(false);
    }
}
#endif
} // namespace Common
#else
// Use the standard library's atomic fences
#include <atomic>

namespace Common {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
    switch (order) {
    case memory_order_relaxed:
        break;
    case memory_order_acquire:
        std::atomic_signal_fence(std::memory_order_acquire);
        break;
    case memory_order_release:
        std::atomic_signal_fence(std::memory_order_release);
        break;
    case memory_order_acq_rel:
        std::atomic_signal_fence(std::memory_order_acq_rel);
        break;
    case memory_order_seq_cst:
        std::atomic_signal_fence(std::memory_order_seq_cst);
        break;
    default:
        assert(false);
    }
}

AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
    switch (order) {
    case memory_order_relaxed:
        break;
    case memory_order_acquire:
        AE_TSAN_ANNOTATE_ACQUIRE();
        std::atomic_thread_fence(std::memory_order_acquire);
        break;
    case memory_order_release:
        AE_TSAN_ANNOTATE_RELEASE();
        std::atomic_thread_fence(std::memory_order_release);
        break;
    case memory_order_acq_rel:
        AE_TSAN_ANNOTATE_ACQUIRE();
        AE_TSAN_ANNOTATE_RELEASE();
        std::atomic_thread_fence(std::memory_order_acq_rel);
        break;
    case memory_order_seq_cst:
        AE_TSAN_ANNOTATE_ACQUIRE();
        AE_TSAN_ANNOTATE_RELEASE();
        std::atomic_thread_fence(std::memory_order_seq_cst);
        break;
    default:
        assert(false);
    }
}

} // namespace Common

#endif

#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>

// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store
// guarantees at the hardware level -- on most platforms this generally means aligned pointers
// and integers (only).
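//
// A short usage sketch (illustrative, hypothetical names): operations are tear-free
// but carry no ordering on their own, so cross-thread publication must be ordered
// explicitly with fence(), as in the sketch after the memory_order enum above.
//
//     Common::weak_atomic<std::uint32_t> counter(0);
//     std::uint32_t prev = counter.fetch_add_release(1); // returns the previous value
//     std::uint32_t seen = counter.load();               // relaxed; may observe a stale value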
namespace Common {
template <typename T>
class weak_atomic {
public:
    AE_NO_TSAN weak_atomic() : value() {}
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable : 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
    template <typename U>
    AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) {}
#ifdef __cplusplus_cli
    // Work around a bug with the universal reference/nullptr combination that only appears
    // when /clr is on
    AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) {}
#endif
    AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) {}
    AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) {}
#ifdef AE_VCPP
#pragma warning(pop)
#endif

    AE_FORCEINLINE operator T() const AE_NO_TSAN {
        return load();
    }

#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    template <typename U>
    AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
        value = std::forward<U>(x);
        return *this;
    }
    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
        value = other.value;
        return *this;
    }

    AE_FORCEINLINE T load() const AE_NO_TSAN {
        return value;
    }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4)
            return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8)
            return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32-bit or 64-bit type");
        return value;
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4)
            return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8)
            return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32-bit or 64-bit type");
        return value;
    }
#else
    template <typename U>
    AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
        value.store(std::forward<U>(x), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
        value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE T load() const AE_NO_TSAN {
        return value.load(std::memory_order_relaxed);
    }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
        return value.fetch_add(increment, std::memory_order_acquire);
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
        return value.fetch_add(increment, std::memory_order_release);
    }
#endif

private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    // No std::atomic support, but we still need to circumvent compiler optimizations.
    // `volatile` will make memory access slow, but is guaranteed to be reliable.
    volatile T value;
#else
    std::atomic<T> value;
#endif
};

} // namespace Common

// Portable single-producer, single-consumer semaphore below:

#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack, but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
struct _SECURITY_ATTRIBUTES;
__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes,
                                                       long lInitialCount, long lMaximumCount,
                                                       const wchar_t* lpName);
__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle,
                                                                  unsigned long dwMilliseconds);
__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount,
                                                     long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#elif defined(FREERTOS)
#include <FreeRTOS.h>
#include <semphr.h>
#include <task.h>
#endif

namespace Common {
// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
// portable + lightweight semaphore implementations, originally from
// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
// LICENSE:
// Copyright (c) 2015 Jeff Preshing
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgement in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
namespace spsc_sema {
#if defined(_WIN32)
class Semaphore {
private:
    void* m_hSema;

    Semaphore(const Semaphore& other);
    Semaphore& operator=(const Semaphore& other);

public:
    AE_NO_TSAN Semaphore(int initialCount = 0) : m_hSema() {
        assert(initialCount >= 0);
        const long maxLong = 0x7fffffff;
        m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
        assert(m_hSema);
    }

    AE_NO_TSAN ~Semaphore() {
        CloseHandle(m_hSema);
    }

    bool wait() AE_NO_TSAN {
        const unsigned long infinite = 0xffffffff;
        return WaitForSingleObject(m_hSema, infinite) == 0;
    }

    bool try_wait() AE_NO_TSAN {
        return WaitForSingleObject(m_hSema, 0) == 0;
    }

    bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
        return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
    }

    void signal(int count = 1) AE_NO_TSAN {
        while (!ReleaseSemaphore(m_hSema, count, nullptr))
            ;
    }
};
#elif defined(__MACH__)
//---------------------------------------------------------
// Semaphore (Apple iOS and OSX)
// Can't use POSIX semaphores due to
// http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
//---------------------------------------------------------
class Semaphore {
private:
    semaphore_t m_sema;

    Semaphore(const Semaphore& other);
    Semaphore& operator=(const Semaphore& other);

public:
    AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
        assert(initialCount >= 0);
        kern_return_t rc =
            semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
        assert(rc == KERN_SUCCESS);
        AE_UNUSED(rc);
    }

    AE_NO_TSAN ~Semaphore() {
        semaphore_destroy(mach_task_self(), m_sema);
    }

    bool wait() AE_NO_TSAN {
        return semaphore_wait(m_sema) == KERN_SUCCESS;
    }

    bool try_wait() AE_NO_TSAN {
        return timed_wait(0);
    }

    bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN {
        mach_timespec_t ts;
        ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
        ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);

        // semaphore_timedwait() was added in OSX 10.10:
        // https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
        kern_return_t rc = semaphore_timedwait(m_sema, ts);
        return rc == KERN_SUCCESS;
    }

    void signal() AE_NO_TSAN {
        while (semaphore_signal(m_sema) != KERN_SUCCESS)
            ;
    }

    void signal(int count) AE_NO_TSAN {
        while (count-- > 0) {
            while (semaphore_signal(m_sema) != KERN_SUCCESS)
                ;
        }
    }
};
#elif defined(__unix__)
//---------------------------------------------------------
// Semaphore (POSIX, Linux)
//---------------------------------------------------------
class Semaphore {
private:
    sem_t m_sema;

    Semaphore(const Semaphore& other);
    Semaphore& operator=(const Semaphore& other);

public:
    AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
        assert(initialCount >= 0);
        int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
        assert(rc == 0);
        AE_UNUSED(rc);
    }

    AE_NO_TSAN ~Semaphore() {
        sem_destroy(&m_sema);
    }

    bool wait() AE_NO_TSAN {
        // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
        int rc;
        do {
            rc = sem_wait(&m_sema);
        } while (rc == -1 && errno == EINTR);
        return rc == 0;
    }

    bool try_wait() AE_NO_TSAN {
        int rc;
        do {
            rc = sem_trywait(&m_sema);
        } while (rc == -1 && errno == EINTR);
        return rc == 0;
    }

    bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
        struct timespec ts;
        const int usecs_in_1_sec = 1000000;
        const int nsecs_in_1_sec = 1000000000;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
        ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
        // sem_timedwait bombs if you have more than 1e9 in tv_nsec,
        // so we have to clean things up before passing it in
        if (ts.tv_nsec >= nsecs_in_1_sec) {
            ts.tv_nsec -= nsecs_in_1_sec;
            ++ts.tv_sec;
        }

        int rc;
        do {
            rc = sem_timedwait(&m_sema, &ts);
        } while (rc == -1 && errno == EINTR);
        return rc == 0;
    }

    void signal() AE_NO_TSAN {
        while (sem_post(&m_sema) == -1)
            ;
    }

    void signal(int count) AE_NO_TSAN {
        while (count-- > 0) {
            while (sem_post(&m_sema) == -1)
                ;
        }
    }
};
#elif defined(FREERTOS)
//---------------------------------------------------------
// Semaphore (FreeRTOS)
//---------------------------------------------------------
class Semaphore {
private:
    SemaphoreHandle_t m_sema;

    Semaphore(const Semaphore& other);
    Semaphore& operator=(const Semaphore& other);

public:
    AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
        assert(initialCount >= 0);
        m_sema = xSemaphoreCreateCounting(static_cast<UBaseType_t>(~0ull),
                                          static_cast<UBaseType_t>(initialCount));
        assert(m_sema);
    }

    AE_NO_TSAN ~Semaphore() {
        vSemaphoreDelete(m_sema);
    }

    bool wait() AE_NO_TSAN {
        return xSemaphoreTake(m_sema, portMAX_DELAY) == pdTRUE;
    }

    bool try_wait() AE_NO_TSAN {
        // Note: in an ISR context, if this causes a task to unblock,
        // the caller won't know about it
        if (xPortIsInsideInterrupt())
            return xSemaphoreTakeFromISR(m_sema, NULL) == pdTRUE;
        return xSemaphoreTake(m_sema, 0) == pdTRUE;
    }

    bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
        std::uint64_t msecs = usecs / 1000;
        TickType_t ticks = static_cast<TickType_t>(msecs / portTICK_PERIOD_MS);
        if (ticks == 0)
            return try_wait();
        return xSemaphoreTake(m_sema, ticks) == pdTRUE;
    }

    void signal() AE_NO_TSAN {
        // Note: in an ISR context, if this causes a task to unblock,
        // the caller won't know about it
        BaseType_t rc;
        if (xPortIsInsideInterrupt())
            rc = xSemaphoreGiveFromISR(m_sema, NULL);
        else
            rc = xSemaphoreGive(m_sema);
        assert(rc == pdTRUE);
        AE_UNUSED(rc);
    }

    void signal(int count) AE_NO_TSAN {
        while (count-- > 0)
            signal();
    }
};
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif

//---------------------------------------------------------
// LightweightSemaphore
//---------------------------------------------------------
class LightweightSemaphore {
public:
    typedef std::make_signed<std::size_t>::type ssize_t;

private:
    weak_atomic<ssize_t> m_count;
    Semaphore m_sema;

    bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN {
        ssize_t oldCount;
        // Is there a better way to set the initial spin count?
        // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows
        // PC, as threads start hitting the kernel semaphore.
        int spin = 1024;
        while (--spin >= 0) {
            if (m_count.load() > 0) {
                m_count.fetch_add_acquire(-1);
                return true;
            }
            compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop
        }
        oldCount = m_count.fetch_add_acquire(-1);
        if (oldCount > 0)
            return true;
        if (timeout_usecs < 0) {
            if (m_sema.wait())
                return true;
        }
        if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<std::uint64_t>(timeout_usecs)))
            return true;
        // At this point, we've timed out waiting for the semaphore, but the
        // count is still decremented, indicating that we may still be waiting
        // on it. So we have to re-adjust the count, but only if the semaphore
        // wasn't signaled enough times for us in the meantime. If it was, we
        // need to take that signal instead.
        while (true) {
            oldCount = m_count.fetch_add_release(1);
            if (oldCount < 0)
                return false; // successfully restored things to the way they were
            // Oh, the producer thread just signaled the semaphore after all. Try again:
            oldCount = m_count.fetch_add_acquire(-1);
            if (oldCount > 0 && m_sema.try_wait())
                return true;
        }
    }

public:
    AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema() {
        assert(initialCount >= 0);
    }

    bool tryWait() AE_NO_TSAN {
        if (m_count.load() > 0) {
            m_count.fetch_add_acquire(-1);
            return true;
        }
        return false;
    }

    bool wait() AE_NO_TSAN {
        return tryWait() || waitWithPartialSpinning();
    }

    bool wait(std::int64_t timeout_usecs) AE_NO_TSAN {
        return tryWait() || waitWithPartialSpinning(timeout_usecs);
    }

    void signal(ssize_t count = 1) AE_NO_TSAN {
        assert(count >= 0);
        ssize_t oldCount = m_count.fetch_add_release(count);
        assert(oldCount >= -1);
        if (oldCount < 0) {
            m_sema.signal(1);
        }
    }

    std::size_t availableApprox() const AE_NO_TSAN {
        ssize_t count = m_count.load();
        return count > 0 ? static_cast<std::size_t>(count) : 0;
    }
};
} // namespace spsc_sema
} // namespace Common
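
// A minimal SPSC handoff sketch (illustrative; `queue` and `item` are hypothetical):
// the consumer spins briefly inside waitWithPartialSpinning() before falling back to
// the kernel semaphore, so an uncontended wait never enters the kernel.
//
//     Common::spsc_sema::LightweightSemaphore items;
//
//     // Producer thread:
//     queue.push(item);
//     items.signal(); // release; wakes the consumer only if it actually blocked
//
//     // Consumer thread:
//     if (items.wait()) // or items.wait(500) to give up after ~500 microseconds
//         item = queue.pop();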

#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif