Diffstat (limited to 'src/common/atomic_gcc.h')
-rw-r--r--  src/common/atomic_gcc.h | 110
1 file changed, 0 insertions(+), 110 deletions(-)
diff --git a/src/common/atomic_gcc.h b/src/common/atomic_gcc.h
deleted file mode 100644
index 117e342f6..000000000
--- a/src/common/atomic_gcc.h
+++ /dev/null
@@ -1,110 +0,0 @@
// Copyright 2013 Dolphin Emulator Project
// Licensed under GPLv2
// Refer to the license.txt file included.

#pragma once

#include "common/common.h"

// Atomic operations are performed in a single step by the CPU. It is
// impossible for other threads to see the operation "half-done."
//
// Some atomic operations can be combined with different types of memory
// barriers called "Acquire semantics" and "Release semantics", defined below.
//
// Acquire semantics: Future memory accesses cannot be reordered to before the
// operation.
//
// Release semantics: Past memory accesses cannot be reordered to after the
// operation.
//
// These barriers affect not only the compiler, but also the CPU.

namespace Common
{

inline void AtomicAdd(volatile u32& target, u32 value) {
    __sync_add_and_fetch(&target, value);
}

inline void AtomicAnd(volatile u32& target, u32 value) {
    __sync_and_and_fetch(&target, value);
}

inline void AtomicDecrement(volatile u32& target) {
    __sync_add_and_fetch(&target, -1);
}

inline void AtomicIncrement(volatile u32& target) {
    __sync_add_and_fetch(&target, 1);
}

inline u32 AtomicLoad(volatile u32& src) {
    return src; // 32-bit reads are always atomic.
}

inline u32 AtomicLoadAcquire(volatile u32& src) {
    // Keep the compiler from caching or reordering any memory references.
    u32 result = src; // 32-bit reads are always atomic.
    //__sync_synchronize(); // TODO: May not be necessary.
    // Compiler barrier only. x86 loads always have acquire semantics.
    __asm__ __volatile__ ( "":::"memory" );
    return result;
}

inline void AtomicOr(volatile u32& target, u32 value) {
    __sync_or_and_fetch(&target, value);
}

inline void AtomicStore(volatile u32& dest, u32 value) {
    dest = value; // 32-bit writes are always atomic.
}

inline void AtomicStoreRelease(volatile u32& dest, u32 value) {
    __sync_lock_test_and_set(&dest, value); // TODO: Wrong! This function has acquire semantics, not release.
}

}

// Old code kept here for reference in case we need the parts with __asm__ __volatile__.
#if 0
LONG SyncInterlockedIncrement(LONG *Dest)
{
#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
    return __sync_add_and_fetch(Dest, 1);
#else
    register int result;
    __asm__ __volatile__("lock; xadd %0,%1"
                         : "=r" (result), "=m" (*Dest)
                         : "0" (1), "m" (*Dest)
                         : "memory");
    return result;
#endif
}

LONG SyncInterlockedExchangeAdd(LONG *Dest, LONG Val)
{
#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
    return __sync_add_and_fetch(Dest, Val);
#else
    register int result;
    __asm__ __volatile__("lock; xadd %0,%1"
                         : "=r" (result), "=m" (*Dest)
                         : "0" (Val), "m" (*Dest)
                         : "memory");
    return result;
#endif
}

LONG SyncInterlockedExchange(LONG *Dest, LONG Val)
{
#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
    return __sync_lock_test_and_set(Dest, Val);
#else
    register int result;
    __asm__ __volatile__("lock; xchg %0,%1"
                         : "=r" (result), "=m" (*Dest)
                         : "0" (Val), "m" (*Dest)
                         : "memory");
    return result;
#endif
}
#endif
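
For context on the acquire/release comments in the deleted header: the sketch below is not part of this commit, and the Producer/Consumer names are illustrative only. It shows the same pairing expressed with C++11 std::atomic, which is the portable way to get these semantics without GCC-specific builtins.

// A minimal sketch of the acquire/release pairing described in the deleted
// header's comment, using C++11 <atomic>. Not part of this commit.
#include <atomic>
#include <cassert>
#include <thread>

static int payload = 0;                // plain data published by the writer
static std::atomic<bool> ready{false}; // flag carrying the release/acquire pair

static void Producer() {
    payload = 42;                                 // past write...
    ready.store(true, std::memory_order_release); // ...cannot be reordered after this store
}

static void Consumer() {
    while (!ready.load(std::memory_order_acquire)) {} // future reads cannot move before this load
    assert(payload == 42); // guaranteed to observe the Producer's write
}

int main() {
    std::thread t1(Producer);
    std::thread t2(Consumer);
    t1.join();
    t2.join();
    return 0;
}

On x86, both the release store and the acquire load compile to plain mov instructions plus a compiler barrier, which is what the deleted wrappers were hand-rolling.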
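The AtomicStoreRelease TODO above is the one acknowledged bug in the file: GCC documents __sync_lock_test_and_set as an acquire barrier, so it does not order earlier writes before the store. A hedged sketch of what a fix could look like follows; it is not the replacement this commit actually introduced, the function names are illustrative, and u32 is a stand-in for the project's 32-bit typedef.

// Sketch only, assuming the same GCC intrinsics the deleted header used.
#include <cstdint>
typedef std::uint32_t u32; // stand-in for the project's u32 typedef

// Release store built from the legacy __sync intrinsics: a full barrier
// keeps past accesses from moving after the (already atomic) 32-bit store.
inline void AtomicStoreRelease(volatile u32& dest, u32 value) {
    __sync_synchronize();
    dest = value;
}

// With GCC >= 4.7, the __atomic builtins express the exact semantics instead:
inline void AtomicStoreReleaseModern(volatile u32& dest, u32 value) {
    __atomic_store_n(&dest, value, __ATOMIC_RELEASE);
}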