HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
SYS_MemoryOrder.h
Go to the documentation of this file.
1 /*
2  * PROPRIETARY INFORMATION. This software is proprietary to
3  * Side Effects Software Inc., and is not to be reproduced,
4  * transmitted, or disclosed in any way without written permission.
5  *
6  * NAME: SYS_MemoryOrder.h ( UT Library, C++)
7  *
8  * COMMENTS: Enumerated type for memory order of atomic operations.
9  */
10 
11 #ifndef __SYS_MemoryOrder__
12 #define __SYS_MemoryOrder__
13 
14 #include "SYS_Compiler.h"
15 #include "SYS_Deprecated.h"
16 #include <atomic>
17 
// Select the fence implementation for the current platform.
//
// FORCE_NON_SIMD builds compile all three fences away to no-ops.
// Otherwise the SSE intrinsic headers are pulled in where available, and
// any fence the target cannot implement is defined away as an empty
// macro.  The inline function definitions later in this file are only
// compiled when the corresponding macro is NOT defined here.
#if defined(FORCE_NON_SIMD)
 #define SYSloadFence()
 #define SYSstoreFence()
 #define SYSmemoryFence()
#else
 #if defined(LINUX) && SYS_IS_GCC_GE(3, 4) && defined(__SSE2__)
 #include <emmintrin.h>
 #include <xmmintrin.h>
 #elif defined(LINUX) && SYS_IS_GCC_GE(3, 4) && defined(__SSE__)
 // SSE without SSE2 provides sfence (xmmintrin.h) but not lfence/mfence
 // (emmintrin.h), so the load and full fences become no-ops here.
 #include <xmmintrin.h>
 #define SYSloadFence()
 #define SYSmemoryFence()
 #elif defined(WIN32)
 #include <emmintrin.h>
 #include <xmmintrin.h>
 #elif defined(MBSD_INTEL)
 #include <emmintrin.h>
 #include <xmmintrin.h>
 #elif defined(ARM64)
 // sse2neon translates the x86 _mm_*fence intrinsics to ARM barriers.
 #include <sse2neon.h>
 #else
 // Unknown platform: all fences compile away to nothing.
 #define SYSloadFence()
 #define SYSstoreFence()
 #define SYSmemoryFence()
 #endif
#endif
44 
45 
/// The memory-ordering constraint to apply to an atomic operation.
/// This is a direct alias of the standard C++ memory order enumeration,
/// and the SYS_MEMORY_ORDER_* constants below alias its enumerators.
using SYS_MemoryOrder = std::memory_order;

/// Any reordering the compiler or hardware chooses to do is okay.
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_RELAXED = std::memory_order_relaxed;

/// Prevents any reads by the same thread that follow this in
/// program order from occurring before this read, i.e. a
/// compiler and CPU load fence is placed after the read.
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_ACQUIRE = std::memory_order_acquire;

/// Deprecated name for acquire ordering; use SYS_MEMORY_ORDER_ACQUIRE.
/// NOTE(review): this definition was lost in extraction (only
/// "inline constexpr" survived); reconstructed from this file's symbol
/// index, which lists SYS_MEMORY_ORDER_LOAD and SYS_DEPRECATED_REPLACE.
/// The deprecation version number is a guess -- confirm against the
/// original header.  The #ifdef guard is always true when this header is
/// used normally (SYS_Deprecated.h is included above).
#ifdef SYS_DEPRECATED_REPLACE
SYS_DEPRECATED_REPLACE(20.0, "SYS_MEMORY_ORDER_ACQUIRE")
#endif
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_LOAD = std::memory_order_acquire;

/// Prevents any writes by the same thread that precede this in
/// program order from occurring after this write, i.e. a
/// compiler and CPU store fence is placed before the write.
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_RELEASE = std::memory_order_release;

/// Deprecated name for release ordering; use SYS_MEMORY_ORDER_RELEASE.
/// NOTE(review): reconstructed like SYS_MEMORY_ORDER_LOAD above --
/// confirm the deprecation version against the original header.
#ifdef SYS_DEPRECATED_REPLACE
SYS_DEPRECATED_REPLACE(20.0, "SYS_MEMORY_ORDER_RELEASE")
#endif
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_STORE = std::memory_order_release;

/// A read-modify-write operation with this memory order is both an acquire
/// operation and a release operation. No memory reads or writes in the
/// current thread can be reordered before the load, nor after the store.
/// All writes in other threads that release the same atomic variable are
/// visible before the modification and the modification is visible in
/// other threads that acquire the same atomic variable.
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_ACQ_REL = std::memory_order_acq_rel;

/// The current operation will be executed before any loads or
/// stores that follow it in program order and after any loads
/// or stores that precede it. Moreover, sequential consistency
/// is assured meaning that all observers will see this operation
/// in the same order relative to any other MEMORY_ORDER_SEQ_CST
/// operations.
inline constexpr
SYS_MemoryOrder SYS_MEMORY_ORDER_SEQ_CST = std::memory_order_seq_cst;
89 
90 
#ifndef SYSloadFence
/// Memory load fence:
///
/// This forces any memory reads from before this fence to finish before
/// any following memory reads, but does not enforce any ordering with
/// respect to memory writes.
///
/// Only defined as a function when the platform-selection block above did
/// not define SYSloadFence away as an empty macro.
inline void SYSloadFence()
{
#if defined(ARM64)
    // sse2neon maps _mm_sfence() to __sync_synchronize(),
    // which is a full memory barrier for both reads and writes.
    _mm_sfence();
#else
    // x86 lfence: orders earlier loads before later loads.
    _mm_lfence();
#endif
}
#endif
108 
#ifndef SYSstoreFence
/// Memory store fence:
///
/// This forces any memory writes from before this fence to finish before
/// any following memory writes, but does not enforce any ordering with
/// respect to memory reads.
///
/// Only defined as a function when the platform-selection block above did
/// not define SYSstoreFence away as an empty macro.
inline void SYSstoreFence()
{
    // x86 sfence: orders earlier stores before later stores.  On ARM64,
    // sse2neon maps this to __sync_synchronize(), a full barrier.
    _mm_sfence();
}
#endif
120 
#ifndef SYSmemoryFence
/// Full memory fence:
///
/// This forces any memory reads or writes from before this fence to finish before
/// any following memory reads or writes.
/// NOTE: This is *not* equivalent to a load fence and a store fence, because
/// e.g., that would not ensure that any writes have finished before any
/// following reads or vice versa.
///
/// Only defined as a function when the platform-selection block above did
/// not define SYSmemoryFence away as an empty macro.
inline void SYSmemoryFence()
{
#if defined(ARM64)
    // sse2neon maps _mm_sfence() to __sync_synchronize(),
    // which is a full memory barrier for both reads and writes.
    _mm_sfence();
#else
    // x86 mfence: full barrier ordering all earlier loads and stores
    // before all later loads and stores.
    _mm_mfence();
#endif
}
#endif
140 
141 #endif
#define SYSloadFence()
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_SEQ_CST
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_RELEASE
#define SYS_DEPRECATED_REPLACE(__V__, __R__)
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_ACQ_REL
#define SYSstoreFence()
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_RELAXED
Any reordering the compiler or hardware chooses to do is okay.
std::memory_order SYS_MemoryOrder
#define SYSmemoryFence()
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_LOAD
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_ACQUIRE
constexpr SYS_MemoryOrder SYS_MEMORY_ORDER_STORE