/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>
#include <stdatomic.h>

#ifndef ANDROID_ATOMIC_INLINE
#define ANDROID_ATOMIC_INLINE static inline
#endif

/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY. AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise. Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h (see the sketch after this comment).
 * - Where supported, C++11 std::atomic<T>.
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 * - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 * - barrier
 * - [do work]
 * - barrier
 * - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU. Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated. To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */

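/*
 * For new code, prefer plain C11 <stdatomic.h> directly. A minimal sketch
 * of that style (the function name is illustrative only, not part of this
 * header): a shared counter updated with the default sequentially
 * consistent ordering.
 */
ANDROID_ATOMIC_INLINE
int32_t example_counter_inc(atomic_int_least32_t* counter)
{
    /* Like android_atomic_inc() below, this returns the previous value. */
    return atomic_fetch_add(counter, 1);
}
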
#if ANDROID_SMP == 0
# define ANDROID_ATOMIC_ACQUIRE memory_order_relaxed
# define ANDROID_ATOMIC_RELEASE memory_order_relaxed
#else
# define ANDROID_ATOMIC_ACQUIRE memory_order_acquire
# define ANDROID_ATOMIC_RELEASE memory_order_release
#endif

/*
 * Basic arithmetic and bitwise operations. These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_inc(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    /* Int32_t, if it exists, is the same as int_least32_t. */
    return atomic_fetch_add_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_dec(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_sub_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_add_explicit(a, value, ANDROID_ATOMIC_RELEASE);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_and_explicit(a, value, ANDROID_ATOMIC_RELEASE);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_or_explicit(a, value, ANDROID_ATOMIC_RELEASE);
}

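/*
 * A typical legacy use of these operations is reference counting. A
 * minimal sketch, with illustrative names that are not part of this
 * header: the release ordering on the decrement ensures that this
 * thread's writes to the object become visible before the count can be
 * observed to reach zero on another thread.
 */
typedef struct { volatile int32_t ref_count; } example_refc;

ANDROID_ATOMIC_INLINE
int example_refc_unref(example_refc* obj)
{
    if (android_atomic_dec(&obj->ref_count) == 1) {
        /* Last reference dropped: pair the release decrements made
         * elsewhere with an acquire fence before the object is
         * reclaimed. */
        atomic_thread_fence(memory_order_acquire);
        return 1; /* caller may now free obj */
    }
    return 0;
}
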
/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead. If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_acquire_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_load_explicit(a, ANDROID_ATOMIC_ACQUIRE);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_release_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_thread_fence(memory_order_seq_cst);
    /* Any reasonable clients of this interface would probably prefer
     * something weaker. But some remaining clients seem to be
     * abusing this API in strange ways, e.g. by using it as a fence.
     * Thus we are conservative until we can get rid of remaining
     * clients (and this function). */
    return atomic_load_explicit(a, memory_order_relaxed);
}

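/*
 * A sketch of the replacement pattern recommended above, for the rare
 * code that genuinely needs extra ordering around a load (illustrative
 * name, not part of this header): a leading acquire fence plus a
 * relaxed load, instead of android_atomic_release_load().
 */
ANDROID_ATOMIC_INLINE
int32_t example_fenced_load(const volatile atomic_int_least32_t* a)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(a, memory_order_relaxed);
}
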
/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 */
ANDROID_ATOMIC_INLINE
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_store_explicit(a, value, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    /* Again overly conservative to accommodate weird clients. */
}

ANDROID_ATOMIC_INLINE
void android_atomic_release_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_store_explicit(a, value, ANDROID_ATOMIC_RELEASE);
}

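/*
 * Correspondingly, a sketch of the recommended replacement for the rare
 * code that needs extra ordering around a store (illustrative name, not
 * part of this header): a relaxed store followed by a trailing release
 * fence, instead of android_atomic_acquire_store().
 */
ANDROID_ATOMIC_INLINE
void example_fenced_store(int32_t value, volatile atomic_int_least32_t* a)
{
    atomic_store_explicit(a, value, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);
}
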
/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than possible, because we re-issue the memory barrier on each iteration.
 */
ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return (int)(!atomic_compare_exchange_strong_explicit(
                     a, &oldvalue, newvalue,
                     ANDROID_ATOMIC_ACQUIRE,
                     ANDROID_ATOMIC_ACQUIRE));
}

ANDROID_ATOMIC_INLINE
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return (int)(!atomic_compare_exchange_strong_explicit(
                     a, &oldvalue, newvalue,
                     ANDROID_ATOMIC_RELEASE,
                     memory_order_relaxed));
}

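/*
 * The lock description at the top of this file maps directly onto these
 * primitives. A minimal spinlock sketch (illustrative names only; real
 * code should use pthread mutexes or C11 atomics instead). The lock word
 * is 0 when free and 1 when held.
 */
ANDROID_ATOMIC_INLINE
void example_spin_lock(volatile int32_t* lock)
{
    /* The CAS returns zero on success, so spin until it takes 0 -> 1.
     * The acquire ordering keeps the critical section from floating
     * above the lock acquisition. */
    while (android_atomic_acquire_cas(0, 1, lock) != 0) {
    }
}

ANDROID_ATOMIC_INLINE
void example_spin_unlock(volatile int32_t* lock)
{
    /* The release ordering keeps the critical section's writes from
     * sinking below the unlocking store. */
    android_atomic_release_store(0, lock);
}
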
/*
 * Fence primitives.
 */
ANDROID_ATOMIC_INLINE
void android_compiler_barrier(void)
{
    __asm__ __volatile__ ("" : : : "memory");
    /* Could probably also be:
     * atomic_signal_fence(memory_order_seq_cst); */
}

ANDROID_ATOMIC_INLINE
void android_memory_barrier(void)
{
#if ANDROID_SMP == 0
    android_compiler_barrier();
#else
    atomic_thread_fence(memory_order_seq_cst);
#endif
}

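/*
 * A full fence is strictly stronger than acquire/release: it also orders
 * an earlier store against a later load. A sketch of the classic
 * store-buffering pattern where that matters (illustrative name, not
 * part of this header): if two threads each run this on opposite flags,
 * both flags initially zero, at least one thread must observe 1. With
 * only acquire/release ordering, both could return 0.
 */
ANDROID_ATOMIC_INLINE
int32_t example_store_buffering(volatile int32_t* my_flag,
                                volatile int32_t* other_flag)
{
    android_atomic_release_store(1, my_flag);
    android_memory_barrier();
    return android_atomic_acquire_load(other_flag);
}
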
/*
 * Aliases for code using an older version of this header. These are now
 * deprecated and should not be used. The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#endif // ANDROID_CUTILS_ATOMIC_H