/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY. AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise. Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h.
 * - Where supported, C++11 std::atomic<T>.
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 *   - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 *   - barrier
 *   - [do work]
 *   - barrier
 *   - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU. Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated. To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */

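/*
 * Purely illustrative sketch (not part of this API): the simple lock
 * described above, written with the legacy compare-and-set and store
 * operations declared later in this header. The names my_lock,
 * my_lock_acquire, and my_lock_release are hypothetical. New code
 * should use pthread mutexes or C11/C++11 atomics instead.
 *
 *   static volatile int32_t my_lock = 0;  // 0 = lock-is-free, 1 = lock-is-held
 *
 *   static void my_lock_acquire(void) {
 *       // The acquire barrier follows the successful swap, so none of
 *       // the "work" can appear to happen before the lock is held.
 *       while (android_atomic_acquire_cas(0, 1, &my_lock) != 0) {
 *           // spin; the CAS returns zero only when it stores the new value
 *       }
 *   }
 *
 *   static void my_lock_release(void) {
 *       // The release barrier precedes the store, so all of the "work"
 *       // is visible before the lock is observed as free again.
 *       android_atomic_release_store(0, &my_lock);
 *   }
 */
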
/*
 * Basic arithmetic and bitwise operations. These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
int32_t android_atomic_inc(volatile int32_t* addr);
int32_t android_atomic_dec(volatile int32_t* addr);
int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
int32_t android_atomic_or(int32_t value, volatile int32_t* addr);

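/*
 * Purely illustrative sketch (not part of this API): a reference count
 * maintained with the operations above, showing that each call returns
 * the *previous* value. The names my_refcount, my_ref_acquire,
 * my_ref_release, and my_destroy are hypothetical. (A fully correct
 * refcount would also need acquire ordering before my_destroy(); this
 * sketch only illustrates the return-value convention.)
 *
 *   static volatile int32_t my_refcount = 1;
 *
 *   static void my_ref_acquire(void) {
 *       android_atomic_inc(&my_refcount);
 *   }
 *
 *   static void my_ref_release(void) {
 *       if (android_atomic_dec(&my_refcount) == 1) {
 *           // The previous value was 1, so this call dropped the last
 *           // reference and it is safe to clean up.
 *           my_destroy();
 *       }
 *   }
 */
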
/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead. If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 *
 * This is only necessary if you need the memory barrier. A 32-bit read
 * from a 32-bit aligned address is atomic on all supported platforms.
 */
int32_t android_atomic_acquire_load(volatile const int32_t* addr);
int32_t android_atomic_release_load(volatile const int32_t* addr);

#if defined (__LP64__)
int64_t android_atomic_acquire_load64(volatile const int64_t* addr);
int64_t android_atomic_release_load64(volatile const int64_t* addr);
#endif

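/*
 * Purely illustrative sketch (not part of this API): the consumer half
 * of a flag/data hand-off. The acquire load of the flag guarantees that
 * the payload written before the matching release store (see the sketch
 * after the store declarations below) is visible afterwards. The names
 * my_data, my_data_ready, and my_consume are hypothetical.
 *
 *   extern int32_t my_data;                  // plain, non-atomic payload
 *   extern volatile int32_t my_data_ready;   // 0 = not published yet
 *
 *   static int32_t my_consume(void) {
 *       while (android_atomic_acquire_load(&my_data_ready) == 0) {
 *           // spin until the producer publishes
 *       }
 *       return my_data;   // safe to read once the acquire load observed 1
 *   }
 */
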
/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 *
 * This is only necessary if you need the memory barrier. A 32-bit write
 * to a 32-bit aligned address is atomic on all supported platforms.
 */
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
void android_atomic_release_store(int32_t value, volatile int32_t* addr);

#if defined (__LP64__)
void android_atomic_acquire_store64(int64_t value, volatile int64_t* addr);
void android_atomic_release_store64(int64_t value, volatile int64_t* addr);
#endif

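/*
 * Purely illustrative sketch (not part of this API): the producer half
 * of the hand-off from the load sketch above. The release store of the
 * flag makes the earlier plain write to the payload visible to any
 * thread that later performs an acquire load of the flag. The names
 * my_data, my_data_ready, and my_publish are hypothetical.
 *
 *   static void my_publish(int32_t value) {
 *       my_data = value;                              // plain write first
 *       android_atomic_release_store(1, &my_data_ready);
 *   }
 */
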
/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than necessary, because we re-issue the memory barrier on each iteration.
 */
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr);
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr);

#if defined (__LP64__)
int64_t android_atomic_acquire_cas64(int64_t old_value, int64_t new_value,
                                     volatile int64_t *ptr);
int64_t android_atomic_release_cas64(int64_t old_value, int64_t new_value,
                                     volatile int64_t *ptr);
#endif

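/*
 * Purely illustrative sketch (not part of this API): a compare-and-set
 * retry loop that atomically raises *addr to at least "candidate",
 * showing the zero-on-success convention described above. The name
 * my_atomic_max is hypothetical.
 *
 *   static void my_atomic_max(int32_t candidate, volatile int32_t* addr) {
 *       int32_t old;
 *       do {
 *           old = *addr;   // an aligned 32-bit read is itself atomic
 *           if (old >= candidate) {
 *               return;    // already at least "candidate"; nothing to store
 *           }
 *       } while (android_atomic_release_cas(old, candidate, addr) != 0);
 *   }
 */
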
/*
 * Aliases for code using an older version of this header. These are now
 * deprecated and should not be used. The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#ifdef __cplusplus
} // extern "C"
#endif

#endif // ANDROID_CUTILS_ATOMIC_H