Revert "Rewrite atomic.h to use stdatomic.h."
This reverts commit cb344d464009337c283431dec4441b70481d1a49.
Change-Id: Ib17cd8d00695062177ce863cedc4ec7148012317
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index e199130..79409a7 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -19,10 +19,9 @@
#include <stdint.h>
#include <sys/types.h>
-#include <stdatomic.h>
-#ifndef ANDROID_ATOMIC_INLINE
-#define ANDROID_ATOMIC_INLINE static inline
+#ifdef __cplusplus
+extern "C" {
#endif
/*
@@ -71,14 +70,6 @@
* If they are not, atomicity is not guaranteed.
*/
-#if ANDROID_SMP == 0
-# define ANDROID_ATOMIC_ACQUIRE memory_order_relaxed
-# define ANDROID_ATOMIC_RELEASE memory_order_relaxed
-#else
-# define ANDROID_ATOMIC_ACQUIRE memory_order_acquire
-# define ANDROID_ATOMIC_RELEASE memory_order_release
-#endif
-
/*
* Basic arithmetic and bitwise operations. These all provide a
* barrier with "release" ordering, and return the previous value.
@@ -86,41 +77,11 @@
* These have the same characteristics (e.g. what happens on overflow)
* as the equivalent non-atomic C operations.
*/
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_inc(volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- /* int32_t, if it exists, is the same as int_least32_t. */
- return atomic_fetch_add_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_dec(volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return atomic_fetch_sub_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return atomic_fetch_add_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return atomic_fetch_and_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return atomic_fetch_or_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
+int32_t android_atomic_inc(volatile int32_t* addr);
+int32_t android_atomic_dec(volatile int32_t* addr);
+int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
+int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
+int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
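Since each of these operations returns the previous value and provides release ordering, the classic use is reference counting. A minimal usage sketch, assuming a hypothetical refcounted struct (not part of this header):

    #include <stdlib.h>
    #include <cutils/atomic.h>

    typedef struct {
        volatile int32_t refcount;
        /* ... payload ... */
    } my_object_t;  /* hypothetical type, for illustration only */

    static void my_object_put(my_object_t* obj) {
        /* android_atomic_dec returns the PREVIOUS value, so a result of 1
         * means this call dropped the last reference. A production
         * implementation may also want an acquire barrier here before
         * touching the object's payload. */
        if (android_atomic_dec(&obj->refcount) == 1) {
            free(obj);
        }
    }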
/*
* Perform an atomic load with "acquire" or "release" ordering.
@@ -135,53 +96,29 @@
* this comment, you are in the vast majority, and should not be
* using release loads or replacing them with anything other than
* locks or default sequentially consistent atomics.
+ *
+ * This is only necessary if you need the memory barrier. A 32-bit read
+ * from a 32-bit aligned address is atomic on all supported platforms.
*/
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_acquire_load(volatile const int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return atomic_load_explicit(a, ANDROID_ATOMIC_ACQUIRE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_release_load(volatile const int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- atomic_thread_fence(memory_order_seq_cst);
- /* Any reasonable clients of this interface would probably prefer */
- /* something weaker. But some remaining clients seem to be */
- /* abusing this API in strange ways, e.g. by using it as a fence. */
- /* Thus we are conservative until we can get rid of remaining */
- /* clients (and this function). */
- return atomic_load_explicit(a, memory_order_relaxed);
-}
+int32_t android_atomic_acquire_load(volatile const int32_t* addr);
+int32_t android_atomic_release_load(volatile const int32_t* addr);
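A minimal sketch of the intended acquire-load pattern, paired with the release store declared below (the globals and function names are hypothetical, for illustration): once the consumer observes the flag via an acquire load, the producer's earlier write to the payload is guaranteed to be visible.

    #include <cutils/atomic.h>

    static int32_t g_data = 0;            /* payload, written before the flag */
    static volatile int32_t g_ready = 0;  /* flag, accessed atomically */

    /* Producer thread. */
    void publish(int32_t value) {
        g_data = value;
        android_atomic_release_store(1, &g_ready);  /* orders g_data before flag */
    }

    /* Consumer thread. */
    int try_consume(int32_t* out) {
        if (android_atomic_acquire_load(&g_ready)) {
            *out = g_data;  /* acquire makes the producer's write visible */
            return 1;
        }
        return 0;
    }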
/*
* Perform an atomic store with "acquire" or "release" ordering.
*
- * Note that the notion of an "acquire" ordering for a store does not
+ * Note that the notion of an "acquire" ordering for a store does not
* really fit into the C11 or C++11 memory model. The extra ordering
* is normally observable only by code using memory_order_relaxed
* atomics, or data races. In the rare cases in which such ordering
* is called for, use memory_order_relaxed atomics and a trailing
* atomic_thread_fence (typically with memory_order_release,
* not memory_order_acquire!) instead.
+ *
+ * This is only necessary if you need the memory barrier. A 32-bit write
+ * to a 32-bit aligned address is atomic on all supported platforms.
*/
-ANDROID_ATOMIC_INLINE
-void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- atomic_store_explicit(a, value, memory_order_relaxed);
- atomic_thread_fence(memory_order_seq_cst);
- /* Again overly conservative to accommodate weird clients. */
-}
-
-ANDROID_ATOMIC_INLINE
-void android_atomic_release_store(int32_t value, volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- atomic_store_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
+void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
+void android_atomic_release_store(int32_t value, volatile int32_t* addr);
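A minimal sketch of the replacement pattern recommended above, using C11 atomics directly (the flag and function name are hypothetical, for illustration): a relaxed store followed by a trailing release fence, rather than an "acquire store".

    #include <stdatomic.h>

    static atomic_int g_flag;

    void store_then_fence(int value) {
        atomic_store_explicit(&g_flag, value, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);  /* trailing release fence */
    }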
/*
* Compare-and-set operation with "acquire" or "release" ordering.
@@ -195,48 +132,10 @@
* Implementations that use the release CAS in a loop may be less efficient
* than necessary, because we re-issue the memory barrier on each iteration.
*/
-ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
- volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return (int)(!atomic_compare_exchange_strong_explicit(
- a, &oldvalue, newvalue,
- ANDROID_ATOMIC_ACQUIRE,
- ANDROID_ATOMIC_ACQUIRE));
-}
-
-ANDROID_ATOMIC_INLINE
+ volatile int32_t* addr);
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
- volatile int32_t* addr)
-{
- volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
- return (int)(!atomic_compare_exchange_strong_explicit(
- a, &oldvalue, newvalue,
- ANDROID_ATOMIC_RELEASE,
- memory_order_relaxed));
-}
-
-/*
- * Fence primitives.
- */
-ANDROID_ATOMIC_INLINE
-void android_compiler_barrier(void)
-{
- __asm__ __volatile__ ("" : : : "memory");
- /* Could probably also be: */
- /* atomic_signal_fence(memory_order_seq_cst); */
-}
-
-ANDROID_ATOMIC_INLINE
-void android_memory_barrier(void)
-{
-#if ANDROID_SMP == 0
- android_compiler_barrier();
-#else
- atomic_thread_fence(memory_order_seq_cst);
-#endif
-}
+ volatile int32_t* addr);
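These functions return zero on success and nonzero on failure (as in the inline implementations removed above), so the usual retry loop spins while the CAS fails. A minimal sketch, assuming a hypothetical atomic-maximum helper:

    #include <cutils/atomic.h>

    /* Atomically raise *addr to at least value. */
    void atomic_store_max(int32_t value, volatile int32_t* addr) {
        int32_t old;
        do {
            old = android_atomic_acquire_load(addr);
            if (old >= value) {
                return;  /* already large enough; no store needed */
            }
        } while (android_atomic_release_cas(old, value, addr));  /* retry on failure */
    }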
/*
* Aliases for code using an older version of this header. These are now
@@ -246,4 +145,8 @@
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
#endif // ANDROID_CUTILS_ATOMIC_H