Merge "Upgrade bionic to tzdata2014g." into lmp-dev
diff --git a/libc/include/stdatomic.h b/libc/include/stdatomic.h
index 6854d8b..3db25a7 100644
--- a/libc/include/stdatomic.h
+++ b/libc/include/stdatomic.h
@@ -32,13 +32,33 @@
#include <sys/cdefs.h>
-#if defined(__cplusplus) && defined(_USING_LIBCXX) && \
- (__has_feature(cxx_atomic) || _GNUC_VER >= 407)
+
+#if defined(__cplusplus) && defined(_USING_LIBCXX)
+# ifdef __clang__
+# if __has_feature(cxx_atomic)
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+# else /* gcc */
+# if __GNUC_PREREQ(4, 7)
+# define _STDATOMIC_HAVE_ATOMIC
+# endif
+# endif
+#endif
+
+#ifdef _STDATOMIC_HAVE_ATOMIC
/* We have a usable C++ <atomic>; use it instead. */
#include <atomic>
+#undef _Atomic
+ /* Also defined by <atomic> for gcc. But not used in macros. */
+ /* Also a clang intrinsic. */
+ /* Should not be used by client code before this file is */
+ /* included. The definitions in <atomic> themselves see */
+ /* the old definition, as they should. */
+ /* Client code sees the following definition. */
+
#define _Atomic(t) std::atomic<t>
using std::atomic_is_lock_free;
@@ -123,14 +143,30 @@
* bits as a T.
*/
-#if __has_extension(c_atomic) || __has_extension(cxx_atomic)
-#define __CLANG_ATOMICS
-#elif __GNUC_PREREQ(4, 7)
-#define __GNUC_ATOMICS
-#elif defined(__GNUC__)
-#define __SYNC_ATOMICS
+#include <stddef.h> /* For ptrdiff_t. */
+#include <stdint.h> /* TODO: Should pollute namespace less. */
+#if __STDC_VERSION__ >= 201112L
+# include <uchar.h> /* For char16_t and char32_t. */
+#endif
+
+#ifdef __clang__
+# if __has_extension(c_atomic) || __has_extension(cxx_atomic)
+# define __CLANG_ATOMICS
+# else
+# error "stdatomic.h does not support your compiler"
+# endif
+# if __has_builtin(__sync_swap)
+# define __HAS_BUILTIN_SYNC_SWAP
+# endif
#else
-#error "stdatomic.h does not support your compiler"
+# if __GNUC_PREREQ(4, 7)
+# define __GNUC_ATOMICS
+# else
+# define __SYNC_ATOMICS
+# ifdef __cplusplus
+# define __ATOMICS_AVOID_DOT_INIT
+# endif
+# endif
#endif
/*
@@ -139,33 +175,53 @@
#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_BOOL_LOCK_FREE 2 /* For all modern platforms */
#endif
#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR16_T_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_CHAR32_T_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_WCHAR_T_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_SHORT_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_INT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_INT_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_LONG_LOCK_FREE 2
#endif
#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_LLONG_LOCK_FREE 1 /* maybe */
#endif
#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#elif defined(__SYNC_ATOMICS)
+#define ATOMIC_POINTER_LOCK_FREE 2
#endif
/*
@@ -176,7 +232,11 @@
#define ATOMIC_VAR_INIT(value) (value)
#define atomic_init(obj, value) __c11_atomic_init(obj, value)
#else
+#ifdef __ATOMICS_AVOID_DOT_INIT
+#define ATOMIC_VAR_INIT(value) { value }
+#else
#define ATOMIC_VAR_INIT(value) { .__val = (value) }
+#endif
#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
#endif
@@ -228,7 +288,7 @@
*/
static __inline void
-atomic_thread_fence(memory_order __order __unused)
+atomic_thread_fence(memory_order __order __attribute__((unused)))
{
#ifdef __CLANG_ATOMICS
@@ -241,7 +301,7 @@
}
static __inline void
-atomic_signal_fence(memory_order __order __unused)
+atomic_signal_fence(memory_order __order __attribute__((unused)))
{
#ifdef __CLANG_ATOMICS
@@ -263,7 +323,7 @@
((void)(obj), (_Bool)1)
#elif defined(__CLANG_ATOMICS)
#define atomic_is_lock_free(obj) \
- __atomic_is_lock_free(sizeof(*(obj)), obj)
+ __c11_atomic_is_lock_free(sizeof(*(obj)))
#elif defined(__GNUC_ATOMICS)
#define atomic_is_lock_free(obj) \
__atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
@@ -276,7 +336,7 @@
* 7.17.6 Atomic integer types.
*/
-#if !__has_extension(c_atomic) && !__has_extension(cxx_atomic)
+#ifndef __CLANG_ATOMICS
/*
* No native support for _Atomic(). Place object in structure to prevent
* most forms of direct non-atomic access.
@@ -397,7 +457,7 @@
desired, success, failure) \
atomic_compare_exchange_strong_explicit(object, expected, \
desired, success, failure)
-#if __has_builtin(__sync_swap)
+#ifdef __HAS_BUILTIN_SYNC_SWAP
/* Clang provides a full-barrier atomic exchange - use it if available. */
#define atomic_exchange_explicit(object, desired, order) \
((void)(order), __sync_swap(&(object)->__val, desired))
@@ -477,7 +537,7 @@
atomic_bool __flag;
} atomic_flag;
-#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) }
+#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(false) }
static __inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
diff --git a/tests/Android.mk b/tests/Android.mk
index b370b92..8184bf7 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -237,7 +237,8 @@
# -----------------------------------------------------------------------------
# Tests for the device using bionic's .so. Run with:
-# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests
+# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests32
+# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests64
# -----------------------------------------------------------------------------
bionic-unit-tests_whole_static_libraries := \
libBionicTests \
@@ -269,7 +270,8 @@
# -----------------------------------------------------------------------------
# Tests for the device linked against bionic's static library. Run with:
-# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static
+# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static32
+# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static64
# -----------------------------------------------------------------------------
bionic-unit-tests-static_whole_static_libraries := \
libBionicTests \
diff --git a/tests/stdatomic_test.cpp b/tests/stdatomic_test.cpp
index 5e88c88..b7fb19b 100644
--- a/tests/stdatomic_test.cpp
+++ b/tests/stdatomic_test.cpp
@@ -14,11 +14,10 @@
* limitations under the License.
*/
-#include <gtest/gtest.h>
-
-#if !defined(__GLIBC__) /* TODO: fix our prebuilt toolchains! */
-
#include <stdatomic.h>
+#include <gtest/gtest.h>
+#include <pthread.h>
+#include <stdint.h>
TEST(stdatomic, LOCK_FREE) {
ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
@@ -64,14 +63,17 @@
TEST(stdatomic, atomic_is_lock_free) {
atomic_char small;
- atomic_intmax_t big;
ASSERT_TRUE(atomic_is_lock_free(&small));
+#if defined(__clang__) || __GNUC_PREREQ(4, 7)
+ // Otherwise stdatomic.h doesn't handle this.
+ atomic_intmax_t big;
// atomic_intmax_t(size = 64) is not lock free on mips32.
#if defined(__mips__) && !defined(__LP64__)
ASSERT_FALSE(atomic_is_lock_free(&big));
#else
ASSERT_TRUE(atomic_is_lock_free(&big));
#endif
+#endif
}
TEST(stdatomic, atomic_flag) {
@@ -167,4 +169,80 @@
ASSERT_EQ(0x002, atomic_load(&i));
}
-#endif
+// And a rudimentary test of acquire-release memory ordering:
+
+constexpr static uint_least32_t BIG = 10000000ul; // Assumed even below.
+
+struct three_atomics {
+ atomic_uint_least32_t x;
+ char a[123]; // Everything in different cache lines,
+ // to increase the chance of the compiler getting alignment wrong.
+ atomic_uint_least32_t y;
+ char b[4013];
+ atomic_uint_least32_t z;
+};
+
+// Very simple acquire/release memory ordering sanity check.
+static void* writer(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ for (uint_least32_t i = 0; i <= BIG; i+=2) {
+ atomic_store_explicit(&a->x, i, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i, memory_order_release);
+ atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i+1, memory_order_release);
+ }
+ return 0;
+}
+
+static void* reader(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ uint_least32_t xval = 0, yval = 0, zval = 0;
+ size_t repeat = 0;
+ size_t repeat_limit = 1000;
+ while (yval != BIG + 1) {
+ yval = atomic_load_explicit(&a->y, memory_order_acquire);
+ zval = atomic_load_explicit(&a->z, memory_order_relaxed);
+ xval = atomic_load_explicit(&a->x, memory_order_relaxed);
+ // If we see a given value of y, the immediately preceding
+ // stores to z and x, or later ones, should also be visible.
+ if (zval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << zval << " < " << yval << ", " << xval << "\n";
+ return 0; // Only report once.
+ }
+ if (xval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << xval << " < " << yval << ", " << zval << "\n";
+ return 0; // Only report once.
+ }
+ if (repeat < repeat_limit) ++repeat;
+ }
+ // The following assertion is not technically guaranteed to hold.
+ // But if it fails to hold, this test was useless, and we have a
+ // serious scheduling issue that we should probably know about.
+ EXPECT_EQ(repeat, repeat_limit);
+ return 0;
+}
+
+TEST(stdatomic, ordering) {
+ // Run a memory ordering sanity test.
+ void* result;
+ three_atomics a;
+ atomic_init(&a.x, 0ul);
+ atomic_init(&a.y, 0ul);
+ atomic_init(&a.z, 0ul);
+ pthread_t t1,t2;
+ ASSERT_EQ(0, pthread_create(&t1, 0, reader, &a));
+ ASSERT_EQ(0, pthread_create(&t2, 0, writer, &a));
+ ASSERT_EQ(0, pthread_join(t1, &result));
+ EXPECT_EQ(0, result);
+ ASSERT_EQ(0, pthread_join(t2, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
+ EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
+ EXPECT_EQ(atomic_load(&a.z), BIG + 1);
+}