Better obstruct optimization for ordering test

... to make sure that stores to the same locations are harder for the
compiler to eliminate.

Also ensure that the reader gets a chance to run, by having the writer
sleep if necessary.
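
For reference, the new asm statement is the usual GCC/Clang empty-asm
compiler barrier.  A minimal sketch of the idea (the identifiers below
are illustrative only and not part of the test):

  int data;

  void overwrite_twice(int i) {
    data = i;  // Normally a dead store that the compiler may eliminate.
    // Empty asm with a "memory" clobber: the compiler must assume memory
    // could be read or written here, so the store above must be emitted.
    asm volatile("" ::: "memory");
    data = i + 1;
  }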

Bug: 308744279
Test: atest stdatomic
Test: Check that we don't usually sleep.
Change-Id: Iddab2a109525f96e065ac8331f227baa08dd8e22
diff --git a/tests/stdatomic_test.cpp b/tests/stdatomic_test.cpp
index 727af87..f5c6bb1 100644
--- a/tests/stdatomic_test.cpp
+++ b/tests/stdatomic_test.cpp
@@ -181,7 +181,8 @@
 
 // And a rudimentary test of acquire-release memory ordering:
 
-static constexpr uint_least32_t BIG = 30'000'000ul;  // Assumed even below.
+static constexpr uint_least32_t BIG = 30'000'000ul;
+static_assert((BIG % 2) == 0);  // Assumed below.
 
 struct three_atomics {
   atomic_uint_least32_t x;
@@ -192,6 +193,8 @@
   atomic_uint_least32_t z;
 };
 
+atomic_bool read_enough(false);
+
 // Very simple acquire/release memory ordering smoke test.
 static void* writer(void* arg) {
   three_atomics* a = reinterpret_cast<three_atomics*>(arg);
@@ -199,9 +202,18 @@
     atomic_store_explicit(&a->x, i, memory_order_relaxed);
     atomic_store_explicit(&a->z, i, memory_order_relaxed);
     atomic_store_explicit(&a->y, i, memory_order_release);
+
+    // Force the stores above to be performed in spite of being overwritten below.
+    asm volatile("" ::: "memory");
+
     atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
     atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
     atomic_store_explicit(&a->y, i+1, memory_order_release);
+    if (i >= BIG - 1000 && !atomic_load(&read_enough)) {
+      // Give the reader a chance to catch up, at the expense of making the test
+      // less effective.
+      usleep(1000);
+    }
   }
   return nullptr;
 }
@@ -229,7 +241,11 @@
                     << xval << " < " << yval << ", " << zval <<  "\n";
       return nullptr; // Only report once.
     }
-    if (repeat < repeat_limit) ++repeat;
+    if (repeat < repeat_limit) {
+      ++repeat;
+    } else if (!atomic_load_explicit(&read_enough, memory_order_relaxed)) {
+      atomic_store_explicit(&read_enough, true, memory_order_relaxed);
+    }
   }
   // The following assertion is not technically guaranteed to hold.
   // But if it fails to hold, this test was useless, and we have a