Add an API for disabling memory initialization per process.

Introduce an android_mallopt(M_DISABLE_MEMORY_MITIGATIONS) API call
that can be used to disable zero- or pattern-init on non-MTE hardware,
or memory tagging on MTE hardware. The function is intended to be
callable at any time, including while multiple threads are running.
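
As an illustration (a minimal sketch, not taken verbatim from the
tests below), a platform caller would use the new opcode like this,
with the header path and android_mallopt() being the existing
platform API:

  #include "platform/bionic/malloc.h"

  // Turn off heap zero/pattern-init and, on MTE hardware, memory
  // tagging for the rest of this process's lifetime. arg and arg_size
  // must be nullptr/0; the call returns false if they are not, or if
  // the opcode is unsupported.
  if (!android_mallopt(M_DISABLE_MEMORY_MITIGATIONS, nullptr, 0)) {
    // Mitigations are still enabled; handle or ignore as appropriate.
  }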

Disabling zero- or pattern-init is straightforward: we just need to
set a global variable to 0 via a Scudo API call (although some
separate work will be required on the Scudo side to make this
operation thread-safe).

Disabling MTE across a process is trickier, because the kernel only
provides an API for disabling tag checking in the calling thread, not
in every thread of a process. We therefore send a signal to each of
the process's threads with a handler that issues the required prctl
call, and lock thread creation for the duration of the API call to
avoid races between thread enumeration and calls to pthread_create().
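
In outline (a condensed sketch of the android_run_on_all_threads() and
set_tcf_on_all_threads() code added below, not a separate mechanism):

  // Block pthread_create() and freeze the thread list while we work.
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Install a handler for a reserved realtime signal that clears the
  // MTE tag-check bits via
  //   prctl(PR_SET_TAGGED_ADDR_CTRL,
  //         (ctrl & ~PR_MTE_TCF_MASK) | PR_MTE_TCF_NONE, 0, 0, 0)
  // and posts a semaphore; then tgkill() every live, non-terminating
  // thread and sem_wait() once per signalled thread before restoring
  // the previous handler.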

Bug: 135772972
Change-Id: I81ece86ace916eb6b435ab516cd431ec4b48a3bf
diff --git a/libc/Android.bp b/libc/Android.bp
index c94be62..ba5ef11 100644
--- a/libc/Android.bp
+++ b/libc/Android.bp
@@ -62,6 +62,7 @@
     cppflags: [],
     include_dirs: [
         "bionic/libc/async_safe/include",
+        "bionic/libc/platform",
         // For android_filesystem_config.h.
         "system/core/libcutils/include",
     ],
@@ -1077,6 +1078,7 @@
         "bionic/mblen.cpp",
         "bionic/mbrtoc16.cpp",
         "bionic/mbrtoc32.cpp",
+        "bionic/memory_mitigation_state.cpp",
         "bionic/mempcpy.cpp",
         "bionic/mkdir.cpp",
         "bionic/mkfifo.cpp",
diff --git a/libc/bionic/malloc_common.cpp b/libc/bionic/malloc_common.cpp
index ed5537f..6b7006d 100644
--- a/libc/bionic/malloc_common.cpp
+++ b/libc/bionic/malloc_common.cpp
@@ -46,6 +46,7 @@
 #include "malloc_common.h"
 #include "malloc_limit.h"
 #include "malloc_tagged_pointers.h"
+#include "memory_mitigation_state.h"
 
 // =============================================================================
 // Global variables instantations.
@@ -326,6 +327,9 @@
       return MaybeInitGwpAsan(globals, *reinterpret_cast<bool*>(arg));
     });
   }
+  if (opcode == M_DISABLE_MEMORY_MITIGATIONS) {
+    return DisableMemoryMitigations(arg, arg_size);
+  }
   errno = ENOTSUP;
   return false;
 }
diff --git a/libc/bionic/malloc_common_dynamic.cpp b/libc/bionic/malloc_common_dynamic.cpp
index 6a82ae3..eeeaff9 100644
--- a/libc/bionic/malloc_common_dynamic.cpp
+++ b/libc/bionic/malloc_common_dynamic.cpp
@@ -70,6 +70,7 @@
 #include "malloc_common_dynamic.h"
 #include "malloc_heapprofd.h"
 #include "malloc_limit.h"
+#include "memory_mitigation_state.h"
 
 // =============================================================================
 // Global variables instantations.
@@ -533,6 +534,9 @@
       return MaybeInitGwpAsan(globals, *reinterpret_cast<bool*>(arg));
     });
   }
+  if (opcode == M_DISABLE_MEMORY_MITIGATIONS) {
+    return DisableMemoryMitigations(arg, arg_size);
+  }
   // Try heapprofd's mallopt, as it handles options not covered here.
   return HeapprofdMallopt(opcode, arg, arg_size);
 }
diff --git a/libc/bionic/memory_mitigation_state.cpp b/libc/bionic/memory_mitigation_state.cpp
new file mode 100644
index 0000000..82b0b7b
--- /dev/null
+++ b/libc/bionic/memory_mitigation_state.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "memory_mitigation_state.h"
+
+#include <dirent.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdlib.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+
+#include <bionic/mte.h>
+#include <bionic/reserved_signals.h>
+
+#include "private/ScopedRWLock.h"
+#include "pthread_internal.h"
+
+extern "C" void scudo_malloc_set_zero_contents(int zero_contents);
+extern "C" void scudo_malloc_disable_memory_tagging();
+
+#ifdef ANDROID_EXPERIMENTAL_MTE
+static bool set_tcf_on_all_threads(int tcf) {
+  static int g_tcf;
+  g_tcf = tcf;
+
+  return android_run_on_all_threads(
+      [](void*) {
+        int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+        if (tagged_addr_ctrl < 0) {
+          return false;
+        }
+
+        tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | g_tcf;
+        if (prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) < 0) {
+          return false;
+        }
+        return true;
+      },
+      nullptr);
+}
+#endif
+
+bool DisableMemoryMitigations(void* arg, size_t arg_size) {
+  if (arg || arg_size) {
+    return false;
+  }
+
+#ifdef USE_SCUDO
+  scudo_malloc_set_zero_contents(0);
+
+#ifdef ANDROID_EXPERIMENTAL_MTE
+  if (mte_supported() && set_tcf_on_all_threads(PR_MTE_TCF_NONE)) {
+    scudo_malloc_disable_memory_tagging();
+  }
+#endif
+#endif
+
+  return true;
+}
diff --git a/libc/bionic/memory_mitigation_state.h b/libc/bionic/memory_mitigation_state.h
new file mode 100644
index 0000000..ffa1912
--- /dev/null
+++ b/libc/bionic/memory_mitigation_state.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <stddef.h>
+
+bool DisableMemoryMitigations(void* arg, size_t arg_size);
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index c528105..206d5fd 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -39,6 +39,7 @@
 
 #include <async_safe/log.h>
 
+#include "private/ScopedRWLock.h"
 #include "private/bionic_constants.h"
 #include "private/bionic_defs.h"
 #include "private/bionic_globals.h"
@@ -357,6 +358,7 @@
   return nullptr;
 }
 
+pthread_rwlock_t g_thread_creation_lock = PTHREAD_RWLOCK_INITIALIZER;
 
 __BIONIC_WEAK_FOR_NATIVE_BRIDGE
 int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
@@ -406,6 +408,8 @@
   tls = &tls_descriptor;
 #endif
 
+  ScopedReadLock locker(&g_thread_creation_lock);
+
   sigset64_t block_all_mask;
   sigfillset64(&block_all_mask);
   __rt_sigprocmask(SIG_SETMASK, &block_all_mask, &thread->start_mask, sizeof(thread->start_mask));
diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp
index 81dab57..bde95ec 100644
--- a/libc/bionic/pthread_exit.cpp
+++ b/libc/bionic/pthread_exit.cpp
@@ -35,6 +35,7 @@
 
 #include "private/bionic_constants.h"
 #include "private/bionic_defs.h"
+#include "private/ScopedRWLock.h"
 #include "private/ScopedSignalBlocker.h"
 #include "pthread_internal.h"
 
@@ -103,9 +104,18 @@
          !atomic_compare_exchange_weak(&thread->join_state, &old_state, THREAD_EXITED_NOT_JOINED)) {
   }
 
-  // We don't want to take a signal after unmapping the stack, the shadow call
-  // stack, or dynamic TLS memory.
-  ScopedSignalBlocker ssb;
+  // android_run_on_all_threads() needs to see signals blocked atomically with setting the
+  // terminating flag, so take the creation lock while doing these operations.
+  {
+    ScopedReadLock locker(&g_thread_creation_lock);
+    atomic_store(&thread->terminating, true);
+
+    // We don't want to take a signal after unmapping the stack, the shadow call stack, or dynamic
+    // TLS memory.
+    sigset64_t set;
+    sigfillset64(&set);
+    __rt_sigprocmask(SIG_BLOCK, &set, nullptr, sizeof(sigset64_t));
+  }
 
 #ifdef __aarch64__
   // Free the shadow call stack and guard pages.
diff --git a/libc/bionic/pthread_internal.cpp b/libc/bionic/pthread_internal.cpp
index e091158..6a7ee2f 100644
--- a/libc/bionic/pthread_internal.cpp
+++ b/libc/bionic/pthread_internal.cpp
@@ -29,12 +29,15 @@
 #include "pthread_internal.h"
 
 #include <errno.h>
+#include <semaphore.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/mman.h>
 
 #include <async_safe/log.h>
+#include <bionic/reserved_signals.h>
 
+#include "private/ErrnoRestorer.h"
 #include "private/ScopedRWLock.h"
 #include "private/bionic_futex.h"
 #include "private/bionic_tls.h"
@@ -115,3 +118,76 @@
   }
   return nullptr;
 }
+
+bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
+  // Take the locks in this order to avoid inversion (pthread_create ->
+  // __pthread_internal_add).
+  ScopedWriteLock creation_locker(&g_thread_creation_lock);
+  ScopedReadLock list_locker(&g_thread_list_lock);
+
+  // Call the function directly for the current thread so that we don't need to worry about
+  // the consequences of synchronizing with ourselves.
+  if (!func(arg)) {
+    return false;
+  }
+
+  static sem_t g_sem;
+  if (sem_init(&g_sem, 0, 0) != 0) {
+    return false;
+  }
+
+  static bool (*g_func)(void*);
+  static void *g_arg;
+  g_func = func;
+  g_arg = arg;
+
+  static _Atomic(bool) g_retval;
+  atomic_init(&g_retval, true);
+
+  auto handler = [](int, siginfo_t*, void*) {
+    ErrnoRestorer restorer;
+    if (!g_func(g_arg)) {
+      atomic_store(&g_retval, false);
+    }
+    sem_post(&g_sem);
+  };
+
+  struct sigaction act = {}, oldact;
+  act.sa_flags = SA_SIGINFO;
+  act.sa_sigaction = handler;
+  sigfillset(&act.sa_mask);
+  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
+    sem_destroy(&g_sem);
+    return false;
+  }
+
+  pid_t my_pid = getpid();
+  size_t num_tids = 0;
+  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
+    // The function is called directly for the current thread above, so no need to send a signal to
+    // ourselves to call it here.
+    if (t == __get_thread()) continue;
+
+    // If a thread is terminating (has blocked signals) or has already terminated, our signal will
+    // never be received, so we need to check for that condition and skip the thread if it is the
+    // case.
+    if (atomic_load(&t->terminating)) continue;
+
+    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
+      ++num_tids;
+    } else {
+      atomic_store(&g_retval, false);
+    }
+  }
+
+  for (size_t i = 0; i != num_tids; ++i) {
+    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
+      atomic_store(&g_retval, false);
+      break;
+    }
+  }
+
+  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, 0);
+  sem_destroy(&g_sem);
+  return atomic_load(&g_retval);
+}
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index 1f055f5..071a5bc 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -132,6 +132,11 @@
   // top of the stack quickly, which would otherwise require special logic for the main thread.
   uintptr_t stack_top;
 
+  // Whether the thread is in the process of terminating (has blocked signals), or has already
+  // terminated. This is used by android_run_on_all_threads() to avoid sending a signal to a thread
+  // that will never receive it.
+  _Atomic(bool) terminating;
+
   Lock startup_handshake_lock;
 
   void* mmap_base;
@@ -242,3 +247,7 @@
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
 __LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();
+
+extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);
+
+extern pthread_rwlock_t g_thread_creation_lock;
diff --git a/libc/libc.map.txt b/libc/libc.map.txt
index a224eab..64f583a 100644
--- a/libc/libc.map.txt
+++ b/libc/libc.map.txt
@@ -1718,6 +1718,7 @@
     android_gethostbyaddrfornetcontext;
     android_gethostbynamefornet;
     android_gethostbynamefornetcontext;
+    android_run_on_all_threads;
     android_unsafe_frame_pointer_chase;
     arc4random_addrandom; # arm x86
     arc4random_stir; # arm x86
diff --git a/libc/platform/bionic/malloc.h b/libc/platform/bionic/malloc.h
index f9eb03f..16ef3a0 100644
--- a/libc/platform/bionic/malloc.h
+++ b/libc/platform/bionic/malloc.h
@@ -105,6 +105,13 @@
   //   arg_size = sizeof(bool)
   M_INITIALIZE_GWP_ASAN = 10,
 #define M_INITIALIZE_GWP_ASAN M_INITIALIZE_GWP_ASAN
+  // Disable heap initialization across the whole process. If the hardware supports memory
+  // tagging, it also disables memory tagging. May be called at any time including
+  // when multiple threads are running. arg and arg_size are unused and must be set to 0.
+  // Note that the memory mitigations are only implemented in scudo and therefore this API call will
+  // have no effect when using another allocator.
+  M_DISABLE_MEMORY_MITIGATIONS = 11,
+#define M_DISABLE_MEMORY_MITIGATIONS M_DISABLE_MEMORY_MITIGATIONS
 };
 
 enum HeapTaggingLevel {
diff --git a/libc/platform/bionic/reserved_signals.h b/libc/platform/bionic/reserved_signals.h
index e8e517e..dab58af 100644
--- a/libc/platform/bionic/reserved_signals.h
+++ b/libc/platform/bionic/reserved_signals.h
@@ -43,9 +43,7 @@
 //   37 (__SIGRTMIN + 5)        coverage (libprofile-extras)
 //   38 (__SIGRTMIN + 6)        heapprofd ART managed heap dumps
 //   39 (__SIGRTMIN + 7)        fdtrack
-//
-// If you change this, also change __ndk_legacy___libc_current_sigrtmin
-// in <android/legacy_signal_inlines.h> to match.
+//   40 (__SIGRTMIN + 8)        android_run_on_all_threads (bionic/pthread_internal.cpp)
 
 #define BIONIC_SIGNAL_POSIX_TIMERS (__SIGRTMIN + 0)
 #define BIONIC_SIGNAL_BACKTRACE (__SIGRTMIN + 1)
@@ -53,8 +51,9 @@
 #define BIONIC_SIGNAL_PROFILER (__SIGRTMIN + 4)
 #define BIONIC_SIGNAL_ART_PROFILER (__SIGRTMIN + 6)
 #define BIONIC_SIGNAL_FDTRACK (__SIGRTMIN + 7)
+#define BIONIC_SIGNAL_RUN_ON_ALL_THREADS (__SIGRTMIN + 8)
 
-#define __SIGRT_RESERVED 8
+#define __SIGRT_RESERVED 9
 static inline __always_inline sigset64_t filter_reserved_signals(sigset64_t sigset, int how) {
   int (*block)(sigset64_t*, int);
   int (*unblock)(sigset64_t*, int);
@@ -83,5 +82,6 @@
   unblock(&sigset, __SIGRTMIN + 5);
   unblock(&sigset, __SIGRTMIN + 6);
   unblock(&sigset, __SIGRTMIN + 7);
+  unblock(&sigset, __SIGRTMIN + 8);
   return sigset;
 }
diff --git a/tests/malloc_test.cpp b/tests/malloc_test.cpp
index d692cf9..4ea6d2b 100644
--- a/tests/malloc_test.cpp
+++ b/tests/malloc_test.cpp
@@ -20,6 +20,7 @@
 #include <limits.h>
 #include <malloc.h>
 #include <pthread.h>
+#include <semaphore.h>
 #include <signal.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -45,6 +46,7 @@
 #include "SignalUtils.h"
 
 #include "platform/bionic/malloc.h"
+#include "platform/bionic/mte.h"
 #include "platform/bionic/mte_kernel.h"
 #include "platform/bionic/reserved_signals.h"
 #include "private/bionic_config.h"
@@ -1259,3 +1261,39 @@
   GTEST_SKIP() << "bionic extension";
 #endif
 }
+
+TEST(android_mallopt, disable_memory_mitigations) {
+#if defined(__BIONIC__)
+  if (!mte_supported()) {
+    GTEST_SKIP() << "This function can only be tested with MTE";
+  }
+
+#ifdef ANDROID_EXPERIMENTAL_MTE
+  sem_t sem;
+  ASSERT_EQ(0, sem_init(&sem, 0, 0));
+
+  pthread_t thread;
+  ASSERT_EQ(0, pthread_create(
+                   &thread, nullptr,
+                   [](void* ptr) -> void* {
+                     auto* sem = reinterpret_cast<sem_t*>(ptr);
+                     sem_wait(sem);
+                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
+                   },
+                   &sem));
+
+  ASSERT_TRUE(android_mallopt(M_DISABLE_MEMORY_MITIGATIONS, nullptr, 0));
+  ASSERT_EQ(0, sem_post(&sem));
+
+  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+  ASSERT_EQ(PR_MTE_TCF_NONE, my_tagged_addr_ctrl & PR_MTE_TCF_MASK);
+
+  void* retval;
+  ASSERT_EQ(0, pthread_join(thread, &retval));
+  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
+  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
+#endif
+#else
+  GTEST_SKIP() << "bionic extension";
+#endif
+}
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index 851b86f..d9ad3cc 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -2975,3 +2975,48 @@
   spin_helper.UnSpin();
   ASSERT_EQ(0, pthread_join(t, nullptr));
 }
+
+extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);
+
+TEST(pthread, run_on_all_threads) {
+#if defined(__BIONIC__)
+  pthread_t t;
+  ASSERT_EQ(
+      0, pthread_create(
+             &t, nullptr,
+             [](void*) -> void* {
+               pthread_attr_t detached;
+               if (pthread_attr_init(&detached) != 0 ||
+                   pthread_attr_setdetachstate(&detached, PTHREAD_CREATE_DETACHED) != 0) {
+                 return reinterpret_cast<void*>(errno);
+               }
+
+               for (int i = 0; i != 1000; ++i) {
+                 pthread_t t1, t2;
+                 if (pthread_create(
+                         &t1, &detached, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
+                     pthread_create(
+                         &t2, nullptr, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
+                     pthread_join(t2, nullptr) != 0) {
+                   return reinterpret_cast<void*>(errno);
+                 }
+               }
+
+               if (pthread_attr_destroy(&detached) != 0) {
+                 return reinterpret_cast<void*>(errno);
+               }
+               return nullptr;
+             },
+             nullptr));
+
+  for (int i = 0; i != 1000; ++i) {
+    ASSERT_TRUE(android_run_on_all_threads([](void* arg) { return arg == nullptr; }, nullptr));
+  }
+
+  void *retval;
+  ASSERT_EQ(0, pthread_join(t, &retval));
+  ASSERT_EQ(nullptr, retval);
+#else
+  GTEST_SKIP() << "bionic-only test";
+#endif
+}
diff --git a/tests/struct_layout_test.cpp b/tests/struct_layout_test.cpp
index 9da702c..00fd4d5 100644
--- a/tests/struct_layout_test.cpp
+++ b/tests/struct_layout_test.cpp
@@ -30,7 +30,7 @@
 #define CHECK_OFFSET(name, field, offset) \
     check_offset(#name, #field, offsetof(name, field), offset);
 #ifdef __LP64__
-  CHECK_SIZE(pthread_internal_t, 768);
+  CHECK_SIZE(pthread_internal_t, 776);
   CHECK_OFFSET(pthread_internal_t, next, 0);
   CHECK_OFFSET(pthread_internal_t, prev, 8);
   CHECK_OFFSET(pthread_internal_t, tid, 16);
@@ -44,17 +44,17 @@
   CHECK_OFFSET(pthread_internal_t, alternate_signal_stack, 128);
   CHECK_OFFSET(pthread_internal_t, shadow_call_stack_guard_region, 136);
   CHECK_OFFSET(pthread_internal_t, stack_top, 144);
-  CHECK_OFFSET(pthread_internal_t, startup_handshake_lock, 152);
-  CHECK_OFFSET(pthread_internal_t, mmap_base, 160);
-  CHECK_OFFSET(pthread_internal_t, mmap_size, 168);
-  CHECK_OFFSET(pthread_internal_t, mmap_base_unguarded, 176);
-  CHECK_OFFSET(pthread_internal_t, mmap_size_unguarded, 184);
-  CHECK_OFFSET(pthread_internal_t, vma_name_buffer, 192);
-  CHECK_OFFSET(pthread_internal_t, thread_local_dtors, 224);
-  CHECK_OFFSET(pthread_internal_t, current_dlerror, 232);
-  CHECK_OFFSET(pthread_internal_t, dlerror_buffer, 240);
-  CHECK_OFFSET(pthread_internal_t, bionic_tls, 752);
-  CHECK_OFFSET(pthread_internal_t, errno_value, 760);
+  CHECK_OFFSET(pthread_internal_t, startup_handshake_lock, 156);
+  CHECK_OFFSET(pthread_internal_t, mmap_base, 168);
+  CHECK_OFFSET(pthread_internal_t, mmap_size, 176);
+  CHECK_OFFSET(pthread_internal_t, mmap_base_unguarded, 184);
+  CHECK_OFFSET(pthread_internal_t, mmap_size_unguarded, 192);
+  CHECK_OFFSET(pthread_internal_t, vma_name_buffer, 200);
+  CHECK_OFFSET(pthread_internal_t, thread_local_dtors, 232);
+  CHECK_OFFSET(pthread_internal_t, current_dlerror, 240);
+  CHECK_OFFSET(pthread_internal_t, dlerror_buffer, 248);
+  CHECK_OFFSET(pthread_internal_t, bionic_tls, 760);
+  CHECK_OFFSET(pthread_internal_t, errno_value, 768);
   CHECK_SIZE(bionic_tls, 12200);
   CHECK_OFFSET(bionic_tls, key_data, 0);
   CHECK_OFFSET(bionic_tls, locale, 2080);
@@ -71,7 +71,7 @@
   CHECK_OFFSET(bionic_tls, fdtrack_disabled, 12192);
   CHECK_OFFSET(bionic_tls, padding, 12193);
 #else
-  CHECK_SIZE(pthread_internal_t, 664);
+  CHECK_SIZE(pthread_internal_t, 668);
   CHECK_OFFSET(pthread_internal_t, next, 0);
   CHECK_OFFSET(pthread_internal_t, prev, 4);
   CHECK_OFFSET(pthread_internal_t, tid, 8);
@@ -85,17 +85,17 @@
   CHECK_OFFSET(pthread_internal_t, alternate_signal_stack, 68);
   CHECK_OFFSET(pthread_internal_t, shadow_call_stack_guard_region, 72);
   CHECK_OFFSET(pthread_internal_t, stack_top, 76);
-  CHECK_OFFSET(pthread_internal_t, startup_handshake_lock, 80);
-  CHECK_OFFSET(pthread_internal_t, mmap_base, 88);
-  CHECK_OFFSET(pthread_internal_t, mmap_size, 92);
-  CHECK_OFFSET(pthread_internal_t, mmap_base_unguarded, 96);
-  CHECK_OFFSET(pthread_internal_t, mmap_size_unguarded, 100);
-  CHECK_OFFSET(pthread_internal_t, vma_name_buffer, 104);
-  CHECK_OFFSET(pthread_internal_t, thread_local_dtors, 136);
-  CHECK_OFFSET(pthread_internal_t, current_dlerror, 140);
-  CHECK_OFFSET(pthread_internal_t, dlerror_buffer, 144);
-  CHECK_OFFSET(pthread_internal_t, bionic_tls, 656);
-  CHECK_OFFSET(pthread_internal_t, errno_value, 660);
+  CHECK_OFFSET(pthread_internal_t, startup_handshake_lock, 84);
+  CHECK_OFFSET(pthread_internal_t, mmap_base, 92);
+  CHECK_OFFSET(pthread_internal_t, mmap_size, 96);
+  CHECK_OFFSET(pthread_internal_t, mmap_base_unguarded, 100);
+  CHECK_OFFSET(pthread_internal_t, mmap_size_unguarded, 104);
+  CHECK_OFFSET(pthread_internal_t, vma_name_buffer, 108);
+  CHECK_OFFSET(pthread_internal_t, thread_local_dtors, 140);
+  CHECK_OFFSET(pthread_internal_t, current_dlerror, 144);
+  CHECK_OFFSET(pthread_internal_t, dlerror_buffer, 148);
+  CHECK_OFFSET(pthread_internal_t, bionic_tls, 660);
+  CHECK_OFFSET(pthread_internal_t, errno_value, 664);
   CHECK_SIZE(bionic_tls, 11080);
   CHECK_OFFSET(bionic_tls, key_data, 0);
   CHECK_OFFSET(bionic_tls, locale, 1040);