Add a platform API for setting an allocation limit.

Introduce an M_SET_ALLOCATION_LIMIT enumerator for android_mallopt(),
which can be used to set an upper bound on the total size of all
allocations made using the memory allocation APIs.
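
As a rough usage sketch (not part of this change; the header location
and exact calling convention are assumptions based on the description
above), a program could set the limit like this:

  #include <malloc.h>  // assumed to declare android_mallopt() for platform code

  bool limit_allocations_to_512mb() {
    // Hypothetical helper; the 512MB value is illustrative only.
    size_t limit_bytes = 512 * 1024 * 1024;
    return android_mallopt(M_SET_ALLOCATION_LIMIT, &limit_bytes,
                           sizeof(limit_bytes));
  }

Allocations that would push the running total past the limit are then
expected to fail rather than succeed.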

This is useful for programs such as audioextractor and mediaserver,
which need to set such a limit as a security mitigation. Currently
these programs use setrlimit(RLIMIT_AS), which caps address space
rather than the total allocation size they actually want to control.
RLIMIT_AS is also problematic under sanitizers, which reserve large
amounts of address space for shadow memory, and especially under
shadow call stack, which requires 16MB of address space per thread.
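
For contrast, the mitigation being replaced looks roughly like this
(sketch only, not code from this change):

  #include <sys/resource.h>

  // Caps total address space, not allocation size, so sanitizer shadow
  // memory and shadow call stack reservations all count against it.
  void limit_address_space(rlim_t bytes) {
    struct rlimit limit;
    limit.rlim_cur = bytes;
    limit.rlim_max = bytes;
    setrlimit(RLIMIT_AS, &limit);
  }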

Add new unit tests for bionic.

Add new unit tests for malloc debug that verify malloc debug still
works correctly for nearly every allocation function while the
allocation limit is enabled.
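
A hypothetical sketch of the kind of check those tests make (the test
and helper names here are illustrative, not the actual test code):

  #include <gtest/gtest.h>
  #include <malloc.h>  // assumed declaration point for android_mallopt()
  #include <stdlib.h>

  TEST(MallocDebugLimitSketch, allocation_functions_work_under_limit) {
    // Set a generous limit, then verify the usual allocation entry
    // points still succeed through malloc debug.
    size_t limit_bytes = 100 * 1024 * 1024;
    ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT, &limit_bytes,
                                sizeof(limit_bytes)));

    void* ptr = malloc(1024);
    ASSERT_TRUE(ptr != nullptr);
    ptr = realloc(ptr, 2048);
    ASSERT_TRUE(ptr != nullptr);
    free(ptr);

    ASSERT_EQ(0, posix_memalign(&ptr, 16, 1024));
    free(ptr);
  }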

Bug: 118642754
Test: Ran bionic-unit-tests/bionic-unit-tests-static.
Test: Ran malloc debug tests and perfetto integration tests.
Change-Id: I735403c4d2c87f00fb2cdef81d00af0af446b2bb
diff --git a/libc/bionic/malloc_heapprofd.cpp b/libc/bionic/malloc_heapprofd.cpp
index 9cab67a..eda54ce 100644
--- a/libc/bionic/malloc_heapprofd.cpp
+++ b/libc/bionic/malloc_heapprofd.cpp
@@ -32,6 +32,7 @@
 
 #include <dlfcn.h>
 #include <fcntl.h>
+#include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -118,10 +119,27 @@
     return;
   }
 
-  if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
-    __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, &__heapprofd_init_dispatch);
-    });
+  // Checking this variable is only necessary here, where this update can
+  // race with the change that enables the allocation limit. No other place
+  // that modifies the globals can conflict.
+  if (!atomic_exchange(&gGlobalsMutating, true)) {
+    if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
+      __libc_globals.mutate([](libc_globals* globals) {
+        atomic_store(&globals->default_dispatch_table, &__heapprofd_init_dispatch);
+        auto dispatch_table = GetDispatchTable();
+        if (dispatch_table == nullptr || dispatch_table == &globals->malloc_dispatch_table) {
+          atomic_store(&globals->current_dispatch_table, &__heapprofd_init_dispatch);
+        }
+      });
+    }
+    atomic_store(&gGlobalsMutating, false);
+  } else {
+    // The only way to get here is if the signal has been blocked by a
+    // call to HeapprofdMaskSignal. The raise below only marks the signal
+    // pending; it is delivered after a call to HeapprofdUnmaskSignal, and
+    // this handler runs again. Using this avoids the need for a busy loop
+    // waiting for gGlobalsMutating to change back to false.
+    raise(kHeapprofdSignal);
   }
 }
 
@@ -212,6 +230,24 @@
   sigaction(kHeapprofdSignal, &action, nullptr);
 }
 
+extern "C" int __rt_sigprocmask(int, const sigset64_t*, sigset64_t*, size_t);
+
+void HeapprofdMaskSignal() {
+  sigset64_t mask_set;
+  // Use __rt_sigprocmask directly because sigprocmask64 filters out
+  // this (reserved) signal.
+  __rt_sigprocmask(SIG_SETMASK, nullptr, &mask_set, sizeof(mask_set));
+  sigaddset64(&mask_set, kHeapprofdSignal);
+  __rt_sigprocmask(SIG_SETMASK, &mask_set, nullptr, sizeof(mask_set));
+}
+
+void HeapprofdUnmaskSignal() {
+  sigset64_t mask_set;
+  __rt_sigprocmask(SIG_SETMASK, nullptr, &mask_set, sizeof(mask_set));
+  sigdelset64(&mask_set, kHeapprofdSignal);
+  __rt_sigprocmask(SIG_SETMASK, &mask_set, nullptr, sizeof(mask_set));
+}
+
 static void DisplayError(int) {
   error_log("Cannot install heapprofd while malloc debug/malloc hooks are enabled.");
 }
@@ -251,9 +287,11 @@
 }
 
 static void* InitHeapprofd(void*) {
+  pthread_mutex_lock(&gGlobalsMutateLock);
   __libc_globals.mutate([](libc_globals* globals) {
     CommonInstallHooks(globals);
   });
+  pthread_mutex_unlock(&gGlobalsMutateLock);
 
   // Allow to install hook again to re-initialize heap profiling after the
   // current session finished.
@@ -263,9 +301,15 @@
 
 extern "C" void* MallocInitHeapprofdHook(size_t bytes) {
   if (!atomic_exchange(&gHeapprofdInitHookInstalled, true)) {
+    pthread_mutex_lock(&gGlobalsMutateLock);
     __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, nullptr);
+      auto old_dispatch = GetDefaultDispatchTable();
+      atomic_store(&globals->default_dispatch_table, nullptr);
+      if (GetDispatchTable() == old_dispatch) {
+        atomic_store(&globals->current_dispatch_table, nullptr);
+      }
     });
+    pthread_mutex_unlock(&gGlobalsMutateLock);
 
     pthread_t thread_id;
     if (pthread_create(&thread_id, nullptr, InitHeapprofd, nullptr) != 0) {
@@ -295,9 +339,15 @@
 
 static bool DispatchReset() {
   if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
+    pthread_mutex_lock(&gGlobalsMutateLock);
     __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, nullptr);
+      auto old_dispatch = GetDefaultDispatchTable();
+      atomic_store(&globals->default_dispatch_table, nullptr);
+      if (GetDispatchTable() == old_dispatch) {
+        atomic_store(&globals->current_dispatch_table, nullptr);
+      }
     });
+    pthread_mutex_unlock(&gGlobalsMutateLock);
     atomic_store(&gHeapprofdInitInProgress, false);
     return true;
   }