Merge "Add a platform API for setting an allocation limit."
diff --git a/libc/Android.bp b/libc/Android.bp
index 068df98..a27b1ce 100644
--- a/libc/Android.bp
+++ b/libc/Android.bp
@@ -1310,6 +1310,7 @@
     defaults: ["libc_defaults"],
     srcs: libc_common_src_files + [
         "bionic/malloc_common.cpp",
+        "bionic/malloc_limit.cpp",
     ],
     multilib: {
         lib32: {
@@ -1505,6 +1506,7 @@
         "bionic/malloc_common.cpp",
         "bionic/malloc_common_dynamic.cpp",
         "bionic/malloc_heapprofd.cpp",
+        "bionic/malloc_limit.cpp",
         "bionic/NetdClient.cpp",
         "arch-common/bionic/crtend_so.S",
     ],
@@ -1515,6 +1517,7 @@
     srcs: [
         "bionic/dl_iterate_phdr_static.cpp",
         "bionic/malloc_common.cpp",
+        "bionic/malloc_limit.cpp",
     ],
 }
 
diff --git a/libc/bionic/malloc_common.cpp b/libc/bionic/malloc_common.cpp
index bb3aade..f817281 100644
--- a/libc/bionic/malloc_common.cpp
+++ b/libc/bionic/malloc_common.cpp
@@ -46,8 +46,10 @@
 #include <stdio.h>
 
 #include <private/bionic_config.h>
+#include <private/bionic_malloc.h>
 
 #include "malloc_common.h"
+#include "malloc_limit.h"
 
 // =============================================================================
 // Global variables instantations.
@@ -278,8 +280,10 @@
 // Platform-internal mallopt variant.
 // =============================================================================
 #if defined(LIBC_STATIC)
-extern "C" bool android_mallopt(int, void*, size_t) {
-  // There are no options supported on static executables.
+extern "C" bool android_mallopt(int opcode, void* arg, size_t arg_size) {
+  if (opcode == M_SET_ALLOCATION_LIMIT_BYTES) {
+    return LimitEnable(arg, arg_size);
+  }
   errno = ENOTSUP;
   return false;
 }
diff --git a/libc/bionic/malloc_common.h b/libc/bionic/malloc_common.h
index 7bfa33a..a40501d 100644
--- a/libc/bionic/malloc_common.h
+++ b/libc/bionic/malloc_common.h
@@ -66,6 +66,10 @@
   return atomic_load_explicit(&__libc_globals->current_dispatch_table, memory_order_acquire);
 }
 
+static inline const MallocDispatch* GetDefaultDispatchTable() {
+  return atomic_load_explicit(&__libc_globals->default_dispatch_table, memory_order_acquire);
+}
+
 // =============================================================================
 // Log functions
 // =============================================================================
diff --git a/libc/bionic/malloc_common_dynamic.cpp b/libc/bionic/malloc_common_dynamic.cpp
index 9656718..40f497e 100644
--- a/libc/bionic/malloc_common_dynamic.cpp
+++ b/libc/bionic/malloc_common_dynamic.cpp
@@ -57,12 +57,22 @@
 #include <private/bionic_config.h>
 #include <private/bionic_defs.h>
 #include <private/bionic_malloc_dispatch.h>
+#include <private/bionic_malloc.h>
 
 #include <sys/system_properties.h>
 
 #include "malloc_common.h"
 #include "malloc_common_dynamic.h"
 #include "malloc_heapprofd.h"
+#include "malloc_limit.h"
+
+// =============================================================================
+// Global variables instantiations.
+// =============================================================================
+pthread_mutex_t gGlobalsMutateLock = PTHREAD_MUTEX_INITIALIZER;
+
+_Atomic bool gGlobalsMutating = false;
+// =============================================================================
 
 static constexpr MallocDispatch __libc_malloc_default_dispatch
   __attribute__((unused)) = {
@@ -292,7 +302,10 @@
 
   // Do a pointer swap so that all of the functions become valid at once to
   // avoid any initialization order problems.
-  atomic_store(&globals->current_dispatch_table, &globals->malloc_dispatch_table);
+  atomic_store(&globals->default_dispatch_table, &globals->malloc_dispatch_table);
+  if (GetDispatchTable() == nullptr) {
+    atomic_store(&globals->current_dispatch_table, &globals->malloc_dispatch_table);
+  }
 
   info_log("%s: malloc %s enabled", getprogname(), prefix);
 
@@ -431,6 +444,9 @@
 // Platform-internal mallopt variant.
 // =============================================================================
 extern "C" bool android_mallopt(int opcode, void* arg, size_t arg_size) {
+  if (opcode == M_SET_ALLOCATION_LIMIT_BYTES) {
+    return LimitEnable(arg, arg_size);
+  }
   return HeapprofdMallopt(opcode, arg, arg_size);
 }
 // =============================================================================
diff --git a/libc/bionic/malloc_common_dynamic.h b/libc/bionic/malloc_common_dynamic.h
index 8794ed0..755af8f 100644
--- a/libc/bionic/malloc_common_dynamic.h
+++ b/libc/bionic/malloc_common_dynamic.h
@@ -28,7 +28,8 @@
 
 #pragma once
 
-#include <stdbool.h>
+#include <pthread.h>
+#include <stdatomic.h>
 
 #include <private/bionic_globals.h>
 #include <private/bionic_malloc_dispatch.h>
@@ -40,3 +41,7 @@
 void* LoadSharedLibrary(const char* shared_lib, const char* prefix, MallocDispatch* dispatch_table);
 
 bool FinishInstallHooks(libc_globals* globals, const char* options, const char* prefix);
+
+// Lock for globals, to guarantee that only one thread is doing a mutate.
+extern pthread_mutex_t gGlobalsMutateLock;
+extern _Atomic bool gGlobalsMutating;
diff --git a/libc/bionic/malloc_heapprofd.cpp b/libc/bionic/malloc_heapprofd.cpp
index 9cab67a..eda54ce 100644
--- a/libc/bionic/malloc_heapprofd.cpp
+++ b/libc/bionic/malloc_heapprofd.cpp
@@ -32,6 +32,7 @@
 
 #include <dlfcn.h>
 #include <fcntl.h>
+#include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -118,10 +119,27 @@
     return;
   }
 
-  if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
-    __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, &__heapprofd_init_dispatch);
-    });
+  // Checking this variable is only necessary when this code could conflict
+  // with the code that enables the allocation limit. No other place that
+  // modifies the globals can ever conflict with this path.
+  if (!atomic_exchange(&gGlobalsMutating, true)) {
+    if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
+      __libc_globals.mutate([](libc_globals* globals) {
+        atomic_store(&globals->default_dispatch_table, &__heapprofd_init_dispatch);
+        auto dispatch_table = GetDispatchTable();
+        if (dispatch_table == nullptr || dispatch_table == &globals->malloc_dispatch_table) {
+          atomic_store(&globals->current_dispatch_table, &__heapprofd_init_dispatch);
+        }
+      });
+    }
+    atomic_store(&gGlobalsMutating, false);
+  } else {
+    // The only way to get here is if the signal has been blocked by a
+    // call to HeapprofdMaskSignal. The raise below leaves the signal
+    // pending, so it is not delivered until HeapprofdUnmaskSignal is
+    // called. This avoids the need for a busy loop waiting for
+    // gGlobalsMutating to change back to false.
+    raise(kHeapprofdSignal);
   }
 }
 
@@ -212,6 +230,24 @@
   sigaction(kHeapprofdSignal, &action, nullptr);
 }
 
+extern "C" int __rt_sigprocmask(int, const sigset64_t*, sigset64_t*, size_t);
+
+void HeapprofdMaskSignal() {
+  sigset64_t mask_set;
+  // Call __rt_sigprocmask directly because sigprocmask64 filters out
+  // this signal.
+  __rt_sigprocmask(SIG_SETMASK, nullptr, &mask_set, sizeof(mask_set));
+  sigaddset64(&mask_set, kHeapprofdSignal);
+  __rt_sigprocmask(SIG_SETMASK, &mask_set, nullptr, sizeof(mask_set));
+}
+
+void HeapprofdUnmaskSignal() {
+  sigset64_t mask_set;
+  __rt_sigprocmask(SIG_SETMASK, nullptr, &mask_set, sizeof(mask_set));
+  sigdelset64(&mask_set, kHeapprofdSignal);
+  __rt_sigprocmask(SIG_SETMASK, &mask_set, nullptr, sizeof(mask_set));
+}
+
 static void DisplayError(int) {
   error_log("Cannot install heapprofd while malloc debug/malloc hooks are enabled.");
 }
@@ -251,9 +287,11 @@
 }
 
 static void* InitHeapprofd(void*) {
+  pthread_mutex_lock(&gGlobalsMutateLock);
   __libc_globals.mutate([](libc_globals* globals) {
     CommonInstallHooks(globals);
   });
+  pthread_mutex_unlock(&gGlobalsMutateLock);
 
   // Allow to install hook again to re-initialize heap profiling after the
   // current session finished.
@@ -263,9 +301,15 @@
 
 extern "C" void* MallocInitHeapprofdHook(size_t bytes) {
   if (!atomic_exchange(&gHeapprofdInitHookInstalled, true)) {
+    pthread_mutex_lock(&gGlobalsMutateLock);
     __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, nullptr);
+      auto old_dispatch = GetDefaultDispatchTable();
+      atomic_store(&globals->default_dispatch_table, nullptr);
+      if (GetDispatchTable() == old_dispatch) {
+        atomic_store(&globals->current_dispatch_table, nullptr);
+      }
     });
+    pthread_mutex_unlock(&gGlobalsMutateLock);
 
     pthread_t thread_id;
     if (pthread_create(&thread_id, nullptr, InitHeapprofd, nullptr) != 0) {
@@ -295,9 +339,15 @@
 
 static bool DispatchReset() {
   if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
+    pthread_mutex_lock(&gGlobalsMutateLock);
     __libc_globals.mutate([](libc_globals* globals) {
-      atomic_store(&globals->current_dispatch_table, nullptr);
+      auto old_dispatch = GetDefaultDispatchTable();
+      atomic_store(&globals->default_dispatch_table, nullptr);
+      if (GetDispatchTable() == old_dispatch) {
+        atomic_store(&globals->current_dispatch_table, nullptr);
+      }
     });
+    pthread_mutex_unlock(&gGlobalsMutateLock);
     atomic_store(&gHeapprofdInitInProgress, false);
     return true;
   }
diff --git a/libc/bionic/malloc_heapprofd.h b/libc/bionic/malloc_heapprofd.h
index 5a766fc..9e846b6 100644
--- a/libc/bionic/malloc_heapprofd.h
+++ b/libc/bionic/malloc_heapprofd.h
@@ -40,4 +40,8 @@
 
 void HeapprofdInstallErrorSignalHandler();
 
+void HeapprofdMaskSignal();
+
+void HeapprofdUnmaskSignal();
+
 bool HeapprofdMallopt(int optcode, void* arg, size_t arg_size);
diff --git a/libc/bionic/malloc_limit.cpp b/libc/bionic/malloc_limit.cpp
new file mode 100644
index 0000000..ca20e00
--- /dev/null
+++ b/libc/bionic/malloc_limit.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdatomic.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <private/bionic_malloc_dispatch.h>
+
+#include "malloc_common.h"
+#include "malloc_common_dynamic.h"
+#include "malloc_heapprofd.h"
+#include "malloc_limit.h"
+
+__BEGIN_DECLS
+static void* LimitCalloc(size_t n_elements, size_t elem_size);
+static void LimitFree(void* mem);
+static void* LimitMalloc(size_t bytes);
+static void* LimitMemalign(size_t alignment, size_t bytes);
+static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
+static void* LimitRealloc(void* old_mem, size_t bytes);
+static void* LimitAlignedAlloc(size_t alignment, size_t size);
+#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
+static void* LimitPvalloc(size_t bytes);
+static void* LimitValloc(size_t bytes);
+#endif
+
+// Pass through functions.
+static size_t LimitUsableSize(const void* mem);
+static struct mallinfo LimitMallinfo();
+static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
+static void LimitMallocDisable();
+static void LimitMallocEnable();
+static int LimitMallocInfo(int options, FILE* fp);
+static int LimitMallopt(int param, int value);
+__END_DECLS
+
+static constexpr MallocDispatch __limit_dispatch
+  __attribute__((unused)) = {
+    LimitCalloc,
+    LimitFree,
+    LimitMallinfo,
+    LimitMalloc,
+    LimitUsableSize,
+    LimitMemalign,
+    LimitPosixMemalign,
+#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
+    LimitPvalloc,
+#endif
+    LimitRealloc,
+#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
+    LimitValloc,
+#endif
+    LimitIterate,
+    LimitMallocDisable,
+    LimitMallocEnable,
+    LimitMallopt,
+    LimitAlignedAlloc,
+    LimitMallocInfo,
+  };
+
+static _Atomic uint64_t gAllocated;
+static uint64_t gAllocLimit;
+
+static inline bool CheckLimit(size_t bytes) {
+  uint64_t total;
+  if (__predict_false(__builtin_add_overflow(
+                          atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
+                      total > gAllocLimit)) {
+    return false;
+  }
+  return true;
+}
+
+static inline void* IncrementLimit(void* mem) {
+  if (__predict_false(mem == nullptr)) {
+    return nullptr;
+  }
+  atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
+  return mem;
+}
+
+void* LimitCalloc(size_t n_elements, size_t elem_size) {
+  size_t total;
+  if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
+    warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
+                gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
+  }
+  return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
+}
+
+void LimitFree(void* mem) {
+  atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->free(mem);
+  }
+  return Malloc(free)(mem);
+}
+
+void* LimitMalloc(size_t bytes) {
+  if (!CheckLimit(bytes)) {
+    warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->malloc(bytes));
+  }
+  return IncrementLimit(Malloc(malloc)(bytes));
+}
+
+static void* LimitMemalign(size_t alignment, size_t bytes) {
+  if (!CheckLimit(bytes)) {
+    warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
+                gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->memalign(alignment, bytes));
+  }
+  return IncrementLimit(Malloc(memalign)(alignment, bytes));
+}
+
+static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
+  if (!CheckLimit(size)) {
+    warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
+                gAllocLimit);
+    return ENOMEM;
+  }
+  int retval;
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    retval = dispatch_table->posix_memalign(memptr, alignment, size);
+  } else {
+    retval = Malloc(posix_memalign)(memptr, alignment, size);
+  }
+  if (__predict_false(retval != 0)) {
+    return retval;
+  }
+  IncrementLimit(*memptr);
+  return 0;
+}
+
+static void* LimitAlignedAlloc(size_t alignment, size_t size) {
+  if (!CheckLimit(size)) {
+    warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
+                gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
+  }
+  return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
+}
+
+static void* LimitRealloc(void* old_mem, size_t bytes) {
+  size_t old_usable_size = LimitUsableSize(old_mem);
+  void* new_ptr;
+  // The limit only needs to be checked if the allocation will increase in size.
+  if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
+    warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
+                gAllocLimit);
+    // Free the old pointer.
+    LimitFree(old_mem);
+    return nullptr;
+  }
+
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    new_ptr = dispatch_table->realloc(old_mem, bytes);
+  } else {
+    new_ptr = Malloc(realloc)(old_mem, bytes);
+  }
+
+  if (__predict_false(new_ptr == nullptr)) {
+    // This acts as if the pointer was freed.
+    atomic_fetch_sub(&gAllocated, old_usable_size);
+    return nullptr;
+  }
+
+  size_t new_usable_size = LimitUsableSize(new_ptr);
+  // Assumes that most allocations increase in size, rather than shrink.
+  if (__predict_false(old_usable_size > new_usable_size)) {
+    atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
+  } else {
+    atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
+  }
+  return new_ptr;
+}
+
+#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
+static void* LimitPvalloc(size_t bytes) {
+  if (!CheckLimit(bytes)) {
+    warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->pvalloc(bytes));
+  }
+  return IncrementLimit(Malloc(pvalloc)(bytes));
+}
+
+static void* LimitValloc(size_t bytes) {
+  if (!CheckLimit(bytes)) {
+    warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
+    return nullptr;
+  }
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return IncrementLimit(dispatch_table->valloc(bytes));
+  }
+  return IncrementLimit(Malloc(valloc)(bytes));
+}
+#endif
+
+#if defined(LIBC_STATIC)
+static bool EnableLimitDispatchTable() {
+  // This is the only valid way to modify the dispatch tables for a
+  // static executable, so no locks are necessary.
+  __libc_globals.mutate([](libc_globals* globals) {
+    atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
+  });
+  return true;
+}
+#else
+static bool EnableLimitDispatchTable() {
+  HeapprofdMaskSignal();
+  pthread_mutex_lock(&gGlobalsMutateLock);
+  // All other code that calls mutate will grab the gGlobalsMutateLock.
+  // However, there is one case where the lock cannot be acquired: the
+  // signal handler that enables heapprofd. To avoid having two threads
+  // calling mutate at the same time, use an atomic variable to verify
+  // that only this function or the signal handler is calling mutate.
+  // If this function is called at the same time as the signal handler
+  // is running, allow up to five ms for the signal handler to complete
+  // before failing.
+  bool enabled = false;
+  size_t max_tries = 5;
+  while (true) {
+    if (!atomic_exchange(&gGlobalsMutating, true)) {
+      __libc_globals.mutate([](libc_globals* globals) {
+        atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
+      });
+      atomic_store(&gGlobalsMutating, false);
+      enabled = true;
+      break;
+    }
+    if (--max_tries == 0) {
+      break;
+    }
+    usleep(1000);
+  }
+  pthread_mutex_unlock(&gGlobalsMutateLock);
+  HeapprofdUnmaskSignal();
+  if (enabled) {
+    info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
+  } else {
+    error_log("malloc_limit: Failed to enable allocation limit.");
+  }
+  return enabled;
+}
+#endif
+
+bool LimitEnable(void* arg, size_t arg_size) {
+  if (arg == nullptr || arg_size != sizeof(size_t)) {
+    errno = EINVAL;
+    return false;
+  }
+
+  static _Atomic bool limit_enabled;
+  if (atomic_exchange(&limit_enabled, true)) {
+    // The limit can only be enabled once.
+    error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
+    return false;
+  }
+
+  gAllocLimit = *reinterpret_cast<size_t*>(arg);
+#if __has_feature(hwaddress_sanitizer)
+  size_t current_allocated = __sanitizer_get_current_allocated_bytes();
+#else
+  size_t current_allocated;
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    current_allocated = dispatch_table->mallinfo().uordblks;
+  } else {
+    current_allocated = Malloc(mallinfo)().uordblks;
+  }
+#endif
+  atomic_store(&gAllocated, current_allocated);
+
+  return EnableLimitDispatchTable();
+}
+
+static size_t LimitUsableSize(const void* mem) {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->malloc_usable_size(mem);
+  }
+  return Malloc(malloc_usable_size)(mem);
+}
+
+static struct mallinfo LimitMallinfo() {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->mallinfo();
+  }
+  return Malloc(mallinfo)();
+}
+
+static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->iterate(base, size, callback, arg);
+  }
+  return Malloc(iterate)(base, size, callback, arg);
+}
+
+static void LimitMallocDisable() {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    dispatch_table->malloc_disable();
+  } else {
+    Malloc(malloc_disable)();
+  }
+}
+
+static void LimitMallocEnable() {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    dispatch_table->malloc_enable();
+  } else {
+    Malloc(malloc_enable)();
+  }
+}
+
+static int LimitMallocInfo(int options, FILE* fp) {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->malloc_info(options, fp);
+  }
+  return Malloc(malloc_info)(options, fp);
+}
+
+static int LimitMallopt(int param, int value) {
+  auto dispatch_table = GetDefaultDispatchTable();
+  if (__predict_false(dispatch_table != nullptr)) {
+    return dispatch_table->mallopt(param, value);
+  }
+  return Malloc(mallopt)(param, value);
+}
diff --git a/libc/bionic/malloc_limit.h b/libc/bionic/malloc_limit.h
new file mode 100644
index 0000000..282598f
--- /dev/null
+++ b/libc/bionic/malloc_limit.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+// Function prototypes.
+bool LimitEnable(void* arg, size_t arg_size);
diff --git a/libc/libc.map.txt b/libc/libc.map.txt
index 7d30c26..e094967 100644
--- a/libc/libc.map.txt
+++ b/libc/libc.map.txt
@@ -1481,7 +1481,7 @@
     # Used by libandroid_net
     android_getaddrinfofornet; # apex
 
-    # Used by libandroid_runtime
+    # Used by libandroid_runtime and libmedia
     android_mallopt; # apex
     gMallocLeakZygoteChild; # apex
 } LIBC_P;
diff --git a/libc/malloc_debug/Android.bp b/libc/malloc_debug/Android.bp
index f808d0c..bcbd7da 100644
--- a/libc/malloc_debug/Android.bp
+++ b/libc/malloc_debug/Android.bp
@@ -145,6 +145,8 @@
 cc_test {
     name: "malloc_debug_system_tests",
 
+    include_dirs: ["bionic/libc"],
+
     srcs: [
         "tests/malloc_debug_system_tests.cpp",
     ],
diff --git a/libc/malloc_debug/tests/malloc_debug_system_tests.cpp b/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
index ccefb25..4fcd04c 100644
--- a/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
+++ b/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
@@ -37,13 +37,15 @@
 #include <time.h>
 #include <unistd.h>
 
+#include <android-base/stringprintf.h>
 #include <gtest/gtest.h>
-
 #include <log/log.h>
 
 #include <string>
 #include <vector>
 
+#include "private/bionic_malloc.h"
+
 static constexpr time_t kTimeoutSeconds = 5;
 
 static void Exec(const char* test_name, const char* debug_options, pid_t* pid) {
@@ -60,13 +62,15 @@
     ASSERT_NE(0, dup2(fds[1], STDERR_FILENO));
 
     std::vector<const char*> args;
-    args.push_back(testing::internal::GetArgvs()[0].c_str());
+    // Get a copy of this argument so it doesn't disappear on us.
+    std::string exec(testing::internal::GetArgvs()[0]);
+    args.push_back(exec.c_str());
     args.push_back("--gtest_also_run_disabled_tests");
     std::string filter_arg = std::string("--gtest_filter=") + test_name;
     args.push_back(filter_arg.c_str());
     args.push_back(nullptr);
     execv(args[0], reinterpret_cast<char* const*>(const_cast<char**>(args.data())));
-    exit(1);
+    exit(20);
   }
   ASSERT_NE(-1, *pid);
   close(fds[1]);
@@ -196,16 +200,217 @@
   ASSERT_NO_FATAL_FAILURE(FindStrings(pid, std::vector<const char*>{"malloc debug enabled"}));
 }
 
-TEST(MallocTests, DISABLED_leak_memory) {
+static void SetAllocationLimit() {
+  // Set to a large value; this is only to enable the limit code and
+  // verify that malloc debug is still called properly.
+  size_t limit = 500 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+}
+
+static void AlignedAlloc() {
+  void* ptr = aligned_alloc(64, 1152);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1152);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_aligned_alloc) {
+  AlignedAlloc();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_aligned_alloc) {
+  SetAllocationLimit();
+  AlignedAlloc();
+}
+
+static void Calloc() {
+  void* ptr = calloc(1, 1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 1, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_calloc) {
+  Calloc();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_calloc) {
+  SetAllocationLimit();
+  Calloc();
+}
+
+static void Malloc() {
   void* ptr = malloc(1123);
   ASSERT_TRUE(ptr != nullptr);
   memset(ptr, 0, 1123);
 }
 
-TEST(MallocDebugSystemTest, verify_leak) {
-  pid_t pid;
-  ASSERT_NO_FATAL_FAILURE(Exec("MallocTests.DISABLED_leak_memory", "backtrace leak_track", &pid));
+TEST(MallocTests, DISABLED_leak_memory_malloc) {
+  Malloc();
+}
 
-  ASSERT_NO_FATAL_FAILURE(FindStrings(
-      pid, std::vector<const char*>{"malloc debug enabled", "leaked block of size 1123 at"}));
+TEST(MallocTests, DISABLED_leak_memory_limit_malloc) {
+  SetAllocationLimit();
+  Malloc();
+}
+
+static void Memalign() {
+  void* ptr = memalign(64, 1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_memalign) {
+  Memalign();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_memalign) {
+  SetAllocationLimit();
+  Memalign();
+}
+
+static void PosixMemalign() {
+  void* ptr;
+  ASSERT_EQ(0, posix_memalign(&ptr, 64, 1123));
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_posix_memalign) {
+  PosixMemalign();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_posix_memalign) {
+  SetAllocationLimit();
+  PosixMemalign();
+}
+
+static void Reallocarray() {
+  void* ptr = reallocarray(nullptr, 1, 1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_reallocarray) {
+  Reallocarray();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_reallocarray) {
+  SetAllocationLimit();
+  Reallocarray();
+}
+
+static void Realloc() {
+  void* ptr = realloc(nullptr, 1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_realloc) {
+  Realloc();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_realloc) {
+  SetAllocationLimit();
+  Realloc();
+}
+
+#if !defined(__LP64__)
+extern "C" void* pvalloc(size_t);
+
+static void Pvalloc() {
+  void* ptr = pvalloc(1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_pvalloc) {
+  Pvalloc();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_pvalloc) {
+  SetAllocationLimit();
+  Pvalloc();
+}
+
+extern "C" void* valloc(size_t);
+
+static void Valloc() {
+  void* ptr = valloc(1123);
+  ASSERT_TRUE(ptr != nullptr);
+  memset(ptr, 0, 1123);
+}
+
+TEST(MallocTests, DISABLED_leak_memory_valloc) {
+  Valloc();
+}
+
+TEST(MallocTests, DISABLED_leak_memory_limit_valloc) {
+  SetAllocationLimit();
+  Valloc();
+}
+#endif
+
+static void VerifyLeak(const char* test_prefix) {
+  struct FunctionInfo {
+    const char* name;
+    size_t size;
+  };
+  static FunctionInfo functions[] = {
+    {
+      "aligned_alloc",
+      1152,
+    },
+    {
+      "calloc",
+      1123,
+    },
+    {
+      "malloc",
+      1123,
+    },
+    {
+      "memalign",
+      1123,
+    },
+    {
+      "posix_memalign",
+      1123,
+    },
+    {
+      "reallocarray",
+      1123,
+    },
+    {
+      "realloc",
+      1123,
+    },
+#if !defined(__LP64__)
+    {
+      "pvalloc",
+      4096,
+    },
+    {
+      "valloc",
+      1123,
+    }
+#endif
+  };
+
+  for (size_t i = 0; i < sizeof(functions) / sizeof(FunctionInfo); i++) {
+    pid_t pid;
+    SCOPED_TRACE(testing::Message() << functions[i].name << " expected size " << functions[i].size);
+    std::string test = std::string("MallocTests.DISABLED_") + test_prefix + functions[i].name;
+    EXPECT_NO_FATAL_FAILURE(Exec(test.c_str(), "backtrace leak_track", &pid));
+
+    std::string expected_leak = android::base::StringPrintf("leaked block of size %zu at", functions[i].size);
+    EXPECT_NO_FATAL_FAILURE(FindStrings(
+        pid, std::vector<const char*>{"malloc debug enabled", expected_leak.c_str()}));
+  }
+}
+
+TEST(MallocDebugSystemTest, verify_leak) {
+  VerifyLeak("leak_memory_");
+}
+
+TEST(MallocDebugSystemTest, verify_leak_allocation_limit) {
+  VerifyLeak("leak_memory_limit_");
 }
diff --git a/libc/private/bionic_globals.h b/libc/private/bionic_globals.h
index 447b3b9..d73079e 100644
--- a/libc/private/bionic_globals.h
+++ b/libc/private/bionic_globals.h
@@ -55,6 +55,9 @@
   // The malloc_dispatch_table is modified by malloc debug, malloc hooks,
   // and heaprofd. Only one of these modes can be active at any given time.
   _Atomic(const MallocDispatch*) current_dispatch_table;
+  // This pointer is only used by the allocation limit code when both a
+  // limit is enabled and some other hook is enabled at the same time.
+  _Atomic(const MallocDispatch*) default_dispatch_table;
   MallocDispatch malloc_dispatch_table;
 };
 
diff --git a/libc/private/bionic_malloc.h b/libc/private/bionic_malloc.h
index 5f4a75d..a1744aa 100644
--- a/libc/private/bionic_malloc.h
+++ b/libc/private/bionic_malloc.h
@@ -39,6 +39,12 @@
 #define M_INIT_ZYGOTE_CHILD_PROFILING M_INIT_ZYGOTE_CHILD_PROFILING
   M_RESET_HOOKS = 2,
 #define M_RESET_HOOKS M_RESET_HOOKS
+  // Set an upper bound on the total size in bytes of all allocations made
+  // using the memory allocation APIs.
+  //   arg = size_t*
+  //   arg_size = sizeof(size_t)
+  M_SET_ALLOCATION_LIMIT_BYTES = 3,
+#define M_SET_ALLOCATION_LIMIT_BYTES M_SET_ALLOCATION_LIMIT_BYTES
 };
 
 // Manipulates bionic-specific handling of memory allocation APIs such as
diff --git a/tests/malloc_test.cpp b/tests/malloc_test.cpp
index bc6a37b..9380680 100644
--- a/tests/malloc_test.cpp
+++ b/tests/malloc_test.cpp
@@ -18,12 +18,17 @@
 
 #include <elf.h>
 #include <limits.h>
+#include <pthread.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
 #include <malloc.h>
 #include <unistd.h>
 
+#include <atomic>
 #include <tinyxml2.h>
 
 #include <android-base/file.h>
@@ -676,3 +681,239 @@
   GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
 #endif
 }
+
+#if defined(__BIONIC__)
+template <typename FuncType>
+void CheckAllocationFunction(FuncType func) {
+  // Assumes that no more than 108MB of memory is allocated before this.
+  size_t limit = 128 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+  if (!func(20 * 1024 * 1024))
+    exit(1);
+  if (func(128 * 1024 * 1024))
+    exit(1);
+  exit(0);
+}
+#endif
+
+TEST(android_mallopt, set_allocation_limit) {
+#if defined(__BIONIC__)
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction(
+                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
+                void* ptr;
+                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
+              }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction(
+                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
+                void* p = malloc(1024 * 1024);
+                return realloc(p, bytes) != nullptr;
+              }),
+              testing::ExitedWithCode(0), "");
+#if !defined(__LP64__)
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
+              testing::ExitedWithCode(0), "");
+#endif
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}
+
+TEST(android_mallopt, set_allocation_limit_multiple) {
+#if defined(__BIONIC__)
+  // Only the first set should work.
+  size_t limit = 256 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+  limit = 32 * 1024 * 1024;
+  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}
+
+#if defined(__BIONIC__)
+static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
+
+static size_t GetMaxAllocations() {
+  size_t max_pointers = 0;
+  void* ptrs[20];
+  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
+    ptrs[i] = malloc(kAllocationSize);
+    if (ptrs[i] == nullptr) {
+      max_pointers = i;
+      break;
+    }
+  }
+  for (size_t i = 0; i < max_pointers; i++) {
+    free(ptrs[i]);
+  }
+  return max_pointers;
+}
+
+static void VerifyMaxPointers(size_t max_pointers) {
+  // Now verify that we can allocate the same number as before.
+  void* ptrs[20];
+  for (size_t i = 0; i < max_pointers; i++) {
+    ptrs[i] = malloc(kAllocationSize);
+    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
+  }
+
+  // Make sure the next allocation still fails.
+  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
+  for (size_t i = 0; i < max_pointers; i++) {
+    free(ptrs[i]);
+  }
+}
+#endif
+
+TEST(android_mallopt, set_allocation_limit_realloc_increase) {
+#if defined(__BIONIC__)
+  size_t limit = 128 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+
+  size_t max_pointers = GetMaxAllocations();
+  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
+
+  void* memory = malloc(10 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+
+  // Increase size.
+  memory = realloc(memory, 20 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 40 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 60 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 80 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  // Now push past limit.
+  memory = realloc(memory, 130 * 1024 * 1024);
+  ASSERT_TRUE(memory == nullptr);
+
+  VerifyMaxPointers(max_pointers);
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}
+
+TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
+#if defined(__BIONIC__)
+  size_t limit = 100 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+
+  size_t max_pointers = GetMaxAllocations();
+  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
+
+  void* memory = malloc(80 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+
+  // Decrease size.
+  memory = realloc(memory, 60 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 40 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 20 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  memory = realloc(memory, 10 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+  free(memory);
+
+  VerifyMaxPointers(max_pointers);
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}
+
+TEST(android_mallopt, set_allocation_limit_realloc_free) {
+#if defined(__BIONIC__)
+  size_t limit = 100 * 1024 * 1024;
+  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
+
+  size_t max_pointers = GetMaxAllocations();
+  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";
+
+  void* memory = malloc(60 * 1024 * 1024);
+  ASSERT_TRUE(memory != nullptr);
+
+  memory = realloc(memory, 0);
+  ASSERT_TRUE(memory == nullptr);
+
+  VerifyMaxPointers(max_pointers);
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}
+
+#if defined(__BIONIC__)
+static void* SetAllocationLimit(void* data) {
+  std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
+  while (!go->load()) {
+  }
+  size_t limit = 500 * 1024 * 1024;
+  if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
+    return reinterpret_cast<void*>(-1);
+  }
+  return nullptr;
+}
+
+static void SetAllocationLimitMultipleThreads() {
+  std::atomic_bool go;
+  go = false;
+
+  static constexpr size_t kNumThreads = 4;
+  pthread_t threads[kNumThreads];
+  for (size_t i = 0; i < kNumThreads; i++) {
+    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
+  }
+
+  // Let them go all at once.
+  go = true;
+  ASSERT_EQ(0, kill(getpid(), __SIGRTMIN + 4));
+
+  size_t num_successful = 0;
+  for (size_t i = 0; i < kNumThreads; i++) {
+    void* result;
+    ASSERT_EQ(0, pthread_join(threads[i], &result));
+    if (result != nullptr) {
+      num_successful++;
+    }
+  }
+  ASSERT_EQ(1U, num_successful);
+  exit(0);
+}
+#endif
+
+TEST(android_mallopt, set_allocation_limit_multiple_threads) {
+#if defined(__BIONIC__)
+  if (IsDynamic()) {
+    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
+  }
+
+  // Run this a number of times as a stress test.
+  for (size_t i = 0; i < 100; i++) {
+    // Not using ASSERT_EXIT because error messages are not displayed.
+    pid_t pid;
+    if ((pid = fork()) == 0) {
+      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
+    }
+    ASSERT_NE(-1, pid);
+    int status;
+    ASSERT_EQ(pid, wait(&status));
+    ASSERT_EQ(0, WEXITSTATUS(status));
+  }
+#else
+  GTEST_LOG_(INFO) << "This tests a bionic extension.\n";
+#endif
+}