Merge "Fix deadlock/timeout in thread unwinding."
diff --git a/libc/malloc_debug/Android.bp b/libc/malloc_debug/Android.bp
index b04cfe1..760039b 100644
--- a/libc/malloc_debug/Android.bp
+++ b/libc/malloc_debug/Android.bp
@@ -173,6 +173,7 @@
 
     shared_libs: [
         "libbase",
+        "libbacktrace",
         "liblog",
         "libunwindstack",
     ],
diff --git a/libc/malloc_debug/malloc_debug.cpp b/libc/malloc_debug/malloc_debug.cpp
index 6eaac7d..609f030 100644
--- a/libc/malloc_debug/malloc_debug.cpp
+++ b/libc/malloc_debug/malloc_debug.cpp
@@ -30,11 +30,13 @@
 #include <inttypes.h>
 #include <malloc.h>
 #include <pthread.h>
+#include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/cdefs.h>
 #include <sys/param.h>
+#include <sys/syscall.h>
 #include <unistd.h>
 
 #include <mutex>
@@ -44,8 +46,9 @@
 #include <android-base/properties.h>
 #include <android-base/stringprintf.h>
 #include <bionic/malloc_tagged_pointers.h>
-#include <private/bionic_malloc_dispatch.h>
+#include <platform/bionic/reserved_signals.h>
 #include <private/MallocXmlElem.h>
+#include <private/bionic_malloc_dispatch.h>
 
 #include "Config.h"
 #include "DebugData.h"
@@ -131,6 +134,40 @@
 };
 pthread_rwlock_t ScopedConcurrentLock::lock_;
 
+// Use a raw syscall here because the sigprocmask* functions filter out the
+// reserved bionic signals, including the signal this code blocks.
+static inline int __rt_sigprocmask(int how, const sigset64_t* new_set, sigset64_t* old_set,
+                                   size_t sigset_size) {
+  return syscall(SYS_rt_sigprocmask, how, new_set, old_set, sigset_size);
+}
+
+// Block the backtrace signal while in malloc debug routines; otherwise
+// there is a chance of a deadlock and timeout when unwinding. This can
+// occur if a thread is paused while it owns a malloc debug internal lock.
+class ScopedBacktraceSignalBlocker {
+ public:
+  ScopedBacktraceSignalBlocker() {
+    sigemptyset64(&backtrace_set_);
+    sigaddset64(&backtrace_set_, BIONIC_SIGNAL_BACKTRACE);
+    sigset64_t old_set;
+    __rt_sigprocmask(SIG_BLOCK, &backtrace_set_, &old_set, sizeof(backtrace_set_));
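+    // If the backtrace signal was already blocked before this object was
+    // created, leave it blocked when the object is destroyed.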
+    if (sigismember64(&old_set, BIONIC_SIGNAL_BACKTRACE)) {
+      unblock_ = false;
+    }
+  }
+
+  ~ScopedBacktraceSignalBlocker() {
+    if (unblock_) {
+      __rt_sigprocmask(SIG_UNBLOCK, &backtrace_set_, nullptr, sizeof(backtrace_set_));
+    }
+  }
+
+ private:
+  bool unblock_ = true;
+  sigset64_t backtrace_set_;
+};
+
 static void InitAtfork() {
   static pthread_once_t atfork_init = PTHREAD_ONCE_INIT;
   pthread_once(&atfork_init, []() {
@@ -334,8 +371,8 @@
 void debug_get_malloc_leak_info(uint8_t** info, size_t* overall_size, size_t* info_size,
                                 size_t* total_memory, size_t* backtrace_size) {
   ScopedConcurrentLock lock;
-
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   // Verify the arguments.
   if (info == nullptr || overall_size == nullptr || info_size == nullptr || total_memory == nullptr ||
@@ -370,6 +407,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   if (!VerifyPointer(pointer, "malloc_usable_size")) {
     return 0;
@@ -434,6 +472,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   void* pointer = InternalMalloc(size);
 
@@ -510,6 +549,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   if (g_debug->config().options() & RECORD_ALLOCS) {
     g_debug->record->AddEntry(new FreeEntry(pointer));
@@ -528,6 +568,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   if (bytes == 0) {
     bytes = 1;
@@ -607,6 +648,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   if (pointer == nullptr) {
     pointer = InternalMalloc(bytes);
@@ -726,6 +768,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   size_t size;
   if (__builtin_mul_overflow(nmemb, bytes, &size)) {
@@ -792,6 +835,7 @@
 
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   // Avoid any issues where allocations are made that will be freed
   // in the fclose.
@@ -880,6 +924,7 @@
   }
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   if (!(g_debug->config().options() & BACKTRACE)) {
     return 0;
@@ -938,6 +983,7 @@
 
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   std::lock_guard<std::mutex> guard(g_dump_lock);
 
@@ -953,6 +999,7 @@
 void debug_dump_heap(const char* file_name) {
   ScopedConcurrentLock lock;
   ScopedDisableDebugCalls disable;
+  ScopedBacktraceSignalBlocker blocked;
 
   std::lock_guard<std::mutex> guard(g_dump_lock);
 
diff --git a/libc/malloc_debug/tests/malloc_debug_system_tests.cpp b/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
index ac19cf9..1bfe61e 100644
--- a/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
+++ b/libc/malloc_debug/tests/malloc_debug_system_tests.cpp
@@ -42,12 +42,20 @@
 #include <gtest/gtest.h>
 #include <log/log_read.h>
 
+#include <atomic>
 #include <string>
 #include <thread>
 #include <vector>
 
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
 #include <bionic/malloc.h>
 
+// All DISABLED_ tests are designed to be executed after malloc debug
+// is enabled. These tests don't run by default; they are executed by
+// wrapper tests that enable various malloc debug features.
+
 static constexpr time_t kTimeoutSeconds = 10;
 
 extern "C" bool GetInitialArgs(const char*** args, size_t* num_args) {
@@ -525,3 +533,42 @@
   ASSERT_NO_FATAL_FAILURE(FindStrings(pid, std::vector<const char*>{"malloc debug enabled"},
                           std::vector<const char*>{" HAS INVALID TAG ", "USED AFTER FREE ", "UNKNOWN POINTER "}));
 }
+
+TEST(MallocTests, DISABLED_malloc_and_backtrace_deadlock) {
+  std::atomic_bool running(false);
+  pid_t tid;
+  std::thread thread([&tid, &running] {
+    tid = gettid();
+    running = true;
+    while (running) {
+      void* ptr = malloc(200);
+      if (ptr == nullptr) {
+        return;
+      }
+      free(ptr);
+    }
+  });
+
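+  // Spin until the worker thread has published its tid and started allocating.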
+  while (!running) {
+  }
+
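+  // Repeatedly unwind the allocating thread. Unwinding pauses the target
+  // thread, so before this change it could deadlock if the thread was
+  // stopped while it held a malloc debug internal lock.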
+  static constexpr size_t kNumUnwinds = 1000;
+  for (size_t i = 0; i < kNumUnwinds; i++) {
+    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid));
+    ASSERT_TRUE(backtrace->Unwind(0)) << "Failed on unwind " << i;
+    ASSERT_LT(1, backtrace->NumFrames()) << "Failed on unwind " << i;
+  }
+  running = false;
+  thread.join();
+}
+
+TEST(MallocDebugSystemTest, malloc_and_backtrace_deadlock) {
+  pid_t pid;
+  ASSERT_NO_FATAL_FAILURE(Exec("MallocTests.DISABLED_malloc_and_backtrace_deadlock",
+                               "verbose verify_pointers", &pid, 0, 180));
+
+  // Make sure that malloc debug is enabled and that no timeouts occur during
+  // unwinds.
+  ASSERT_NO_FATAL_FAILURE(FindStrings(pid, std::vector<const char*>{"malloc debug enabled"},
+                                      std::vector<const char*>{"Timed out waiting for "}));
+}
diff --git a/libc/platform/bionic/reserved_signals.h b/libc/platform/bionic/reserved_signals.h
index c90fc06..e8e517e 100644
--- a/libc/platform/bionic/reserved_signals.h
+++ b/libc/platform/bionic/reserved_signals.h
@@ -48,6 +48,7 @@
 // in <android/legacy_signal_inlines.h> to match.
 
 #define BIONIC_SIGNAL_POSIX_TIMERS (__SIGRTMIN + 0)
+#define BIONIC_SIGNAL_BACKTRACE (__SIGRTMIN + 1)
 #define BIONIC_SIGNAL_DEBUGGER (__SIGRTMIN + 3)
 #define BIONIC_SIGNAL_PROFILER (__SIGRTMIN + 4)
 #define BIONIC_SIGNAL_ART_PROFILER (__SIGRTMIN + 6)