Do not hold hash table lock while backtracing.
A deadlock can occur if the hash table lock is held while performing
a backtrace. Change the code so that the hash table lock is held only
while actually modifying g_hash_table itself, or while modifying an
entry from g_hash_table.
Bug: 22423683
(cherry picked from commit 9fee99b06013787054a312449b94115038e2ad7c)
Change-Id: I72173bfe6f824ceaceea625c24e7851b87467135
diff --git a/libc/bionic/malloc_debug_leak.cpp b/libc/bionic/malloc_debug_leak.cpp
index 64f2112..6a46667 100644
--- a/libc/bionic/malloc_debug_leak.cpp
+++ b/libc/bionic/malloc_debug_leak.cpp
@@ -133,8 +133,9 @@
size |= SIZE_FLAG_ZYGOTE_CHILD;
}
+ // Keep the lock held for as little time as possible to prevent deadlocks.
+ ScopedPthreadMutexLocker locker(&g_hash_table->lock);
HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
-
if (entry != NULL) {
entry->allocations++;
} else {
@@ -302,8 +303,6 @@
void* base = g_malloc_dispatch->malloc(size);
if (base != NULL) {
- ScopedPthreadMutexLocker locker(&g_hash_table->lock);
-
uintptr_t backtrace[BACKTRACE_SIZE];
size_t numEntries = GET_BACKTRACE(backtrace, BACKTRACE_SIZE);
@@ -328,8 +327,6 @@
return;
}
- ScopedPthreadMutexLocker locker(&g_hash_table->lock);
-
// check the guard to make sure it is valid
AllocationEntry* header = to_header(mem);
@@ -342,6 +339,7 @@
}
}
+ ScopedPthreadMutexLocker locker(&g_hash_table->lock);
if (header->guard == GUARD || is_valid_entry(header->entry)) {
// decrement the allocations
HashEntry* entry = header->entry;