Merge "Cleans up logging and comments in ifaddrs."
diff --git a/libc/bionic/android_unsafe_frame_pointer_chase.cpp b/libc/bionic/android_unsafe_frame_pointer_chase.cpp
index 0fb086e..e25867b 100644
--- a/libc/bionic/android_unsafe_frame_pointer_chase.cpp
+++ b/libc/bionic/android_unsafe_frame_pointer_chase.cpp
@@ -57,6 +57,12 @@
auto begin = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
uintptr_t end = __get_thread()->stack_top;
+
+ stack_t ss;
+ if (sigaltstack(nullptr, &ss) == 0 && (ss.ss_flags & SS_ONSTACK)) {
+ end = reinterpret_cast<uintptr_t>(ss.ss_sp) + ss.ss_size;
+ }
+
size_t num_frames = 0;
while (1) {
auto* frame = reinterpret_cast<frame_record*>(begin);
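
The new bounds check above can be illustrated outside of bionic: a minimal standalone sketch (WalkLimit and its caller are invented for illustration, not bionic code) of the same sigaltstack() query, where passing nullptr as the new stack only reads the current configuration and SS_ONSTACK reports whether execution is currently on the alternate signal stack.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical helper (not bionic code): pick the upper bound for a
// frame-pointer walk, given the thread's normal stack top.
static uintptr_t WalkLimit(uintptr_t normal_stack_top) {
  stack_t ss;
  if (sigaltstack(nullptr, &ss) == 0 && (ss.ss_flags & SS_ONSTACK)) {
    // Running in a handler on the alternate stack: valid frames live between
    // ss.ss_sp and ss.ss_sp + ss.ss_size, not below the normal stack top.
    return reinterpret_cast<uintptr_t>(ss.ss_sp) + ss.ss_size;
  }
  return normal_stack_top;
}

int main() {
  int local = 0;
  // Outside a signal handler SS_ONSTACK is clear, so the fallback is used;
  // the address of a local stands in for the real stack top here.
  uintptr_t limit = WalkLimit(reinterpret_cast<uintptr_t>(&local));
  printf("walk limit: %#zx\n", static_cast<size_t>(limit));
  return 0;
}
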
diff --git a/libc/bionic/malloc_heapprofd.cpp b/libc/bionic/malloc_heapprofd.cpp
index 198d2f0..b2a9e3e 100644
--- a/libc/bionic/malloc_heapprofd.cpp
+++ b/libc/bionic/malloc_heapprofd.cpp
@@ -173,26 +173,46 @@
// not ever have a conflict modifying the globals.
if (!atomic_exchange(&gGlobalsMutating, true)) {
if (!atomic_exchange(&gHeapprofdInitInProgress, true)) {
- // If the backing dispatch is GWP-ASan, we should use GWP-ASan as the
- // intermediate dispatch table during initialisation. It may be possible
- // at this point in time that heapprofd is *already* the default dispatch,
- // and as such we don't want to use heapprofd as the backing store
- // (otherwise infinite recursion occurs).
- gPreviousDefaultDispatchTable = nullptr;
const MallocDispatch* default_dispatch = GetDefaultDispatchTable();
- if (DispatchIsGwpAsan(default_dispatch)) {
+
+ // Below, we initialize heapprofd lazily by redirecting libc's malloc() to
+ // call MallocInitHeapprofdHook, which spawns off a thread and initializes
+ // heapprofd. During the short period between now and when heapprofd is
+ // initialized, allocations may need to be serviced. There are three
+ // possible configurations:
+
+ if (default_dispatch == nullptr) {
+ // 1. No malloc hooking has been done (heapprofd, GWP-ASan, etc.). In
+ // this case, everything but malloc() should come from the system
+ // allocator.
+ gPreviousDefaultDispatchTable = nullptr;
+ gEphemeralDispatch = *NativeAllocatorDispatch();
+ } else if (DispatchIsGwpAsan(default_dispatch)) {
+ // 2. GWP-ASan was installed. We should use GWP-ASan for everything but
+ // malloc() in the interim period before heapprofd is properly
+ // installed. After heapprofd is finished installing, we will use
+ // GWP-ASan as heapprofd's backing allocator to allow heapprofd and
+ // GWP-ASan to coexist.
gPreviousDefaultDispatchTable = default_dispatch;
+ gEphemeralDispatch = *default_dispatch;
+ } else {
+ // 3. Heapprofd may *already* be the default dispatch at this point, and
+ // we don't want to use heapprofd as the backing store for itself
+ // (otherwise infinite recursion occurs). We will use the system
+ // allocator functions. Note:
+ // We've checked that no other malloc interceptors are being used by
+ // validating `gHeapprofdIncompatibleHooks` above, so we don't need to
+ // worry about that case here.
+ gPreviousDefaultDispatchTable = nullptr;
+ gEphemeralDispatch = *NativeAllocatorDispatch();
}
- __libc_globals.mutate([](libc_globals* globals) {
- // Wholesale copy the malloc dispatch table here. If the current/default
- // dispatch table is pointing to the malloc_dispatch_table, we can't
- // modify it as it may be racy. This dispatch table copy is ephemeral,
- // and the dispatch tables will be resolved back to the global
- // malloc_dispatch_table after initialization finishes.
- gEphemeralDispatch = globals->malloc_dispatch_table;
- gEphemeralDispatch.malloc = MallocInitHeapprofdHook;
+ // Now, replace the malloc function so that the next call to malloc() will
+ // initialize heapprofd.
+ gEphemeralDispatch.malloc = MallocInitHeapprofdHook;
+ // And finally, install these new malloc-family interceptors.
+ __libc_globals.mutate([](libc_globals* globals) {
atomic_store(&globals->default_dispatch_table, &gEphemeralDispatch);
if (!MallocLimitInstalled()) {
atomic_store(&globals->current_dispatch_table, &gEphemeralDispatch);
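
The rewritten block above picks a backing allocator, copies its dispatch table into gEphemeralDispatch, and overrides only malloc() with MallocInitHeapprofdHook so the next allocation triggers initialization. A simplified sketch of that lazy-hook pattern, with every name invented here rather than taken from bionic:

#include <atomic>
#include <cstdlib>
#include <stddef.h>

// Minimal stand-ins for bionic's malloc dispatch machinery.
struct Dispatch {
  void* (*malloc)(size_t);
  void (*free)(void*);
};

static void* SystemMalloc(size_t size) { return ::malloc(size); }
static void SystemFree(void* ptr) { ::free(ptr); }
static const Dispatch kSystemDispatch = {SystemMalloc, SystemFree};

static Dispatch g_ephemeral;  // plays the role of gEphemeralDispatch
static std::atomic<const Dispatch*> g_current{&kSystemDispatch};

static void* InitHook(size_t size) {
  // The real hook spawns heapprofd's initialization thread; this sketch just
  // restores the backing dispatch and services the allocation that fired it.
  g_current.store(&kSystemDispatch, std::memory_order_release);
  return kSystemDispatch.malloc(size);
}

static void InstallLazyHook(const Dispatch* backing) {
  // Copy the backing table wholesale, then redirect only malloc() so the
  // next allocation runs InitHook.
  g_ephemeral = (backing != nullptr) ? *backing : kSystemDispatch;
  g_ephemeral.malloc = InitHook;
  g_current.store(&g_ephemeral, std::memory_order_release);
}

int main() {
  InstallLazyHook(nullptr);  // configuration 1 above: no prior hooks installed
  void* p = g_current.load(std::memory_order_acquire)->malloc(64);
  kSystemDispatch.free(p);
  return 0;
}
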
diff --git a/tests/Android.bp b/tests/Android.bp
index d1afd51..b840f36 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -39,10 +39,14 @@
"-D__STDC_LIMIT_MACROS",
],
header_libs: ["bionic_libc_platform_headers"],
- // Make the bionic tests implicitly test bionic's shadow call stack support.
+ // Ensure that the tests exercise shadow call stack support and
+ // the hint space PAC/BTI instructions.
arch: {
arm64: {
- cflags: ["-fsanitize=shadow-call-stack"],
+ cflags: [
+ "-fsanitize=shadow-call-stack",
+ "-mbranch-protection=standard",
+ ],
},
},
stl: "libc++",
diff --git a/tests/android_unsafe_frame_pointer_chase_test.cpp b/tests/android_unsafe_frame_pointer_chase_test.cpp
index dd04c33..7fa50e1 100644
--- a/tests/android_unsafe_frame_pointer_chase_test.cpp
+++ b/tests/android_unsafe_frame_pointer_chase_test.cpp
@@ -18,6 +18,8 @@
#if defined(__BIONIC__)
+#include <sys/mman.h>
+
#include "platform/bionic/android_unsafe_frame_pointer_chase.h"
// Prevent tail calls inside recurse.
@@ -72,21 +74,25 @@
EXPECT_TRUE(CheckFrames(frames, size));
}
-static void *BacktraceThread(void *) {
+static const char* tester_func() {
size_t size = recurse(kNumFrames, 0, 0);
uintptr_t frames[kNumFrames + 2];
size_t size2 = recurse(kNumFrames, frames, kNumFrames + 2);
if (size2 != size) {
- return (void*)"size2 != size";
+ return "size2 != size";
}
if (!CheckFrames(frames, size)) {
- return (void*)"CheckFrames failed";
+ return "CheckFrames failed";
}
return nullptr;
}
+static void* BacktraceThread(void*) {
+ return (void*)tester_func();
+}
+
TEST(android_unsafe_frame_pointer_chase, pthread) {
pthread_t t;
ASSERT_EQ(0, pthread_create(&t, nullptr, BacktraceThread, nullptr));
@@ -95,4 +101,58 @@
EXPECT_EQ(nullptr, reinterpret_cast<char*>(retval));
}
+static bool g_handler_called;
+static const char* g_handler_tester_result;
+
+static void BacktraceHandler(int) {
+ g_handler_called = true;
+ g_handler_tester_result = tester_func();
+}
+
+static constexpr size_t kStackSize = 16384;
+
+static void* SignalBacktraceThread(void* sp) {
+ stack_t ss;
+ ss.ss_sp = sp;
+ ss.ss_flags = 0;
+ ss.ss_size = kStackSize;
+ sigaltstack(&ss, nullptr);
+
+ struct sigaction s = {};
+ s.sa_handler = BacktraceHandler;
+ s.sa_flags = SA_ONSTACK;
+ sigaction(SIGRTMIN, &s, nullptr);
+
+ raise(SIGRTMIN);
+ return nullptr;
+}
+
+TEST(android_unsafe_frame_pointer_chase, sigaltstack) {
+ // Create threads where the alternate stack appears both after and before the regular stack, and
+ // call android_unsafe_frame_pointer_chase from a signal handler. Without handling for the
+ // alternate signal stack, this would cause false negatives or potential false positives in the
+ // android_unsafe_frame_pointer_chase function.
+ void* stacks =
+ mmap(nullptr, kStackSize * 2, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+
+ for (unsigned i = 0; i != 2; ++i) {
+ pthread_t t;
+ pthread_attr_t attr;
+ ASSERT_EQ(0, pthread_attr_init(&attr));
+ ASSERT_EQ(0, pthread_attr_setstack(&attr, reinterpret_cast<char*>(stacks) + kStackSize * i,
+ kStackSize));
+
+ ASSERT_EQ(0, pthread_create(&t, &attr, SignalBacktraceThread,
+ reinterpret_cast<char*>(stacks) + kStackSize * (1 - i)));
+ void* retval;
+ ASSERT_EQ(0, pthread_join(t, &retval));
+
+ EXPECT_TRUE(g_handler_called);
+ EXPECT_EQ(nullptr, g_handler_tester_result);
+ g_handler_called = false;
+ }
+
+ munmap(stacks, kStackSize * 2);
+}
+
#endif // __BIONIC__
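
As background for the new test: a self-contained sketch, separate from the test above and using invented names, that installs an alternate stack, delivers a signal with SA_ONSTACK, and checks the same SS_ONSTACK condition that android_unsafe_frame_pointer_chase now consults.

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t g_on_alt_stack = 0;

static void Handler(int) {
  stack_t ss;
  // Inside the handler, SS_ONSTACK is set because SA_ONSTACK moved execution
  // onto the alternate stack installed in main().
  if (sigaltstack(nullptr, &ss) == 0 && (ss.ss_flags & SS_ONSTACK)) {
    g_on_alt_stack = 1;
  }
}

int main() {
  static char alt_stack[64 * 1024];
  stack_t ss = {};
  ss.ss_sp = alt_stack;
  ss.ss_size = sizeof(alt_stack);
  sigaltstack(&ss, nullptr);

  struct sigaction sa = {};
  sa.sa_handler = Handler;
  sa.sa_flags = SA_ONSTACK;
  sigaction(SIGUSR1, &sa, nullptr);

  raise(SIGUSR1);
  printf("handler ran on the alternate stack: %d\n",
         static_cast<int>(g_on_alt_stack));
  return 0;
}
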