[MTE] allocate ring buffer for stack history
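
Allocate a per-thread ring buffer to record stack history when the
memtag-stack ABI is in use. The buffer is created by
__allocate_stack_mte_ringbuffer and stored, with its size encoded in
the top byte of the pointer, in the new TLS_SLOT_STACK_MTE slot: for
the main thread at startup if stack MTE is requested, for new threads
in pthread_create, and for already-running threads when
__pthread_internal_remap_stack_with_mte enables the ABI. The buffer is
unmapped again in __pthread_internal_free. The new global
__libc_memtag_stack_abi records whether the process uses the
memtag-stack ABI, so threads created later also get a buffer.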
Test: atest memtag_stack_dlopen_test
Bug: 309446520
Change-Id: Ibf477bcfb832c5eba0244e86cdac5517f054eb49
diff --git a/libc/bionic/heap_tagging.cpp b/libc/bionic/heap_tagging.cpp
index c8a025f..de9b460 100644
--- a/libc/bionic/heap_tagging.cpp
+++ b/libc/bionic/heap_tagging.cpp
@@ -52,6 +52,8 @@
heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level;
#endif
+ __libc_memtag_stack_abi = __libc_shared_globals()->initial_memtag_stack_abi;
+
__libc_globals.mutate([](libc_globals* globals) {
switch (heap_tagging_level) {
case M_HEAP_TAGGING_LEVEL_TBI:
diff --git a/libc/bionic/libc_init_common.cpp b/libc/bionic/libc_init_common.cpp
index c82c52e..939e4e1 100644
--- a/libc/bionic/libc_init_common.cpp
+++ b/libc/bionic/libc_init_common.cpp
@@ -58,6 +58,7 @@
__LIBC_HIDDEN__ constinit WriteProtected<libc_globals> __libc_globals;
__LIBC_HIDDEN__ constinit _Atomic(bool) __libc_memtag_stack;
+__LIBC_HIDDEN__ constinit bool __libc_memtag_stack_abi;
// Not public, but well-known in the BSDs.
__BIONIC_WEAK_VARIABLE_FOR_NATIVE_BRIDGE
diff --git a/libc/bionic/libc_init_static.cpp b/libc/bionic/libc_init_static.cpp
index 3da0a92..ac97376 100644
--- a/libc/bionic/libc_init_static.cpp
+++ b/libc/bionic/libc_init_static.cpp
@@ -289,11 +289,7 @@
// We can't short-circuit the environment override, as `stack` is still inherited from the
// binary's settings.
- if (get_environment_memtag_setting(&level)) {
- if (level == M_HEAP_TAGGING_LEVEL_NONE || level == M_HEAP_TAGGING_LEVEL_TBI) {
- *stack = false;
- }
- }
+ get_environment_memtag_setting(&level);
return level;
}
@@ -329,13 +325,14 @@
bool memtag_stack = false;
HeapTaggingLevel level =
__get_tagging_level(memtag_dynamic_entries, phdr_start, phdr_ct, load_bias, &memtag_stack);
- // This is used by the linker (in linker.cpp) to communicate than any library linked by this
- // executable enables memtag-stack.
- if (__libc_shared_globals()->initial_memtag_stack) {
- if (!memtag_stack) {
- async_safe_format_log(ANDROID_LOG_INFO, "libc", "enabling PROT_MTE as requested by linker");
- }
+ // initial_memtag_stack is used by the linker (in linker.cpp) to communicate that any library
+ // linked by this executable enables memtag-stack.
+ // memtag_stack is also set for static executables if they request memtag stack via the note,
+ // in which case it will differ from initial_memtag_stack.
+ if (__libc_shared_globals()->initial_memtag_stack || memtag_stack) {
memtag_stack = true;
+ __libc_shared_globals()->initial_memtag_stack_abi = true;
+ __get_bionic_tcb()->tls_slot(TLS_SLOT_STACK_MTE) = __allocate_stack_mte_ringbuffer(0, nullptr);
}
if (int64_t timed_upgrade = __get_memtag_upgrade_secs()) {
if (level == M_HEAP_TAGGING_LEVEL_ASYNC) {
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index 5bd4f16..a8d09eb 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -65,6 +65,7 @@
}
void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls) {
+ tcb->thread()->bionic_tcb = tcb;
tcb->thread()->bionic_tls = tls;
tcb->tls_slot(TLS_SLOT_BIONIC_TLS) = tls;
}
@@ -443,6 +444,14 @@
ScopedReadLock locker(&g_thread_creation_lock);
+// This has to be done under g_thread_creation_lock or g_thread_list_lock to avoid racing with
+// __pthread_internal_remap_stack_with_mte.
+#ifdef __aarch64__
+ if (__libc_memtag_stack_abi) {
+ tcb->tls_slot(TLS_SLOT_STACK_MTE) = __allocate_stack_mte_ringbuffer(0, thread);
+ }
+#endif
+
sigset64_t block_all_mask;
sigfillset64(&block_all_mask);
__rt_sigprocmask(SIG_SETMASK, &block_all_mask, &thread->start_mask, sizeof(thread->start_mask));
diff --git a/libc/bionic/pthread_internal.cpp b/libc/bionic/pthread_internal.cpp
index 2342aff..3c8f9d5 100644
--- a/libc/bionic/pthread_internal.cpp
+++ b/libc/bionic/pthread_internal.cpp
@@ -33,10 +33,12 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/prctl.h>
#include <async_safe/log.h>
#include <bionic/reserved_signals.h>
+#include "bionic/tls_defines.h"
#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
@@ -71,8 +73,21 @@
g_thread_list = thread->next;
}
}
+// N.B. this is NOT the page size, but 4096. This is hardcoded in the codegen.
+// See
+// https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
+constexpr size_t kStackMteRingbufferSizeMultiplier = 4096;
static void __pthread_internal_free(pthread_internal_t* thread) {
+#ifdef __aarch64__
+ if (void* stack_mte_tls = thread->bionic_tcb->tls_slot(TLS_SLOT_STACK_MTE)) {
+ size_t size =
+ kStackMteRingbufferSizeMultiplier * (reinterpret_cast<uintptr_t>(stack_mte_tls) >> 56ULL);
+ void* ptr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(stack_mte_tls) &
+ ((1ULL << 56ULL) - 1ULL));
+ munmap(ptr, size);
+ }
+#endif
if (thread->mmap_size != 0) {
// Free mapped space, including thread stack and pthread_internal_t.
munmap(thread->mmap_base, thread->mmap_size);
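
For the record, a minimal standalone sketch (not part of the patch) of the
tagged-pointer scheme used above: __allocate_stack_mte_ringbuffer stores
1 << n in the (otherwise ignored) top byte of the pointer, and
__pthread_internal_free recovers the mapping size by multiplying that byte by
kStackMteRingbufferSizeMultiplier. The address and n below are hypothetical.

```c++
#include <cstdint>
#include <cstdio>

constexpr uint64_t kMult = 4096;  // kStackMteRingbufferSizeMultiplier

int main() {
  uint64_t n = 2;                     // size exponent, must be <= 7
  uint64_t addr = 0x7f0000010000ULL;  // hypothetical 2*size-aligned address
  // Encode: stash (1 << n) in the top byte, as the allocator does.
  uint64_t slot = addr | ((1ULL << n) << 56);
  // Decode: recover size and address, as __pthread_internal_free does.
  uint64_t size = kMult * (slot >> 56);      // 4096 * 4 = 16384
  uint64_t ptr = slot & ((1ULL << 56) - 1);  // addr
  printf("size=%llu ptr=%#llx\n", (unsigned long long)size,
         (unsigned long long)ptr);
  return 0;
}
```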
@@ -176,12 +191,70 @@
async_safe_fatal("stack not found in /proc/self/maps");
}
+__LIBC_HIDDEN__ void* __allocate_stack_mte_ringbuffer(size_t n, pthread_internal_t* thread) {
+ if (n > 7) async_safe_fatal("error: invalid mte stack ring buffer size");
+ // Allocation needs to be aligned to 2*size to make the fancy code-gen work.
+ // So we allocate 3*size - pagesz bytes, which will always contain size bytes
+ // aligned to 2*size, and unmap the unneeded part.
+ // See
+ // https://github.com/search?q=repo%3Allvm/llvm-project%20AArch64StackTagging%3A%3AinsertBaseTaggedPointer&type=code
+ //
+ // In the worst case, we get an allocation that is one page past the properly
+ // aligned address, in which case we have to unmap the previous
+ // 2*size - pagesz bytes. In that case, we still have size properly aligned
+ // bytes left.
+ size_t size = (1 << n) * kStackMteRingbufferSizeMultiplier;
+ size_t pgsize = page_size();
+
+ size_t alloc_size = __BIONIC_ALIGN(3 * size - pgsize, pgsize);
+ void* allocation_ptr =
+ mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (allocation_ptr == MAP_FAILED)
+ async_safe_fatal("error: failed to allocate stack mte ring buffer");
+ uintptr_t allocation = reinterpret_cast<uintptr_t>(allocation_ptr);
+
+ size_t alignment = 2 * size;
+ uintptr_t aligned_allocation = __BIONIC_ALIGN(allocation, alignment);
+ if (allocation != aligned_allocation) {
+ munmap(reinterpret_cast<void*>(allocation), aligned_allocation - allocation);
+ }
+ if (aligned_allocation + size != allocation + alloc_size) {
+ munmap(reinterpret_cast<void*>(aligned_allocation + size),
+ (allocation + alloc_size) - (aligned_allocation + size));
+ }
+
+ const char* name;
+ if (thread == nullptr) {
+ name = "stack_mte_ring:main";
+ } else {
+ // The kernel doesn't copy the name string, but this variable will last at least as long as the
+ // mapped area. We unmap the ring buffer before unmapping the rest of the thread storage.
+ auto& name_buffer = thread->stack_mte_ringbuffer_vma_name_buffer;
+ static_assert(arraysize(name_buffer) >= arraysize("stack_mte_ring:") + 11 + 1);
+ async_safe_format_buffer(name_buffer, arraysize(name_buffer), "stack_mte_ring:%d", thread->tid);
+ name = name_buffer;
+ }
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<void*>(aligned_allocation), size, name);
+
+ // We store the size, in multiples of kStackMteRingbufferSizeMultiplier, in the top byte of the pointer (which is ignored).
+ return reinterpret_cast<void*>(aligned_allocation | ((1ULL << n) << 56ULL));
+}
+
void __pthread_internal_remap_stack_with_mte() {
#if defined(__aarch64__)
- // If process doesn't have MTE enabled, we don't need to do anything.
+ ScopedWriteLock creation_locker(&g_thread_creation_lock);
+ ScopedReadLock list_locker(&g_thread_list_lock);
+ // If the process already uses the memtag-stack ABI, we don't need to do anything.
+ if (__libc_memtag_stack_abi) return;
+ __libc_memtag_stack_abi = true;
+
+ for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
+ if (t->terminating) continue;
+ t->bionic_tcb->tls_slot(TLS_SLOT_STACK_MTE) =
+ __allocate_stack_mte_ringbuffer(0, t->is_main() ? nullptr : t);
+ }
if (!atomic_load(&__libc_globals->memtag)) return;
- bool prev = atomic_exchange(&__libc_memtag_stack, true);
- if (prev) return;
+ if (atomic_exchange(&__libc_memtag_stack, true)) return;
uintptr_t lo, hi;
__find_main_stack_limits(&lo, &hi);
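
The over-allocation in __allocate_stack_mte_ringbuffer can be sanity-checked
with a small standalone sketch (not part of the patch); n = 0, a 4 KiB page,
and the mmap result below are assumed/hypothetical. In the worst case the
head trim is 2*size - pgsz bytes and the tail trim is zero, leaving exactly
size bytes aligned to 2*size.

```c++
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t size = (1ULL << 0) * 4096;  // ring buffer size for n = 0
  const uint64_t pgsz = 4096;                // assumed page size
  const uint64_t alignment = 2 * size;
  // Mirrors alloc_size = __BIONIC_ALIGN(3 * size - pgsz, pgsz).
  const uint64_t alloc_size = ((3 * size - pgsz) + pgsz - 1) & ~(pgsz - 1);

  // Worst case: mmap returns one page past a 2*size-aligned address.
  const uint64_t allocation = 0x10000 + pgsz;  // hypothetical mmap result
  const uint64_t aligned = (allocation + alignment - 1) & ~(alignment - 1);
  printf("head trim=%llu tail trim=%llu\n",
         (unsigned long long)(aligned - allocation),
         (unsigned long long)((allocation + alloc_size) - (aligned + size)));
  return 0;
}
```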
@@ -189,8 +262,6 @@
PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
async_safe_fatal("error: failed to set PROT_MTE on main thread");
}
- ScopedWriteLock creation_locker(&g_thread_creation_lock);
- ScopedReadLock list_locker(&g_thread_list_lock);
for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
if (t->terminating || t->is_main()) continue;
if (mprotect(t->mmap_base_unguarded, t->mmap_size_unguarded,
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index 091f711..b8412d1 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -178,6 +178,10 @@
bionic_tls* bionic_tls;
int errno_value;
+
+ bionic_tcb* bionic_tcb;
+ char stack_mte_ringbuffer_vma_name_buffer[32];
+
bool is_main() { return start_routine == nullptr; }
};
@@ -209,6 +213,7 @@
__LIBC_HIDDEN__ void __pthread_internal_remove(pthread_internal_t* thread);
__LIBC_HIDDEN__ void __pthread_internal_remove_and_free(pthread_internal_t* thread);
__LIBC_HIDDEN__ void __find_main_stack_limits(uintptr_t* low, uintptr_t* high);
+__LIBC_HIDDEN__ void* __allocate_stack_mte_ringbuffer(size_t n, pthread_internal_t* thread);
static inline __always_inline bionic_tcb* __get_bionic_tcb() {
return reinterpret_cast<bionic_tcb*>(&__get_tls()[MIN_TLS_SLOT]);
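
As a cross-check on the new 32-byte stack_mte_ringbuffer_vma_name_buffer
field and the static_assert in pthread_internal.cpp above, a standalone
sketch (not part of the patch):

```c++
// "stack_mte_ring:" is 15 characters plus NUL; a %d-formatted int tid
// needs at most 11 characters; with the terminator, 28 bytes fit in 32.
static_assert(sizeof("stack_mte_ring:") == 16);
static_assert(sizeof("stack_mte_ring:") + 11 + 1 <= 32);
```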