Introduce a new heap tagging level, M_HEAP_TAGGING_LEVEL_SYNC.

The SYNC tagging level enables stack trace collection for allocations and
deallocations, so that those stack traces can be shown in the tombstone
when a tag check fault is taken in synchronous tag checking mode.
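
For example, platform code could opt in to the new level through
android_mallopt, roughly as follows. This is only a sketch: it assumes the
existing M_SET_HEAP_TAGGING_LEVEL opcode in <platform/bionic/malloc.h>, and
the helper name is illustrative:

  #include <platform/bionic/malloc.h>

  // Illustrative helper: request synchronous tag checking with stack trace
  // collection. android_mallopt returns false if the transition is not
  // supported, e.g. re-enabling tagging after NONE or switching from TBI.
  bool RequestSyncHeapTagging() {
    HeapTaggingLevel level = M_HEAP_TAGGING_LEVEL_SYNC;
    return android_mallopt(M_SET_HEAP_TAGGING_LEVEL, &level, sizeof(level));
  }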

Bug: 135772972
Change-Id: Ibda9f51b29d2c8e2c993fc74425dea7bfa23ab1e
diff --git a/libc/bionic/heap_tagging.cpp b/libc/bionic/heap_tagging.cpp
index e5e8ec3..c3aa823 100644
--- a/libc/bionic/heap_tagging.cpp
+++ b/libc/bionic/heap_tagging.cpp
@@ -34,13 +34,12 @@
 #include <platform/bionic/mte_kernel.h>
 
 extern "C" void scudo_malloc_disable_memory_tagging();
+extern "C" void scudo_malloc_set_track_allocation_stacks(int);
 
 static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;
 
 void SetDefaultHeapTaggingLevel() {
 #if defined(__aarch64__)
-#define PR_SET_TAGGED_ADDR_CTRL 55
-#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
 #ifdef ANDROID_EXPERIMENTAL_MTE
   // First, try enabling MTE in asynchronous mode, with tag 0 excluded. This will fail if the kernel
   // or hardware doesn't support MTE, and we will fall back to just enabling tagged pointers in
@@ -81,34 +80,48 @@
 
   switch (tag_level) {
     case M_HEAP_TAGGING_LEVEL_NONE:
+#if defined(USE_SCUDO)
+      scudo_malloc_disable_memory_tagging();
+#endif
+      if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
+        __libc_globals.mutate([](libc_globals* globals) {
+          // Preserve the untag mask (we still want to untag pointers when passing them to the
+          // allocator), but clear the fixed tag and the check mask, so that pointers are no longer
+          // tagged and checks no longer happen.
+          globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT);
+        });
+      }
       break;
     case M_HEAP_TAGGING_LEVEL_TBI:
     case M_HEAP_TAGGING_LEVEL_ASYNC:
+    case M_HEAP_TAGGING_LEVEL_SYNC:
       if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
         error_log(
             "SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
-      } else {
-        error_log("SetHeapTaggingLevel: switching between TBI and ASYNC is not supported");
+        return false;
+      } else if (tag_level == M_HEAP_TAGGING_LEVEL_TBI ||
+                 heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
+        error_log("SetHeapTaggingLevel: switching between TBI and ASYNC/SYNC is not supported");
+        return false;
       }
-      return false;
+
+      if (tag_level == M_HEAP_TAGGING_LEVEL_ASYNC) {
+#if defined(USE_SCUDO)
+        scudo_malloc_set_track_allocation_stacks(0);
+#endif
+      } else if (tag_level == M_HEAP_TAGGING_LEVEL_SYNC) {
+#if defined(USE_SCUDO)
+        scudo_malloc_set_track_allocation_stacks(1);
+#endif
+      }
+      break;
     default:
       error_log("SetHeapTaggingLevel: unknown tagging level");
       return false;
   }
+
   heap_tagging_level = tag_level;
   info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);
 
-  if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
-#if defined(USE_SCUDO)
-    scudo_malloc_disable_memory_tagging();
-#endif
-    __libc_globals.mutate([](libc_globals* globals) {
-      // Preserve the untag mask (we still want to untag pointers when passing them to the
-      // allocator if we were doing so before), but clear the fixed tag and the check mask,
-      // so that pointers are no longer tagged and checks no longer happen.
-      globals->heap_pointer_tag &= 0xffull << UNTAG_SHIFT;
-    });
-  }
-
   return true;
 }
diff --git a/libc/bionic/malloc_common_dynamic.cpp b/libc/bionic/malloc_common_dynamic.cpp
index da87c33..6a82ae3 100644
--- a/libc/bionic/malloc_common_dynamic.cpp
+++ b/libc/bionic/malloc_common_dynamic.cpp
@@ -366,6 +366,9 @@
   return true;
 }
 
+extern "C" const char* __scudo_get_stack_depot_addr();
+extern "C" const char* __scudo_get_region_info_addr();
+
 // Initializes memory allocation framework once per process.
 static void MallocInitImpl(libc_globals* globals) {
   char prop[PROP_VALUE_MAX];
@@ -373,6 +376,11 @@
 
   MaybeInitGwpAsanFromLibc(globals);
 
+#if defined(USE_SCUDO)
+  __libc_shared_globals()->scudo_stack_depot = __scudo_get_stack_depot_addr();
+  __libc_shared_globals()->scudo_region_info = __scudo_get_region_info_addr();
+#endif
+
   // Prefer malloc debug since it existed first and is a more complete
   // malloc interceptor than the hooks.
   bool hook_installed = false;
diff --git a/libc/platform/bionic/malloc.h b/libc/platform/bionic/malloc.h
index 0ea7e3c..f9eb03f 100644
--- a/libc/platform/bionic/malloc.h
+++ b/libc/platform/bionic/malloc.h
@@ -116,6 +116,8 @@
   M_HEAP_TAGGING_LEVEL_TBI = 1,
   // Enable heap tagging if supported, at a level appropriate for asynchronous memory tag checks.
   M_HEAP_TAGGING_LEVEL_ASYNC = 2,
+  // Enable heap tagging if supported, at a level appropriate for synchronous memory tag checks.
+  M_HEAP_TAGGING_LEVEL_SYNC = 3,
 };
 
 // Manipulates bionic-specific handling of memory allocation APIs such as
diff --git a/libc/private/bionic_globals.h b/libc/private/bionic_globals.h
index 54605db..5c9b726 100644
--- a/libc/private/bionic_globals.h
+++ b/libc/private/bionic_globals.h
@@ -103,6 +103,9 @@
 
   const gwp_asan::AllocatorState* gwp_asan_state = nullptr;
   const gwp_asan::AllocationMetadata* gwp_asan_metadata = nullptr;
+
+  const char* scudo_stack_depot = nullptr;
+  const char* scudo_region_info = nullptr;
 };
 
 __LIBC_HIDDEN__ libc_shared_globals* __libc_shared_globals();