Merge "Convert bionic dlext test zips to Android.bp"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index da16e65..d809e71 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -24,5 +24,11 @@
     {
       "name": "memunreachable_unit_test"
     }
+  ],
+
+  "hwasan-postsubmit": [
+    {
+      "name": "CtsBionicTestCases"
+    }
   ]
 }
diff --git a/libc/bionic/android_profiling_dynamic.cpp b/libc/bionic/android_profiling_dynamic.cpp
index 9d92a6d..4fafd67 100644
--- a/libc/bionic/android_profiling_dynamic.cpp
+++ b/libc/bionic/android_profiling_dynamic.cpp
@@ -68,8 +68,6 @@
   // does not get loaded for a) non-apps, b) non-profilable apps on user. The default signal
   // disposition is to crash. We do not want the target to crash if we accidentally target a
   // non-app or non-profilable process.
-  //
-  // This does *not* get run for processes that statically link libc, and those will still crash.
   signal(BIONIC_SIGNAL_ART_PROFILER, SIG_IGN);
 }
 
diff --git a/libc/bionic/bionic_systrace.cpp b/libc/bionic/bionic_systrace.cpp
index fd97712..cf5cd82 100644
--- a/libc/bionic/bionic_systrace.cpp
+++ b/libc/bionic/bionic_systrace.cpp
@@ -86,7 +86,21 @@
     return;
   }
 
-  TEMP_FAILURE_RETRY(write(trace_marker_fd, "E|", 2));
+  // This code is intentionally "sub-optimal"; do not optimize this by inlining
+  // the E| string into the write.
+  //
+  // This is because if the const char* string passed to write(trace_marker) is
+  // not in resident memory (e.g. the page of the .rodata section that contains
+  // it has been paged out, or the anonymous page that held a heap-allocated
+  // string has been swapped out to zram), the ftrace code will NOT page it in
+  // and will instead report <faulted>.
+  //
+  // We "fix" this by putting the string on the stack, which is far less likely
+  // to be paged out, and passing a pointer to that instead.
+  //
+  // See b/197620214 for more context on this.
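+  // 'volatile' keeps the compiler from optimizing the stack copy away and
+  // turning this back into a write of a string constant in .rodata.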
+  volatile char buf[2]{'E', '|'};
+  TEMP_FAILURE_RETRY(write(trace_marker_fd, const_cast<const char*>(buf), 2));
 }
 
 ScopedTrace::ScopedTrace(const char* message) : called_end_(false) {
diff --git a/libc/bionic/libc_init_static.cpp b/libc/bionic/libc_init_static.cpp
index 3a8513f..67e692c 100644
--- a/libc/bionic/libc_init_static.cpp
+++ b/libc/bionic/libc_init_static.cpp
@@ -42,6 +42,7 @@
 #include "platform/bionic/macros.h"
 #include "platform/bionic/mte.h"
 #include "platform/bionic/page.h"
+#include "platform/bionic/reserved_signals.h"
 #include "private/KernelArgumentBlock.h"
 #include "private/bionic_asm.h"
 #include "private/bionic_asm_note.h"
@@ -331,6 +332,15 @@
 void __libc_init_mte(const void*, size_t, uintptr_t) {}
 #endif  // __aarch64__
 
+void __libc_init_profiling_handlers() {
+  // The dynamic variant of this function is more interesting, but this
+  // at least ensures that static binaries aren't killed by the kernel's
+  // default disposition for these two real-time signals, which would have
+  // handlers installed if this were a dynamic binary.
+  signal(BIONIC_SIGNAL_PROFILER, SIG_IGN);
+  signal(BIONIC_SIGNAL_ART_PROFILER, SIG_IGN);
+}
+
 __noreturn static void __real_libc_init(void *raw_args,
                                         void (*onexit)(void) __unused,
                                         int (*slingshot)(int, char**, char**),
@@ -351,6 +361,7 @@
   __libc_init_mte(reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR)), getauxval(AT_PHNUM),
                   /*load_bias = */ 0);
   __libc_init_scudo();
+  __libc_init_profiling_handlers();
   __libc_init_fork_handler();
 
   call_ifunc_resolvers();
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index 4cb08b9..ea28822 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -714,7 +714,7 @@
 }
 
 std::string DlExtRelroSharingTest::FindMappingName(void* ptr) {
-  uint64_t addr = reinterpret_cast<uint64_t>(ptr);
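+  // The pointer may carry a tag in its top byte (e.g. under HWASan or MTE), so
+  // strip it before comparing against the untagged addresses in /proc/self/maps.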
+  uint64_t addr = reinterpret_cast<uint64_t>(untag_address(ptr));
   std::string found_name = "<not found>";
 
   EXPECT_TRUE(android::procinfo::ReadMapFile("/proc/self/maps",
diff --git a/tests/heap_tagging_level_test.cpp b/tests/heap_tagging_level_test.cpp
index 5f5904f..edbd995 100644
--- a/tests/heap_tagging_level_test.cpp
+++ b/tests/heap_tagging_level_test.cpp
@@ -52,6 +52,9 @@
   if (mte_supported()) {
     GTEST_SKIP() << "Tagged pointers are not used on MTE hardware.";
   }
+  if (running_with_hwasan()) {
+    GTEST_SKIP() << "Tagged heap pointers feature is disabled under HWASan.";
+  }
 
   void *x = malloc(1);
 
@@ -119,6 +122,9 @@
 
 TEST(heap_tagging_level, none_pointers_untagged) {
 #if defined(__BIONIC__)
+  if (running_with_hwasan()) {
+    GTEST_SKIP() << "HWASan is unaffected by heap tagging level.";
+  }
   EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_NONE));
   std::unique_ptr<int[]> p = std::make_unique<int[]>(4);
   EXPECT_EQ(untag_address(p.get()), p.get());
@@ -135,7 +141,13 @@
 
   EXPECT_FALSE(SetHeapTaggingLevel(static_cast<HeapTaggingLevel>(12345)));
 
-  if (mte_supported() && running_with_mte()) {
+  if (running_with_hwasan()) {
+    // NONE -> ...
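+    // Under HWASan, requests to change the heap tagging level are rejected;
+    // only re-selecting NONE succeeds.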
+    EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_TBI));
+    EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_ASYNC));
+    EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_SYNC));
+    EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_NONE));
+  } else if (mte_supported() && running_with_mte()) {
     // ASYNC -> ...
     EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_TBI));
     EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_ASYNC));
diff --git a/tests/leak_test.cpp b/tests/leak_test.cpp
index 80618e5..0a881e1 100644
--- a/tests/leak_test.cpp
+++ b/tests/leak_test.cpp
@@ -112,11 +112,17 @@
 TEST(pthread_leak, join) {
   SKIP_WITH_NATIVE_BRIDGE;  // http://b/37920774
 
+  // Warm up. HWASan allocates an extra page on the first iteration, but never after.
+  pthread_t thread;
+  ASSERT_EQ(0, pthread_create(
+                   &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
+  ASSERT_EQ(0, pthread_join(thread, nullptr));
+
   LeakChecker lc;
 
   for (int i = 0; i < 100; ++i) {
-    pthread_t thread;
-    ASSERT_EQ(0, pthread_create(&thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
+    ASSERT_EQ(0, pthread_create(
+                     &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
     ASSERT_EQ(0, pthread_join(thread, nullptr));
   }
 }
diff --git a/tests/malloc_test.cpp b/tests/malloc_test.cpp
index 74d5238..1386e30 100644
--- a/tests/malloc_test.cpp
+++ b/tests/malloc_test.cpp
@@ -700,7 +700,10 @@
   FILE* fp = fdopen(tf.fd, "w+");
   tf.release();
   ASSERT_TRUE(fp != nullptr);
-  ASSERT_EQ(0, malloc_info(0, fp));
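+  // If malloc_info(3) fails (e.g. it isn't supported by the current allocator),
+  // we can't tell whether scudo is in use, so conservatively assume it is not.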
+  if (malloc_info(0, fp) != 0) {
+    *allocator_scudo = false;
+    return;
+  }
   ASSERT_EQ(0, fclose(fp));
 
   std::string contents;
@@ -872,7 +875,7 @@
 }
 #endif
 
-TEST(malloc, align_check) {
+void AlignCheck() {
   // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
   // for a discussion of type alignment.
   ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
@@ -896,22 +899,35 @@
 
 #if defined(__ANDROID__)
   // On Android, there is a lot of code that expects certain alignments:
-  // - Allocations of a size that rounds up to a multiple of 16 bytes
-  //   must have at least 16 byte alignment.
-  // - Allocations of a size that rounds up to a multiple of 8 bytes and
-  //   not 16 bytes, are only required to have at least 8 byte alignment.
-  // This is regardless of whether it is in a 32 bit or 64 bit environment.
+  //  1. Allocations of a size that rounds up to a multiple of 16 bytes
+  //     must have at least 16 byte alignment.
+  //  2. Allocations of a size that rounds up to a multiple of 8 bytes but
+  //     not 16 bytes are only required to have at least 8 byte alignment.
+  // In addition, on Android clang has been configured for 64 bit such that:
+  //  3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
+  //  4. Allocations > 8 bytes must be aligned to at least 16 bytes.
+  // For 32 bit environments, only the first two requirements must be met.
 
   // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
   // a discussion of this alignment mess. The code below is enforcing
   // strong-alignment, since who knows what code depends on this behavior now.
+  // As mentioned above, for 64 bit this will enforce the stricter
+  // requirement, since clang expects this behavior on Android now.
   for (size_t i = 1; i <= 128; i++) {
+#if defined(__LP64__)
+    if (i <= 8) {
+      AndroidVerifyAlignment(i, 8);
+    } else {
+      AndroidVerifyAlignment(i, 16);
+    }
+#else
     size_t rounded = (i + 7) & ~7;
     if ((rounded % 16) == 0) {
       AndroidVerifyAlignment(i, 16);
     } else {
       AndroidVerifyAlignment(i, 8);
     }
+#endif
     if (::testing::Test::HasFatalFailure()) {
       return;
     }
@@ -919,6 +935,22 @@
 #endif
 }
 
+TEST(malloc, align_check) {
+  AlignCheck();
+}
+
+// Force GWP-ASan on and verify all alignment checks still pass.
+TEST(malloc, align_check_gwp_asan) {
+#if defined(__BIONIC__)
+  bool force_init = true;
+  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
+
+  AlignCheck();
+#else
+  GTEST_SKIP() << "bionic-only test";
+#endif
+}
+
 // Jemalloc doesn't pass this test right now, so leave it as disabled.
 TEST(malloc, DISABLED_alloc_after_fork) {
   // Both of these need to be a power of 2.