Merge "Treat static binaries "the same" for the profiling signals."
diff --git a/TEST_MAPPING b/TEST_MAPPING
index da16e65..d809e71 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -24,5 +24,11 @@
{
"name": "memunreachable_unit_test"
}
+ ],
+
+ "hwasan-postsubmit": [
+ {
+ "name": "CtsBionicTestCases"
+ }
]
}
diff --git a/libc/bionic/bionic_systrace.cpp b/libc/bionic/bionic_systrace.cpp
index fd97712..cf5cd82 100644
--- a/libc/bionic/bionic_systrace.cpp
+++ b/libc/bionic/bionic_systrace.cpp
@@ -86,7 +86,21 @@
return;
}
- TEMP_FAILURE_RETRY(write(trace_marker_fd, "E|", 2));
+ // This code is intentionally "sub-optimal"; do not optimize this by inlining
+ // the E| string into the write.
+ //
+ // This is because if the const char* string passed to write(trace_marker) is not
+ // in resident memory (e.g. the page of the .rodata section that contains it has
+  // been paged out, or the anonymous page that contained a heap-based string
+  // has been swapped out to zram), the ftrace code will NOT page it in and instead report
+ // <faulted>.
+ //
+  // We "fix" this by putting the string on the stack, which is less likely
+  // to be paged out, and passing a pointer to that instead.
+ //
+ // See b/197620214 for more context on this.
+ volatile char buf[2]{'E', '|'};
+ TEMP_FAILURE_RETRY(write(trace_marker_fd, const_cast<const char*>(buf), 2));
}
ScopedTrace::ScopedTrace(const char* message) : called_end_(false) {
diff --git a/libc/bionic/gwp_asan_wrappers.cpp b/libc/bionic/gwp_asan_wrappers.cpp
index 6eb1749..8c51347 100644
--- a/libc/bionic/gwp_asan_wrappers.cpp
+++ b/libc/bionic/gwp_asan_wrappers.cpp
@@ -277,3 +277,14 @@
bool DispatchIsGwpAsan(const MallocDispatch* dispatch) {
return dispatch == &gwp_asan_dispatch;
}
+
+bool EnableGwpAsan(bool force_init) {
+ if (GwpAsanInitialized) {
+ return true;
+ }
+
+ bool ret_value;
+ __libc_globals.mutate(
+ [&](libc_globals* globals) { ret_value = MaybeInitGwpAsan(globals, force_init); });
+ return ret_value;
+}
diff --git a/libc/bionic/gwp_asan_wrappers.h b/libc/bionic/gwp_asan_wrappers.h
index a39d50b..c568681 100644
--- a/libc/bionic/gwp_asan_wrappers.h
+++ b/libc/bionic/gwp_asan_wrappers.h
@@ -32,6 +32,9 @@
#include <private/bionic_malloc_dispatch.h>
#include <stddef.h>
+// Enable GWP-ASan, used by android_mallopt.
+bool EnableGwpAsan(bool force_init);
+
// Hooks for libc to possibly install GWP-ASan.
bool MaybeInitGwpAsanFromLibc(libc_globals* globals);
diff --git a/libc/bionic/malloc_common.cpp b/libc/bionic/malloc_common.cpp
index c91efa0..38168ee 100644
--- a/libc/bionic/malloc_common.cpp
+++ b/libc/bionic/malloc_common.cpp
@@ -330,9 +330,8 @@
errno = EINVAL;
return false;
}
- __libc_globals.mutate([&](libc_globals* globals) {
- return MaybeInitGwpAsan(globals, *reinterpret_cast<bool*>(arg));
- });
+
+ return EnableGwpAsan(*reinterpret_cast<bool*>(arg));
}
errno = ENOTSUP;
return false;
diff --git a/libc/bionic/malloc_common_dynamic.cpp b/libc/bionic/malloc_common_dynamic.cpp
index 31d1e69..1f58fda 100644
--- a/libc/bionic/malloc_common_dynamic.cpp
+++ b/libc/bionic/malloc_common_dynamic.cpp
@@ -530,9 +530,8 @@
errno = EINVAL;
return false;
}
- __libc_globals.mutate([&](libc_globals* globals) {
- return MaybeInitGwpAsan(globals, *reinterpret_cast<bool*>(arg));
- });
+
+ return EnableGwpAsan(*reinterpret_cast<bool*>(arg));
}
// Try heapprofd's mallopt, as it handles options not covered here.
return HeapprofdMallopt(opcode, arg, arg_size);
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index e3caf0e..47214b8 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -714,7 +714,7 @@
}
std::string DlExtRelroSharingTest::FindMappingName(void* ptr) {
- uint64_t addr = reinterpret_cast<uint64_t>(ptr);
+ uint64_t addr = reinterpret_cast<uint64_t>(untag_address(ptr));
std::string found_name = "<not found>";
EXPECT_TRUE(android::procinfo::ReadMapFile("/proc/self/maps",
diff --git a/tests/heap_tagging_level_test.cpp b/tests/heap_tagging_level_test.cpp
index 5f5904f..edbd995 100644
--- a/tests/heap_tagging_level_test.cpp
+++ b/tests/heap_tagging_level_test.cpp
@@ -52,6 +52,9 @@
if (mte_supported()) {
GTEST_SKIP() << "Tagged pointers are not used on MTE hardware.";
}
+ if (running_with_hwasan()) {
+ GTEST_SKIP() << "Tagged heap pointers feature is disabled under HWASan.";
+ }
void *x = malloc(1);
@@ -119,6 +122,9 @@
TEST(heap_tagging_level, none_pointers_untagged) {
#if defined(__BIONIC__)
+ if (running_with_hwasan()) {
+ GTEST_SKIP() << "HWASan is unaffected by heap tagging level.";
+ }
EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_NONE));
std::unique_ptr<int[]> p = std::make_unique<int[]>(4);
EXPECT_EQ(untag_address(p.get()), p.get());
@@ -135,7 +141,13 @@
EXPECT_FALSE(SetHeapTaggingLevel(static_cast<HeapTaggingLevel>(12345)));
- if (mte_supported() && running_with_mte()) {
+ if (running_with_hwasan()) {
+ // NONE -> ...
+ EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_TBI));
+ EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_ASYNC));
+ EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_SYNC));
+ EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_NONE));
+ } else if (mte_supported() && running_with_mte()) {
// ASYNC -> ...
EXPECT_FALSE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_TBI));
EXPECT_TRUE(SetHeapTaggingLevel(M_HEAP_TAGGING_LEVEL_ASYNC));
diff --git a/tests/leak_test.cpp b/tests/leak_test.cpp
index 80618e5..0a881e1 100644
--- a/tests/leak_test.cpp
+++ b/tests/leak_test.cpp
@@ -112,11 +112,17 @@
TEST(pthread_leak, join) {
SKIP_WITH_NATIVE_BRIDGE; // http://b/37920774
+ // Warm up. HWASan allocates an extra page on the first iteration, but never after.
+ pthread_t thread;
+ ASSERT_EQ(0, pthread_create(
+ &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
+ ASSERT_EQ(0, pthread_join(thread, nullptr));
+
LeakChecker lc;
for (int i = 0; i < 100; ++i) {
- pthread_t thread;
- ASSERT_EQ(0, pthread_create(&thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
+ ASSERT_EQ(0, pthread_create(
+ &thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
ASSERT_EQ(0, pthread_join(thread, nullptr));
}
}
diff --git a/tests/malloc_test.cpp b/tests/malloc_test.cpp
index eae44ce..1386e30 100644
--- a/tests/malloc_test.cpp
+++ b/tests/malloc_test.cpp
@@ -700,7 +700,10 @@
FILE* fp = fdopen(tf.fd, "w+");
tf.release();
ASSERT_TRUE(fp != nullptr);
- ASSERT_EQ(0, malloc_info(0, fp));
+ if (malloc_info(0, fp) != 0) {
+ *allocator_scudo = false;
+ return;
+ }
ASSERT_EQ(0, fclose(fp));
std::string contents;
@@ -872,7 +875,7 @@
}
#endif
-TEST(malloc, align_check) {
+void AlignCheck() {
// See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
// for a discussion of type alignment.
ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
@@ -896,22 +899,35 @@
#if defined(__ANDROID__)
// On Android, there is a lot of code that expects certain alignments:
- // - Allocations of a size that rounds up to a multiple of 16 bytes
- // must have at least 16 byte alignment.
- // - Allocations of a size that rounds up to a multiple of 8 bytes and
- // not 16 bytes, are only required to have at least 8 byte alignment.
- // This is regardless of whether it is in a 32 bit or 64 bit environment.
+ // 1. Allocations of a size that rounds up to a multiple of 16 bytes
+ // must have at least 16 byte alignment.
+ // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
+ // not 16 bytes, are only required to have at least 8 byte alignment.
+ // In addition, on Android clang has been configured for 64 bit such that:
+ // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
+ // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
+ // For 32 bit environments, only the first two requirements must be met.
// See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
// a discussion of this alignment mess. The code below is enforcing
// strong-alignment, since who knows what code depends on this behavior now.
+ // As mentioned before, for 64 bit this will enforce the higher
+ // requirement since clang expects this behavior on Android now.
for (size_t i = 1; i <= 128; i++) {
+#if defined(__LP64__)
+ if (i <= 8) {
+ AndroidVerifyAlignment(i, 8);
+ } else {
+ AndroidVerifyAlignment(i, 16);
+ }
+#else
size_t rounded = (i + 7) & ~7;
if ((rounded % 16) == 0) {
AndroidVerifyAlignment(i, 16);
} else {
AndroidVerifyAlignment(i, 8);
}
+#endif
if (::testing::Test::HasFatalFailure()) {
return;
}
@@ -919,6 +935,22 @@
#endif
}
+TEST(malloc, align_check) {
+ AlignCheck();
+}
+
+// Force GWP-ASan on and verify all alignment checks still pass.
+TEST(malloc, align_check_gwp_asan) {
+#if defined(__BIONIC__)
+ bool force_init = true;
+ ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
+
+ AlignCheck();
+#else
+ GTEST_SKIP() << "bionic-only test";
+#endif
+}
+
// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
// Both of these need to be a power of 2.
@@ -1268,6 +1300,22 @@
#endif
}
+TEST(android_mallopt, force_init_gwp_asan) {
+#if defined(__BIONIC__)
+ bool force_init = true;
+ ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
+
+ // Verify that trying to do the call again also passes no matter the
+ // value of force_init.
+ force_init = false;
+ ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
+ force_init = true;
+ ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &force_init, sizeof(force_init)));
+#else
+ GTEST_SKIP() << "bionic extension";
+#endif
+}
+
void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
std::vector<void*> allocs;
constexpr int kMaxBytesToCheckZero = 64;