Fix libmemunreachable vs hwasan conflict.
* Scan memory in a non-sanitized function.
* Don't scan hwasan shadow mappings.
Shadow is ~33G and never contains any heap pointers. Scanning it takes
about a minute on my device.
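
For illustration, a minimal standalone sketch of the non-sanitized read
technique (the function name below is illustrative, not part of the
library):

  #include <stdint.h>

  // Hedged sketch: read a word while suppressing asan/hwasan pointer
  // checks, mirroring the ReadWordAtAddressUnsafe helper added below.
  __attribute__((no_sanitize("address", "hwaddress")))
  uintptr_t read_word_unchecked(uintptr_t addr) {
    return *reinterpret_cast<uintptr_t*>(addr);
  }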
Bug: 112438058
Test: memunreachable_test with SANITIZE_TARGET=hwaddress
the test still fails, but it now completes in under a minute and produces
no hwasan reports or new tombstones
Change-Id: Ibe4811c67609ca9fe317edc250993b915570a638
diff --git a/libmemunreachable/HeapWalker.cpp b/libmemunreachable/HeapWalker.cpp
index 89837f7..e11f079 100644
--- a/libmemunreachable/HeapWalker.cpp
+++ b/libmemunreachable/HeapWalker.cpp
@@ -59,12 +59,19 @@
}
}
+// Sanitizers may consider certain memory inaccessible through certain pointers.
+// With MTE this will need to use unchecked instructions or disable tag checking globally.
+static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
+ __attribute__((no_sanitize("address", "hwaddress"))) {
+ return *reinterpret_cast<uintptr_t*>(word_ptr);
+}
+
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
walking_ptr_ = word_ptr;
// This access may segfault if the process under test has done something strange,
// for example mprotect(PROT_NONE) on a native heap page. If so, it will be
// caught and handled by mmaping a zero page over the faulting page.
- uintptr_t value = *reinterpret_cast<uintptr_t*>(word_ptr);
+ uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
walking_ptr_ = 0;
if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
diff --git a/libmemunreachable/MemUnreachable.cpp b/libmemunreachable/MemUnreachable.cpp
index 3d7b8a8..299c320 100644
--- a/libmemunreachable/MemUnreachable.cpp
+++ b/libmemunreachable/MemUnreachable.cpp
@@ -217,6 +217,10 @@
return ret == 0;
}
+static bool is_sanitizer_mapping(const allocator::string& s) {
+ return s == "[anon:low shadow]" || s == "[anon:high shadow]" || has_prefix(s, "[anon:hwasan");
+}
+
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
@@ -258,7 +262,8 @@
} else if (mapping_name.size() == 0) {
globals_mappings.emplace_back(*it);
} else if (has_prefix(mapping_name, "[anon:") &&
- mapping_name != "[anon:leak_detector_malloc]") {
+ mapping_name != "[anon:leak_detector_malloc]" &&
+ !is_sanitizer_mapping(mapping_name)) {
// TODO(ccross): it would be nice to treat named anonymous mappings as
// possible leaks, but naming something in a .bss or .data section makes
// it impossible to distinguish them from mmaped and then named mappings.
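
A usage note on the name filter above: a self-contained approximation
(using std::string rather than the library's allocator::string, with an
illustrative function name) behaves like this:

  #include <string>

  // Hedged sketch of the mapping-name check added in this change.
  bool looks_like_sanitizer_mapping(const std::string& s) {
    return s == "[anon:low shadow]" || s == "[anon:high shadow]" ||
           s.compare(0, 12, "[anon:hwasan") == 0;
  }

  // e.g. names beginning with "[anon:hwasan" (illustrative) match and are
  // skipped, while an ordinary heap mapping name like "[anon:libc_malloc]"
  // does not and is still scanned.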