Merge "Add _Fork()." into main
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 9304013..60a4f61 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -10,6 +10,9 @@
"name": "linker-unit-tests"
},
{
+ "name": "CtsBionicAppTestCases"
+ },
+ {
"name": "CtsBionicTestCases"
},
{
@@ -54,6 +57,9 @@
"name": "linker-unit-tests"
},
{
+ "name": "CtsBionicAppTestCases"
+ },
+ {
"name": "CtsBionicTestCases"
},
{
@@ -88,10 +94,5 @@
{
"name": "CtsBionicTestCases"
}
- ],
- "postsubmit": [
- {
- "name": "CtsBionicAppTestCases"
- }
]
}
diff --git a/libc/bionic/__bionic_get_shell_path.cpp b/libc/bionic/__bionic_get_shell_path.cpp
index 5d22e00..3ea256d 100644
--- a/libc/bionic/__bionic_get_shell_path.cpp
+++ b/libc/bionic/__bionic_get_shell_path.cpp
@@ -29,10 +29,14 @@
#include "private/__bionic_get_shell_path.h"
const char* __bionic_get_shell_path() {
+ // For the host Bionic, we use the standard /bin/sh.
+ // Since Android P there has been a /bin -> /system/bin symlink, so this
+ // would work for the device too, but as long as the NDK supports earlier
+ // API levels, we should probably make sure that this still works in
+ // static binaries run on those OS versions.
#if !defined(__ANDROID__)
- // For the host Bionic, use the standard /bin/sh
return "/bin/sh";
#else
return "/system/bin/sh";
-#endif // if !defined(__ANDROID__)
+#endif
}
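For context on the comment above: __bionic_get_shell_path() exists so that callers like system() and popen() run the right shell on both host and device. A rough sketch of that calling pattern (illustrative only, with a made-up exec_shell() helper; this is not bionic's actual implementation):

    #include <unistd.h>

    #include "private/__bionic_get_shell_path.h"

    // Roughly what a system()/popen()-style helper does in the child process:
    // run "sh -c <command>" using whichever shell path this build provides.
    static void exec_shell(const char* command) {
      const char* shell = __bionic_get_shell_path();
      execl(shell, "sh", "-c", command, static_cast<char*>(nullptr));
      _exit(127);  // Only reached if the exec failed.
    }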
diff --git a/libc/include/android/crash_detail.h b/libc/include/android/crash_detail.h
index 6744385..1889f9f 100644
--- a/libc/include/android/crash_detail.h
+++ b/libc/include/android/crash_detail.h
@@ -34,8 +34,6 @@
*/
#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
#include <sys/cdefs.h>
__BEGIN_DECLS
diff --git a/libc/include/android/set_abort_message.h b/libc/include/android/set_abort_message.h
index 2525242..a778057 100644
--- a/libc/include/android/set_abort_message.h
+++ b/libc/include/android/set_abort_message.h
@@ -43,11 +43,30 @@
typedef struct crash_detail_t crash_detail_t;
/**
- * android_set_abort_message() sets the abort message that will be shown
- * by [debuggerd](https://source.android.com/devices/tech/debug/native-crash).
+ * android_set_abort_message() sets the abort message passed to
+ * [debuggerd](https://source.android.com/devices/tech/debug/native-crash)
+ * for inclusion in any crash.
+ *
* This is meant for use by libraries that deliberately abort so that they can
* provide an explanation. It is used within bionic to implement assert() and
- * all FORTIFY/fdsan aborts.
+ * all FORTIFY and fdsan failures.
+ *
+ * The message appears directly in logcat at the time of crash. It will
+ * also be added both to the tombstone proto, in the crash_detail field, and
+ * to the tombstone text format.
+ *
+ * Tombstone proto definition:
+ * https://cs.android.com/android/platform/superproject/main/+/main:system/core/debuggerd/proto/tombstone.proto
+ *
+ * An app can get hold of these for any `REASON_CRASH_NATIVE` instance of
+ * `android.app.ApplicationExitInfo`.
+ * https://developer.android.com/reference/android/app/ApplicationExitInfo#getTraceInputStream()
+ *
+ * The given message is copied at the time this function is called, and does
+ * not need to be valid until the crash actually happens, but typically this
+ * function is called immediately before aborting. See <android/crash_detail.h>
+ * for an API more suited to the use case where the caller doesn't _expect_ a
+ * crash but would like to see the information _if_ a crash happens.
*/
void android_set_abort_message(const char* _Nullable __msg);
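As a usage illustration of the contract described above (a hedged sketch, not part of this change; fail_fast() and its message are invented for the example): a library that deliberately aborts can set the message immediately before calling abort(), and the string is copied at call time, so a temporary buffer is fine.

    #include <android/set_abort_message.h>

    #include <stdlib.h>

    // Hypothetical helper: abort with an explanation that debuggerd will show
    // in logcat and record in the tombstone (proto and text formats).
    static void fail_fast(const char* why) {
      android_set_abort_message(why);  // Copied immediately by the implementation.
      abort();
    }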
diff --git a/libc/include/bits/page_size.h b/libc/include/bits/page_size.h
index ca434e5..df0cb7f 100644
--- a/libc/include/bits/page_size.h
+++ b/libc/include/bits/page_size.h
@@ -32,7 +32,7 @@
__BEGIN_DECLS
-#if !defined(__BIONIC_NO_PAGE_SIZE_MACRO)
+#if !defined(__BIONIC_NO_PAGE_SIZE_MACRO) || defined(__BIONIC_DEPRECATED_PAGE_SIZE_MACRO)
#define PAGE_SIZE 4096
#define PAGE_MASK (~(PAGE_SIZE - 1))
#endif
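For code that builds with __BIONIC_NO_PAGE_SIZE_MACRO (and has not opted back in via __BIONIC_DEPRECATED_PAGE_SIZE_MACRO), the usual replacement is a runtime query rather than the 4096 constant; a minimal sketch, independent of this change:

    #include <unistd.h>

    // Round a length up to a whole number of pages using the runtime page size
    // (which may not be 4KiB on newer devices) instead of the PAGE_SIZE macro.
    static size_t round_up_to_page(size_t len) {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      return (len + page - 1) & ~(page - 1);
    }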
diff --git a/linker/Android.bp b/linker/Android.bp
index 0533ae9..55daf22 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -116,6 +116,7 @@
"libziparchive",
"libbase",
"libz",
+ "libprocinfo", // For procinfo::MappedFileSize()
"libasync_safe",
@@ -573,6 +574,7 @@
"libasync_safe",
"libbase",
"liblog_for_runtime_apex",
+ "libprocinfo", // For procinfo::MappedFileSize()
],
data_libs: [
diff --git a/linker/linker.cpp b/linker/linker.cpp
index b0caedd..e54a524 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3364,7 +3364,7 @@
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
- if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
return false;
}
@@ -3380,7 +3380,7 @@
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
- if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
@@ -3418,7 +3418,7 @@
}
bool soinfo::protect_relro() {
- if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index d6592af..1860ccc 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,6 +201,7 @@
const ElfW(Phdr)* phdr;
size_t phdr_count;
ElfW(Addr) entry_point;
+ bool should_pad_segments;
};
static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@
result.phdr = elf_reader.loaded_phdr();
result.phdr_count = elf_reader.phdr_count();
result.entry_point = elf_reader.entry_point();
+ result.should_pad_segments = elf_reader.should_pad_segments();
return result;
}
@@ -366,6 +368,7 @@
somain = si;
si->phdr = exe_info.phdr;
si->phnum = exe_info.phdr_count;
+ si->set_should_pad_segments(exe_info.should_pad_segments);
get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->dynamic = nullptr;
@@ -399,7 +402,7 @@
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
(phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
- &note_gnu_property) < 0)) {
+ somain->should_pad_segments(), &note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
strerror(errno));
}
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 82b37a4..821f30d 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -46,6 +46,8 @@
#include "private/CFIShadow.h" // For kLibraryAlignment
#include "private/elf_note.h"
+#include <procinfo/process_map.h>
+
static int GetTargetElfMachine() {
#if defined(__arm__)
return EM_ARM;
@@ -196,7 +198,7 @@
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
- &note_gnu_property_) == 0);
+ should_pad_segments_, &note_gnu_property_) == 0);
}
#endif
}
@@ -756,6 +758,41 @@
return true;
}
+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ size_t phdr_idx, ElfW(Addr)* p_memsz,
+ ElfW(Addr)* p_filesz, bool should_pad_segments) {
+ const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+ const ElfW(Phdr)* next = nullptr;
+ size_t next_idx = phdr_idx + 1;
+
+ if (phdr->p_align == kPageSize || !should_pad_segments) {
+ return;
+ }
+
+ if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+ next = &phdr_table[next_idx];
+ }
+
+ // If this is the last LOAD segment, or the segment has an uninitialized
+ // data (BSS) tail (p_memsz != p_filesz), no extension is needed.
+ if (!next || *p_memsz != *p_filesz) {
+ return;
+ }
+
+ ElfW(Addr) next_start = page_start(next->p_vaddr);
+ ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+ // If adjacent segment mappings overlap, no extension is needed.
+ if (curr_end >= next_start) {
+ return;
+ }
+
+ // Extend the LOAD segment mapping to be contiguous with that of
+ // the next LOAD segment.
+ ElfW(Addr) extend = next_start - curr_end;
+ *p_memsz += extend;
+ *p_filesz += extend;
+}
+
bool ElfReader::LoadSegments() {
for (size_t i = 0; i < phdr_num_; ++i) {
const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -764,18 +801,22 @@
continue;
}
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+ _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
- ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
+ ElfW(Addr) seg_end = seg_start + p_memsz;
ElfW(Addr) seg_page_start = page_start(seg_start);
ElfW(Addr) seg_page_end = page_end(seg_end);
- ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
+ ElfW(Addr) seg_file_end = seg_start + p_filesz;
// File offsets.
ElfW(Addr) file_start = phdr->p_offset;
- ElfW(Addr) file_end = file_start + phdr->p_filesz;
+ ElfW(Addr) file_end = file_start + p_filesz;
ElfW(Addr) file_page_start = page_start(file_start);
ElfW(Addr) file_length = file_end - file_page_start;
@@ -785,12 +826,12 @@
return false;
}
- if (file_end > static_cast<size_t>(file_size_)) {
+ if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
" p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
reinterpret_cast<void*>(phdr->p_filesz),
- reinterpret_cast<void*>(file_end), file_size_);
+ reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
return false;
}
@@ -828,10 +869,25 @@
}
}
- // if the segment is writable, and does not end on a page boundary,
- // zero-fill it until the page limit.
- if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
- memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+ // If the segment is writable, and its memory map extends beyond
+ // the segment contents on file (p_filesz), zero-fill it until the
+ // end of the mapping backed by the file, rounded up to the next
+ // page boundary, as this portion of the mapping corresponds to either
+ // garbage (the partial page at the end) or data from other segments.
+ //
+ // If any part of the mapping extends beyond the file size, there is
+ // no need to zero it since that region is not touchable by userspace
+ // and attempting to do so will cause the kernel to throw a SIGBUS.
+ //
+ // See: system/libprocinfo/include/procinfo/process_map_size.h
+ uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
+ page_end(seg_page_start + file_length),
+ file_offset_ + file_page_start, file_size_);
+ // _seg_file_end is the unextended seg_file_end, based on the original p_filesz.
+ uint64_t _seg_file_end = seg_start + phdr->p_filesz;
+ uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
+ if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
+ memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
}
seg_file_end = page_end(seg_file_end);
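As a worked example of the extension arithmetic in _extend_load_segment_vma() (all numbers invented for illustration), consider an ELF built with 16KiB p_align loaded on a 4KiB-page device:

    // Two PT_LOAD segments, no BSS tail (p_memsz == p_filesz):
    //   LOAD[0]: p_vaddr = 0x0000, p_memsz = p_filesz = 0x5000
    //   LOAD[1]: p_vaddr = 0x8000  (the next 16KiB-aligned boundary)
    // Without padding, the 4KiB-page mappings would be
    //   [0x0000, 0x6000) for LOAD[0]   (page_end(0x5000) == 0x6000)
    //   [0x8000, ...)    for LOAD[1]
    // leaving an unmapped gap at [0x6000, 0x8000). With should_pad_segments:
    //   curr_end   = page_end(0x0000 + 0x5000) = 0x6000
    //   next_start = page_start(0x8000)        = 0x8000
    //   extend     = next_start - curr_end     = 0x2000
    // so LOAD[0] is mapped as if p_memsz/p_filesz were 0x7000, making the two
    // mappings contiguous and avoiding an extra VMA for the gap.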
@@ -864,17 +920,21 @@
* phdr_table_protect_segments and phdr_table_unprotect_segments.
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int extra_prot_flags) {
- const ElfW(Phdr)* phdr = phdr_table;
- const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+ ElfW(Addr) load_bias, int extra_prot_flags,
+ bool should_pad_segments) {
+ for (size_t i = 0; i < phdr_count; ++i) {
+ const ElfW(Phdr)* phdr = &phdr_table[i];
- for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
continue;
}
- ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
- ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+ _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+
+ ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+ ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
if ((prot & PROT_WRITE) != 0) {
@@ -909,19 +969,21 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
if ((prop != nullptr) && prop->IsBTICompatible()) {
prot |= PROT_BTI;
}
#endif
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -937,19 +999,82 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias) {
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+ size_t phdr_count, ElfW(Addr) load_bias,
+ bool should_pad_segments) {
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+ should_pad_segments);
+}
+
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+ const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+ bool should_pad_segments) {
+ // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+ for (size_t index = 0; index < phdr_count; ++index) {
+ const ElfW(Phdr)* phdr = &phdr_table[index];
+
+ if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+ // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+ // LOAD segment mem size, we need to protect only a partial region of the
+ // LOAD segment and therefore cannot avoid a VMA split.
+ //
+ // Note: Don't check the page-aligned mem sizes since the extended protection
+ // may incorrectly write protect non-relocation data.
+ //
+ // Example:
+ //
+ // |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
+ // ----------------------------------------------------------------
+ // | | | | |
+ // SEG X | RO | RO | RW | | SEG Y
+ // | | | | |
+ // ----------------------------------------------------------------
+ // | | |
+ // | | |
+ // | | |
+ // relro_vaddr relro_vaddr relro_vaddr
+ // (load_vaddr) + +
+ // relro_memsz load_memsz
+ //
+ // ----------------------------------------------------------------
+ // | PAGE | PAGE |
+ // ----------------------------------------------------------------
+ // | Potential |
+ // |----- Extended RO ----|
+ // | Protection |
+ //
+ // If the check below uses page aligned mem sizes it will cause incorrect write
+ // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
+ if (relro_phdr->p_memsz < phdr->p_memsz) {
+ return;
+ }
+
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+
+ // Attempt extending the VMA (mprotect range). Without extending the range,
+ // mprotect will only RO protect a part of the extended RW LOAD segment, which
+ // will leave an extra split RW VMA (the gap).
+ _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+ should_pad_segments);
+
+ *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+ return;
+ }
+ }
}
/* Used internally by phdr_table_protect_gnu_relro and
* phdr_table_unprotect_gnu_relro.
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int prot_flags) {
+ ElfW(Addr) load_bias, int prot_flags,
+ bool should_pad_segments) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -976,6 +1101,8 @@
// that it starts on a page boundary.
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+ _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+ should_pad_segments);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1000,12 +1127,14 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias) {
- return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, bool should_pad_segments) {
+ return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+ should_pad_segments);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index e5b87bb..4deed33 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,13 +128,14 @@
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias);
+ ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias);
+ ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade..5b58895 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,7 +187,8 @@
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias) < 0) {
+ relocator.si->load_bias,
+ relocator.si->should_pad_segments()) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
@@ -197,7 +198,8 @@
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias) < 0) {
+ relocator.si->load_bias,
+ relocator.si->should_pad_segments()) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
diff --git a/tests/Android.bp b/tests/Android.bp
index 78c2c10..4e9192e 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -785,6 +785,7 @@
],
static_libs: [
"libbase",
+ "libprocinfo",
],
include_dirs: [
"bionic/libc",
diff --git a/tests/__cxa_demangle_test.cpp b/tests/__cxa_demangle_test.cpp
index d400619..e13410c 100644
--- a/tests/__cxa_demangle_test.cpp
+++ b/tests/__cxa_demangle_test.cpp
@@ -28,11 +28,39 @@
#include <cxxabi.h>
#include <gtest/gtest.h>
+#include <string.h>
TEST(__cxa_demangle, cxa_demangle_fuzz_152588929) {
#if defined(__aarch64__)
+ // Test the C++ demangler on an invalid mangled string. libc++abi currently
+ // parses it like so:
+ // (1 "\006") (I (L e "eeEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" E) E)
+ // There are a few interesting things about this mangled input:
+ // - The IA64 C++ ABI specifies that an FP literal's hex chars are lowercase.
+ // The libc++abi demangler currently accepts uppercase A-F digits, which is
+ // confusing because 'E' is supposed to mark the end of the <expr-primary>.
+ // - libc++abi uses snprintf("%a") which puts an unspecified number of bits
+ // in the digit before the decimal point.
+ // - The identifier name is "\006", and the IA64 C++ ABI spec is explicit
+ // about not specifying the encoding for characters outside of
+ // [_A-Za-z0-9].
+ // - The 'e' type is documented as "long double, __float80", and in practice
+ // the length of the literal depends on the arch. For arm64, it is a
+ // 128-bit FP type encoded using 32 hex chars. The situation with x86-64
+ // Android OTOH is messy because Clang uses 'g' for its 128-bit
+ // long double.
char* p = abi::__cxa_demangle("1\006ILeeeEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE", 0, 0, 0);
- ASSERT_STREQ("\x6<-0x1.cecececececececececececececep+11983", p);
+ if (p && !strcmp(p, "\x6<-0x1.cecececececececececececececep+11983")) {
+ // Prior to llvm.org/D77924, libc++abi left off the "L>" suffix.
+ } else if (p && !strcmp(p, "\x6<-0x1.cecececececececececececececep+11983L>")) {
+ // After llvm.org/D77924, the "L>" suffix is present. libc++abi
+ // accepts A-F digits but decodes each using (digit - 'a' + 10), turning 'E'
+ // into -18.
+ } else {
+ // TODO: Remove the other accepted outputs, because libc++abi probably
+ // should reject this input.
+ ASSERT_EQ(nullptr, p) << p;
+ }
free(p);
#endif
}
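For contrast with the malformed input above, typical __cxa_demangle() usage on a valid name looks like this (a minimal sketch; the mangled name is just an example):

    #include <cxxabi.h>

    #include <stdio.h>
    #include <stdlib.h>

    void demangle_demo() {
      int status = -1;
      // "_Z4fivev" is the mangling of "five()". On success status is 0 and the
      // result is malloc()ed, so the caller must free() it.
      char* demangled = abi::__cxa_demangle("_Z4fivev", nullptr, nullptr, &status);
      if (status == 0) printf("%s\n", demangled);  // prints "five()"
      free(demangled);
    }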
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index d078e50..b702725 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,6 +31,7 @@
#include <android-base/test_utils.h>
#include <sys/mman.h>
+#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/wait.h>
@@ -2046,6 +2047,11 @@
-1, 0));
ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
+ struct stat file_stat;
+ int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+ ASSERT_EQ(ret, 0) << "Failed to stat library";
+ size_t file_size = file_stat.st_size;
+
for (const auto& rec : maps_to_copy) {
uintptr_t offset = rec.addr_start - addr_start;
size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,11 @@
void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
ASSERT_TRUE(map != MAP_FAILED);
- memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+ // Attempting the memcpy below from a portion of the map that is past the end of
+ // the backing file would cause the kernel to throw a SIGBUS.
+ size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
+ rec.offset, file_size);
+ memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
mprotect(map, size, rec.perms);
}