Merge "Revert^2 "Convert Android.mk under bionic to Android.bp"" into main
diff --git a/benchmarks/linker_relocation/include/linker_reloc_bench_asm.h b/benchmarks/linker_relocation/include/linker_reloc_bench_asm.h
index 885e47f..ea63e36 100644
--- a/benchmarks/linker_relocation/include/linker_reloc_bench_asm.h
+++ b/benchmarks/linker_relocation/include/linker_reloc_bench_asm.h
@@ -44,9 +44,7 @@
#elif defined(__riscv)
-// No `lga` in clang unless https://reviews.llvm.org/D107278 lands.
-// `la` is equivalent when using PIC (which we do) though.
-#define GOT_RELOC(sym) la a0, sym
+#define GOT_RELOC(sym) lga a0, sym
#define CALL(sym) call sym@plt
#define DATA_WORD(val) .quad val
#define MAIN .globl main; main: li a0, 0; ret
diff --git a/benchmarks/util.h b/benchmarks/util.h
index 99eed5f..347dc35 100644
--- a/benchmarks/util.h
+++ b/benchmarks/util.h
@@ -71,7 +71,7 @@
bool LockToCPU(int cpu_to_lock);
-static __inline __attribute__ ((__always_inline__)) void MakeAllocationResident(
+static inline __attribute__((__always_inline__)) void MakeAllocationResident(
void* ptr, size_t nbytes, int pagesize) {
uint8_t* data = reinterpret_cast<uint8_t*>(ptr);
for (size_t i = 0; i < nbytes; i += pagesize) {
diff --git a/libc/bionic/execinfo.cpp b/libc/bionic/execinfo.cpp
index d129f7c..e53a037 100644
--- a/libc/bionic/execinfo.cpp
+++ b/libc/bionic/execinfo.cpp
@@ -73,6 +73,11 @@
#elif defined(__aarch64__)
// All instructions are 4 bytes long, skip back one instruction.
ip -= 4;
+#elif defined(__riscv)
+ // Compressed (C extension) instructions are the shortest, at 2 bytes. (Unlike
+ // Thumb, it's non-trivial to recognize C instructions when walking backwards
+ // through the instruction stream.)
+ ip -= 2;
#elif defined(__i386__) || defined(__x86_64__)
// It's difficult to decode exactly where the previous instruction is,
// so subtract 1 to estimate where the instruction lives.
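The per-architecture adjustment above exists because an unwinder hands back return addresses, which point at the instruction after each call; stepping back by the architecture's minimum instruction width makes symbolization land on the call site. A hedged, standalone sketch of that idea (illustrative only, not bionic's actual helper):

#include <cstdint>

// Step a return address back so it points into the call instruction rather
// than the instruction following it.
static uintptr_t adjust_return_address(uintptr_t ip) {
#if defined(__aarch64__)
  return ip - 4;   // all instructions are 4 bytes
#elif defined(__riscv)
  return ip - 2;   // compressed (C) instructions can be as short as 2 bytes
#elif defined(__i386__) || defined(__x86_64__)
  return ip - 1;   // variable-length encoding; 1 byte back is enough for symbolization
#else
  return ip;       // unknown architecture: leave the address untouched
#endif
}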
diff --git a/libc/bionic/posix_timers.cpp b/libc/bionic/posix_timers.cpp
index f522516..ccbbfcf 100644
--- a/libc/bionic/posix_timers.cpp
+++ b/libc/bionic/posix_timers.cpp
@@ -34,6 +34,8 @@
#include <string.h>
#include <time.h>
+#include "private/bionic_lock.h"
+
// System calls.
extern "C" int __rt_sigprocmask(int, const sigset64_t*, sigset64_t*, size_t);
extern "C" int __rt_sigtimedwait(const sigset64_t*, siginfo_t*, const timespec*, size_t);
@@ -60,6 +62,7 @@
int sigev_notify;
// The fields below are only needed for a SIGEV_THREAD timer.
+ Lock startup_handshake_lock;
pthread_t callback_thread;
void (*callback)(sigval_t);
sigval_t callback_argument;
@@ -73,6 +76,18 @@
static void* __timer_thread_start(void* arg) {
PosixTimer* timer = reinterpret_cast<PosixTimer*>(arg);
+ // Check that our parent managed to create the kernel timer and bail if not...
+ timer->startup_handshake_lock.lock();
+ if (timer->kernel_timer_id == -1) {
+ free(timer);
+ return nullptr;
+ }
+
+ // Give ourselves a specific, meaningful name now that we have a kernel timer.
+ char name[16]; // 16 is the kernel-imposed limit.
+ snprintf(name, sizeof(name), "POSIX timer %d", to_kernel_timer_id(timer));
+ pthread_setname_np(timer->callback_thread, name);
+
sigset64_t sigset = {};
sigaddset64(&sigset, TIMER_SIGNAL);
@@ -109,6 +124,7 @@
return -1;
}
+ timer->kernel_timer_id = -1;
timer->sigev_notify = (evp == nullptr) ? SIGEV_SIGNAL : evp->sigev_notify;
// If not a SIGEV_THREAD timer, the kernel can handle it without our help.
@@ -149,6 +165,10 @@
sigaddset64(&sigset, TIMER_SIGNAL);
sigset64_t old_sigset;
+ // Prevent the child thread from running until the timer has been created.
+ timer->startup_handshake_lock.init(false);
+ timer->startup_handshake_lock.lock();
+
// Use __rt_sigprocmask instead of sigprocmask64 to avoid filtering out TIMER_SIGNAL.
__rt_sigprocmask(SIG_BLOCK, &sigset, &old_sigset, sizeof(sigset));
@@ -162,21 +182,21 @@
return -1;
}
+ // Try to create the kernel timer.
sigevent se = *evp;
se.sigev_signo = TIMER_SIGNAL;
se.sigev_notify = SIGEV_THREAD_ID;
se.sigev_notify_thread_id = pthread_gettid_np(timer->callback_thread);
- if (__timer_create(clock_id, &se, &timer->kernel_timer_id) == -1) {
- __timer_thread_stop(timer);
+ rc = __timer_create(clock_id, &se, &timer->kernel_timer_id);
+
+ // Let the child run (whether we created the kernel timer or not).
+ timer->startup_handshake_lock.unlock();
+ // If __timer_create(2) failed, the child will kill itself and free the
+ // timer struct, so all we need to do here is return an error.
+ if (rc == -1) {
return -1;
}
- // Give the thread a specific meaningful name.
- // It can't do this itself because the kernel timer isn't created until after it's running.
- char name[16]; // 16 is the kernel-imposed limit.
- snprintf(name, sizeof(name), "POSIX timer %d", to_kernel_timer_id(timer));
- pthread_setname_np(timer->callback_thread, name);
-
*timer_id = timer;
return 0;
}
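The new startup_handshake_lock turns SIGEV_THREAD timer creation into a two-step handshake: the parent takes the lock before spawning the callback thread, the child blocks on it, and the parent releases it only after __timer_create() has succeeded or failed, at which point the child knows whether to name itself or to clean up and exit. A minimal sketch of that pattern, with std::mutex and std::thread standing in for bionic's internal Lock and pthread plumbing (the names here are hypothetical):

#include <cstdio>
#include <mutex>
#include <thread>

struct FakeTimer {
  std::mutex startup_handshake_lock;
  int kernel_timer_id = -1;  // -1 means "creation failed"
};

static void timer_thread(FakeTimer* timer) {
  // Block until the parent has attempted to create the kernel timer.
  timer->startup_handshake_lock.lock();
  bool created = (timer->kernel_timer_id != -1);
  timer->startup_handshake_lock.unlock();
  if (!created) {
    // Creation failed: the child cleans up after itself and exits quietly.
    delete timer;
    return;
  }
  printf("timer %d is live; the child would now name itself and wait for signals\n",
         timer->kernel_timer_id);
  delete timer;
}

int main() {
  FakeTimer* timer = new FakeTimer;
  // Parent: hold the lock so the child can't run ahead of timer creation.
  timer->startup_handshake_lock.lock();
  std::thread child(timer_thread, timer);
  bool timer_create_succeeded = true;  // stand-in for __timer_create()
  if (timer_create_succeeded) timer->kernel_timer_id = 42;
  // Let the child run, whether or not the kernel timer was created.
  timer->startup_handshake_lock.unlock();
  child.join();
  return 0;
}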
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index 091f711..c2abdea 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -240,7 +240,7 @@
// On LP64, we could use more but there's no obvious advantage to doing
// so, and the various media processes use RLIMIT_AS as a way to limit
// the amount of allocation they'll do.
-#define PTHREAD_GUARD_SIZE max_page_size()
+#define PTHREAD_GUARD_SIZE max_android_page_size()
// SIGSTKSZ (8KiB) is not big enough.
// An snprintf to a stack buffer of size PATH_MAX consumes ~7KiB of stack.
diff --git a/libc/bionic/sys_statvfs.cpp b/libc/bionic/sys_statvfs.cpp
index b3a0aca..3a05c3f 100644
--- a/libc/bionic/sys_statvfs.cpp
+++ b/libc/bionic/sys_statvfs.cpp
@@ -17,7 +17,7 @@
#include <sys/statfs.h>
#include <sys/statvfs.h>
-static __inline void __bionic_statfs_to_statvfs(const struct statfs* src, struct statvfs* dst) {
+static inline void __bionic_statfs_to_statvfs(const struct statfs* src, struct statvfs* dst) {
dst->f_bsize = src->f_bsize;
dst->f_frsize = src->f_frsize;
dst->f_blocks = src->f_blocks;
diff --git a/libc/bionic/sys_thread_properties.cpp b/libc/bionic/sys_thread_properties.cpp
index d1a73b7..d7188f5 100644
--- a/libc/bionic/sys_thread_properties.cpp
+++ b/libc/bionic/sys_thread_properties.cpp
@@ -77,35 +77,9 @@
// Find the thread-pointer register for the given thread.
void** tp_reg = nullptr;
-#if defined(__x86_64__)
- {
- ErrnoRestorer errno_restorer;
- errno = 0;
- uintptr_t fs_base = ptrace(PTRACE_PEEKUSER, tid, offsetof(user_regs_struct, fs_base), nullptr);
- if (errno == 0) {
- tp_reg = reinterpret_cast<void**>(fs_base);
- }
- }
-#elif defined(__i386__)
- struct user_regs_struct regs;
- struct iovec pt_iov = {
- .iov_base = &regs,
- .iov_len = sizeof(regs),
- };
-
- if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &pt_iov) == 0) {
- struct user_desc u_info;
- u_info.entry_number = regs.xgs >> 3;
- if (ptrace(PTRACE_GET_THREAD_AREA, tid, u_info.entry_number, &u_info) == 0) {
- tp_reg = reinterpret_cast<void**>(u_info.base_addr);
- }
- }
-#elif defined(__aarch64__)
+#if defined(__aarch64__)
uint64_t reg;
- struct iovec pt_iov {
- .iov_base = &reg, .iov_len = sizeof(reg),
- };
-
+ struct iovec pt_iov { .iov_base = &reg, .iov_len = sizeof(reg) };
if (ptrace(PTRACE_GETREGSET, tid, NT_ARM_TLS, &pt_iov) == 0) {
tp_reg = reinterpret_cast<void**>(reg);
}
@@ -114,6 +88,31 @@
// Reset the tp_reg if ptrace was unsuccessful.
tp_reg = nullptr;
}
+#elif defined(__i386__)
+ struct user_regs_struct regs;
+ struct iovec pt_iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
+ if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &pt_iov) == 0) {
+ struct user_desc u_info;
+ u_info.entry_number = regs.xgs >> 3;
+ if (ptrace(PTRACE_GET_THREAD_AREA, tid, u_info.entry_number, &u_info) == 0) {
+ tp_reg = reinterpret_cast<void**>(u_info.base_addr);
+ }
+ }
+#elif defined(__riscv)
+ struct user_regs_struct regs;
+ struct iovec pt_iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
+ if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &pt_iov) == 0) {
+ tp_reg = reinterpret_cast<void**>(regs.tp);
+ }
+#elif defined(__x86_64__)
+ {
+ ErrnoRestorer errno_restorer;
+ errno = 0;
+ uintptr_t fs_base = ptrace(PTRACE_PEEKUSER, tid, offsetof(user_regs_struct, fs_base), nullptr);
+ if (errno == 0) {
+ tp_reg = reinterpret_cast<void**>(fs_base);
+ }
+ }
#endif
if (tp_reg == nullptr) {
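Reordering the #if chain above also adds a riscv branch: like i386, it fetches the whole general-purpose register set with PTRACE_GETREGSET/NT_PRSTATUS and then pulls the thread-pointer (tp) register out of it. A hedged sketch of just that read, assuming the tid is already ptrace-attached and stopped (error handling kept minimal):

#include <elf.h>        // NT_PRSTATUS
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>   // user_regs_struct

#if defined(__riscv)
// Returns the traced thread's TLS pointer, or nullptr if ptrace fails.
static void** read_thread_pointer(pid_t tid) {
  user_regs_struct regs;
  iovec iov = {.iov_base = &regs, .iov_len = sizeof(regs)};
  if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &iov) != 0) {
    return nullptr;
  }
  return reinterpret_cast<void**>(regs.tp);
}
#endif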
diff --git a/libc/include/bits/threads_inlines.h b/libc/include/bits/threads_inlines.h
index 5878e0a..459866e 100644
--- a/libc/include/bits/threads_inlines.h
+++ b/libc/include/bits/threads_inlines.h
@@ -124,7 +124,7 @@
};
#pragma clang diagnostic pop
-static inline void* _Nonnull __bionic_thrd_trampoline(void* _Nonnull __arg) {
+static __inline void* _Nonnull __bionic_thrd_trampoline(void* _Nonnull __arg) {
struct __bionic_thrd_data __data =
*__BIONIC_CAST(static_cast, struct __bionic_thrd_data*, __arg);
free(__arg);
diff --git a/libc/include/strings.h b/libc/include/strings.h
index 2f4f764..4b8cc08 100644
--- a/libc/include/strings.h
+++ b/libc/include/strings.h
@@ -61,13 +61,13 @@
/** Deprecated. Use memmove() instead. */
#define bcopy(b1, b2, len) __bionic_bcopy((b1), (b2), (len))
-static __inline__ __always_inline void __bionic_bcopy(const void* _Nonnull b1, void* _Nonnull b2, size_t len) {
+static __inline __always_inline void __bionic_bcopy(const void* _Nonnull b1, void* _Nonnull b2, size_t len) {
__builtin_memmove(b2, b1, len);
}
/** Deprecated. Use memset() instead. */
#define bzero(b, len) __bionic_bzero((b), (len))
-static __inline__ __always_inline void __bionic_bzero(void* _Nonnull b, size_t len) {
+static __inline __always_inline void __bionic_bzero(void* _Nonnull b, size_t len) {
__builtin_memset(b, 0, len);
}
diff --git a/libc/include/sys/cdefs.h b/libc/include/sys/cdefs.h
index 7625d38..5d1718e 100644
--- a/libc/include/sys/cdefs.h
+++ b/libc/include/sys/cdefs.h
@@ -87,9 +87,12 @@
#define __STRING(x) #x
#define ___STRING(x) __STRING(x)
-#if defined(__cplusplus)
-#define __inline inline /* convert to C++ keyword */
-#endif /* !__cplusplus */
+// C++ has `inline` as a keyword, as does C99, but ANSI C (aka C89 aka C90)
+// does not. Everything accepts the `__inline__` extension though. We could
+// just use that directly in our own code, but there's historical precedent
+// for `__inline` meaning it's still used in upstream BSD code (and potentially
+// downstream in vendor or app code).
+#define __inline __inline__
#define __always_inline __attribute__((__always_inline__))
#define __attribute_const__ __attribute__((__const__))
@@ -260,7 +263,7 @@
* them available externally. FORTIFY'ed functions try to be as close to 'invisible' as possible;
* having stack protectors detracts from that (b/182948263).
*/
-# define __BIONIC_FORTIFY_INLINE static __inline__ __attribute__((__no_stack_protector__)) \
+# define __BIONIC_FORTIFY_INLINE static __inline __attribute__((__no_stack_protector__)) \
__always_inline __VERSIONER_FORTIFY_INLINE
/*
* We should use __BIONIC_FORTIFY_VARIADIC instead of __BIONIC_FORTIFY_INLINE
@@ -268,9 +271,9 @@
* The __always_inline attribute is useless, misleading, and could trigger a
* clang compiler bug that incorrectly inlines variadic functions.
*/
-# define __BIONIC_FORTIFY_VARIADIC static __inline__
+# define __BIONIC_FORTIFY_VARIADIC static __inline
/* Error functions don't have bodies, so they can just be static. */
-# define __BIONIC_ERROR_FUNCTION_VISIBILITY static __attribute__((__unused__))
+# define __BIONIC_ERROR_FUNCTION_VISIBILITY static __unused
#else
/* Further increase sharing for some inline functions */
# define __pass_object_size_n(n)
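The net effect of the cdefs.h change above is that `__inline` is no longer a C++-only alias for the keyword but a universal alias for the `__inline__` extension, so the same declaration builds under C89, C99, and C++. A tiny hypothetical illustration (the demo_ names are not from bionic):

/* Mirrors the new mapping: #define __inline __inline__ */
#define demo_inline __inline__

/* Accepted by GCC/Clang in C89, C99, and C++ modes alike, even though plain
 * `inline` would be rejected under -std=c89. */
static demo_inline int demo_square(int x) {
  return x * x;
}

int demo_use(void) {
  return demo_square(7);
}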
diff --git a/libc/malloc_debug/MapData.cpp b/libc/malloc_debug/MapData.cpp
index b22c109..c58882a 100644
--- a/libc/malloc_debug/MapData.cpp
+++ b/libc/malloc_debug/MapData.cpp
@@ -34,6 +34,8 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/uio.h>
+#include <unistd.h>
#include <vector>
@@ -69,148 +71,132 @@
MapEntry* entry = new MapEntry(start, end, offset, name, name_len, flags);
if (!(flags & PROT_READ)) {
- // Any unreadable map will just get a zero load bias.
- entry->load_bias = 0;
- entry->init = true;
- entry->valid = false;
+ // Mark an unreadable map as invalid so that we never attempt to read
+ // ELF data from it.
+ entry->SetInvalid();
}
return entry;
}
-template <typename T>
-static inline bool get_val(MapEntry* entry, uintptr_t addr, T* store) {
- if (!(entry->flags & PROT_READ) || addr < entry->start || addr + sizeof(T) > entry->end) {
- return false;
+void MapEntry::Init() {
+ if (init_) {
+ return;
}
- // Make sure the address is aligned properly.
- if (addr & (sizeof(T) - 1)) {
- return false;
- }
- *store = *reinterpret_cast<T*>(addr);
- return true;
-}
+ init_ = true;
-static bool valid_elf(MapEntry* entry) {
- uintptr_t addr = entry->start;
- uintptr_t end;
- if (__builtin_add_overflow(addr, SELFMAG, &end) || end >= entry->end) {
- return false;
+ uintptr_t end_addr;
+ if (__builtin_add_overflow(start_, SELFMAG, &end_addr) || end_addr >= end_) {
+ return;
}
- return memcmp(reinterpret_cast<void*>(addr), ELFMAG, SELFMAG) == 0;
-}
-
-static void read_loadbias(MapEntry* entry) {
- entry->load_bias = 0;
- uintptr_t addr = entry->start;
ElfW(Ehdr) ehdr;
- if (!get_val<ElfW(Half)>(entry, addr + offsetof(ElfW(Ehdr), e_phnum), &ehdr.e_phnum)) {
- return;
+ struct iovec src_io = {.iov_base = reinterpret_cast<void*>(start_), .iov_len = SELFMAG};
+ struct iovec dst_io = {.iov_base = ehdr.e_ident, .iov_len = SELFMAG};
+ ssize_t rc = process_vm_readv(getpid(), &dst_io, 1, &src_io, 1, 0);
+ valid_ = rc == SELFMAG && IS_ELF(ehdr);
+}
+
+uintptr_t MapEntry::GetLoadBias() {
+ if (!valid_) {
+ return 0;
}
- if (!get_val<ElfW(Off)>(entry, addr + offsetof(ElfW(Ehdr), e_phoff), &ehdr.e_phoff)) {
- return;
+
+ if (load_bias_read_) {
+ return load_bias_;
}
- addr += ehdr.e_phoff;
+
+ load_bias_read_ = true;
+
+ ElfW(Ehdr) ehdr;
+ struct iovec src_io = {.iov_base = reinterpret_cast<void*>(start_), .iov_len = sizeof(ehdr)};
+ struct iovec dst_io = {.iov_base = &ehdr, .iov_len = sizeof(ehdr)};
+ ssize_t rc = process_vm_readv(getpid(), &dst_io, 1, &src_io, 1, 0);
+ if (rc != sizeof(ehdr)) {
+ return 0;
+ }
+
+ uintptr_t addr = start_ + ehdr.e_phoff;
for (size_t i = 0; i < ehdr.e_phnum; i++) {
ElfW(Phdr) phdr;
- if (!get_val<ElfW(Word)>(entry, addr + offsetof(ElfW(Phdr), p_type), &phdr.p_type)) {
- return;
- }
- if (!get_val<ElfW(Word)>(entry, addr + offsetof(ElfW(Phdr), p_flags), &phdr.p_flags)) {
- return;
- }
- if (!get_val<ElfW(Off)>(entry, addr + offsetof(ElfW(Phdr), p_offset), &phdr.p_offset)) {
- return;
+
+ src_io.iov_base = reinterpret_cast<void*>(addr);
+ src_io.iov_len = sizeof(phdr);
+ dst_io.iov_base = &phdr;
+ dst_io.iov_len = sizeof(phdr);
+ rc = process_vm_readv(getpid(), &dst_io, 1, &src_io, 1, 0);
+ if (rc != sizeof(phdr)) {
+ return 0;
}
if ((phdr.p_type == PT_LOAD) && (phdr.p_flags & PF_X) ) {
- if (!get_val<ElfW(Addr)>(entry, addr + offsetof(ElfW(Phdr), p_vaddr), &phdr.p_vaddr)) {
- return;
- }
- entry->load_bias = phdr.p_vaddr - phdr.p_offset;
- return;
+ load_bias_ = phdr.p_vaddr - phdr.p_offset;
+ return load_bias_;
}
addr += sizeof(phdr);
}
+ return 0;
}
-static void inline init(MapEntry* entry) {
- if (entry->init) {
- return;
- }
- entry->init = true;
- if (valid_elf(entry)) {
- entry->valid = true;
- read_loadbias(entry);
- }
-}
-
-bool MapData::ReadMaps() {
+void MapData::ReadMaps() {
+ std::lock_guard<std::mutex> lock(m_);
FILE* fp = fopen("/proc/self/maps", "re");
if (fp == nullptr) {
- return false;
+ return;
}
+ ClearEntries();
+
std::vector<char> buffer(1024);
while (fgets(buffer.data(), buffer.size(), fp) != nullptr) {
MapEntry* entry = parse_line(buffer.data());
if (entry == nullptr) {
- fclose(fp);
- return false;
+ break;
}
-
- auto it = entries_.find(entry);
- if (it == entries_.end()) {
- entries_.insert(entry);
- } else {
- delete entry;
- }
+ entries_.insert(entry);
}
fclose(fp);
- return true;
}
-MapData::~MapData() {
+void MapData::ClearEntries() {
for (auto* entry : entries_) {
delete entry;
}
entries_.clear();
}
+MapData::~MapData() {
+ ClearEntries();
+}
+
// Find the containing map info for the PC.
const MapEntry* MapData::find(uintptr_t pc, uintptr_t* rel_pc) {
MapEntry pc_entry(pc);
std::lock_guard<std::mutex> lock(m_);
-
auto it = entries_.find(&pc_entry);
if (it == entries_.end()) {
- ReadMaps();
- }
- it = entries_.find(&pc_entry);
- if (it == entries_.end()) {
return nullptr;
}
MapEntry* entry = *it;
- init(entry);
+ entry->Init();
if (rel_pc != nullptr) {
// Need to check to see if this is a read-execute map and the read-only
// map is the previous one.
- if (!entry->valid && it != entries_.begin()) {
+ if (!entry->valid() && it != entries_.begin()) {
MapEntry* prev_entry = *--it;
- if (prev_entry->flags == PROT_READ && prev_entry->offset < entry->offset &&
- prev_entry->name == entry->name) {
- init(prev_entry);
+ if (prev_entry->flags() == PROT_READ && prev_entry->offset() < entry->offset() &&
+ prev_entry->name() == entry->name()) {
+ prev_entry->Init();
- if (prev_entry->valid) {
- entry->elf_start_offset = prev_entry->offset;
- *rel_pc = pc - entry->start + entry->offset + prev_entry->load_bias;
+ if (prev_entry->valid()) {
+ entry->set_elf_start_offset(prev_entry->offset());
+ *rel_pc = pc - entry->start() + entry->offset() + prev_entry->GetLoadBias();
return entry;
}
}
}
- *rel_pc = pc - entry->start + entry->offset + entry->load_bias;
+ *rel_pc = pc - entry->start() + entry->offset() + entry->GetLoadBias();
}
return entry;
}
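The rewrite above replaces the old word-at-a-time get_val() reads with process_vm_readv(), which copies memory through the kernel and simply returns a short count or -1 when a page is unreadable, instead of faulting. A self-contained sketch of that probe, checking a mapping's first bytes for the ELF magic (a hypothetical helper, not the MapEntry code itself):

#include <elf.h>       // ELFMAG, SELFMAG
#include <sys/uio.h>   // process_vm_readv
#include <unistd.h>    // getpid
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns true if `start` is readable and begins with the ELF magic.
static bool looks_like_elf(uintptr_t start) {
  unsigned char ident[SELFMAG];
  iovec src = {.iov_base = reinterpret_cast<void*>(start), .iov_len = SELFMAG};
  iovec dst = {.iov_base = ident, .iov_len = SELFMAG};
  ssize_t rc = process_vm_readv(getpid(), &dst, 1, &src, 1, 0);
  return rc == SELFMAG && memcmp(ident, ELFMAG, SELFMAG) == 0;
}

int main() {
  // An unmapped address fails cleanly (rc == -1) instead of crashing.
  printf("page 0 is an ELF header? %s\n", looks_like_elf(0) ? "yes" : "no");
  // A readable-but-not-ELF buffer also returns false, via the memcmp.
  static char buffer[64] = "not an elf header";
  printf("plain buffer is an ELF header? %s\n",
         looks_like_elf(reinterpret_cast<uintptr_t>(buffer)) ? "yes" : "no");
  return 0;
}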
diff --git a/libc/malloc_debug/MapData.h b/libc/malloc_debug/MapData.h
index f2b3c1c..13bf9cb 100644
--- a/libc/malloc_debug/MapData.h
+++ b/libc/malloc_debug/MapData.h
@@ -36,26 +36,50 @@
#include <platform/bionic/macros.h>
-struct MapEntry {
- MapEntry(uintptr_t start, uintptr_t end, uintptr_t offset, const char* name, size_t name_len, int flags)
- : start(start), end(end), offset(offset), name(name, name_len), flags(flags) {}
+class MapEntry {
+ public:
+ MapEntry() = default;
+ MapEntry(uintptr_t start, uintptr_t end, uintptr_t offset, const char* name, size_t name_len,
+ int flags)
+ : start_(start), end_(end), offset_(offset), name_(name, name_len), flags_(flags) {}
- explicit MapEntry(uintptr_t pc) : start(pc), end(pc) {}
+ explicit MapEntry(uintptr_t pc) : start_(pc), end_(pc) {}
- uintptr_t start;
- uintptr_t end;
- uintptr_t offset;
- uintptr_t load_bias;
- uintptr_t elf_start_offset = 0;
- std::string name;
- int flags;
- bool init = false;
- bool valid = false;
+ void Init();
+
+ uintptr_t GetLoadBias();
+
+ void SetInvalid() {
+ valid_ = false;
+ init_ = true;
+ load_bias_read_ = true;
+ }
+
+ bool valid() { return valid_; }
+ uintptr_t start() const { return start_; }
+ uintptr_t end() const { return end_; }
+ uintptr_t offset() const { return offset_; }
+ uintptr_t elf_start_offset() const { return elf_start_offset_; }
+ void set_elf_start_offset(uintptr_t elf_start_offset) { elf_start_offset_ = elf_start_offset; }
+ const std::string& name() const { return name_; }
+ int flags() const { return flags_; }
+
+ private:
+ uintptr_t start_;
+ uintptr_t end_;
+ uintptr_t offset_;
+ uintptr_t load_bias_ = 0;
+ uintptr_t elf_start_offset_ = 0;
+ std::string name_;
+ int flags_;
+ bool init_ = false;
+ bool valid_ = false;
+ bool load_bias_read_ = false;
};
// Ordering comparator that returns equivalence for overlapping entries
struct compare_entries {
- bool operator()(const MapEntry* a, const MapEntry* b) const { return a->end <= b->start; }
+ bool operator()(const MapEntry* a, const MapEntry* b) const { return a->end() <= b->start(); }
};
class MapData {
@@ -65,11 +89,15 @@
const MapEntry* find(uintptr_t pc, uintptr_t* rel_pc = nullptr);
- private:
- bool ReadMaps();
+ size_t NumMaps() { return entries_.size(); }
+ void ReadMaps();
+
+ private:
std::mutex m_;
std::set<MapEntry*, compare_entries> entries_;
+ void ClearEntries();
+
BIONIC_DISALLOW_COPY_AND_ASSIGN(MapData);
};
diff --git a/libc/malloc_debug/backtrace.cpp b/libc/malloc_debug/backtrace.cpp
index ecb3a80..6a32fca 100644
--- a/libc/malloc_debug/backtrace.cpp
+++ b/libc/malloc_debug/backtrace.cpp
@@ -50,7 +50,7 @@
typedef struct _Unwind_Context __unwind_context;
static MapData g_map_data;
-static const MapEntry* g_current_code_map = nullptr;
+static MapEntry g_current_code_map;
static _Unwind_Reason_Code find_current_map(__unwind_context* context, void*) {
uintptr_t ip = _Unwind_GetIP(context);
@@ -58,11 +58,15 @@
if (ip == 0) {
return _URC_END_OF_STACK;
}
- g_current_code_map = g_map_data.find(ip);
+ auto map = g_map_data.find(ip);
+ if (map != nullptr) {
+ g_current_code_map = *map;
+ }
return _URC_END_OF_STACK;
}
void backtrace_startup() {
+ g_map_data.ReadMaps();
_Unwind_Backtrace(find_current_map, nullptr);
}
@@ -98,7 +102,8 @@
}
// Do not record the frames that fall in our own shared library.
- if (g_current_code_map && (ip >= g_current_code_map->start) && ip < g_current_code_map->end) {
+ if (g_current_code_map.start() != 0 && (ip >= g_current_code_map.start()) &&
+ ip < g_current_code_map.end()) {
return _URC_NO_REASON;
}
@@ -113,6 +118,10 @@
}
std::string backtrace_string(const uintptr_t* frames, size_t frame_count) {
+ if (g_map_data.NumMaps() == 0) {
+ g_map_data.ReadMaps();
+ }
+
std::string str;
for (size_t frame_num = 0; frame_num < frame_count; frame_num++) {
@@ -130,14 +139,15 @@
uintptr_t rel_pc = offset;
const MapEntry* entry = g_map_data.find(frames[frame_num], &rel_pc);
- const char* soname = (entry != nullptr) ? entry->name.c_str() : info.dli_fname;
+ const char* soname = (entry != nullptr) ? entry->name().c_str() : info.dli_fname;
if (soname == nullptr) {
soname = "<unknown>";
}
char offset_buf[128];
- if (entry != nullptr && entry->elf_start_offset != 0) {
- snprintf(offset_buf, sizeof(offset_buf), " (offset 0x%" PRIxPTR ")", entry->elf_start_offset);
+ if (entry != nullptr && entry->elf_start_offset() != 0) {
+ snprintf(offset_buf, sizeof(offset_buf), " (offset 0x%" PRIxPTR ")",
+ entry->elf_start_offset());
} else {
offset_buf[0] = '\0';
}
@@ -167,5 +177,6 @@
}
void backtrace_log(const uintptr_t* frames, size_t frame_count) {
+ g_map_data.ReadMaps();
error_log_string(backtrace_string(frames, frame_count).c_str());
}
diff --git a/libc/platform/bionic/page.h b/libc/platform/bionic/page.h
index 65faba4..4dbe4ba 100644
--- a/libc/platform/bionic/page.h
+++ b/libc/platform/bionic/page.h
@@ -32,11 +32,13 @@
#endif
}
-constexpr size_t max_page_size() {
+// The maximum page size supported on any Android device. As
+// of API level 35, this is limited by ART.
+constexpr size_t max_android_page_size() {
#if defined(PAGE_SIZE)
return PAGE_SIZE;
#else
- return 65536;
+ return 16384;
#endif
}
diff --git a/libc/private/WriteProtected.h b/libc/private/WriteProtected.h
index bbe35e5..f269125 100644
--- a/libc/private/WriteProtected.h
+++ b/libc/private/WriteProtected.h
@@ -30,11 +30,11 @@
template <typename T>
union WriteProtectedContents {
T value;
- char padding[max_page_size()];
+ char padding[max_android_page_size()];
WriteProtectedContents() = default;
BIONIC_DISALLOW_COPY_AND_ASSIGN(WriteProtectedContents);
-} __attribute__((aligned(max_page_size())));
+} __attribute__((aligned(max_android_page_size())));
// Write protected wrapper class that aligns its contents to a page boundary,
// and sets the memory protection to be non-writable, except when being modified
@@ -42,8 +42,8 @@
template <typename T>
class WriteProtected {
public:
- static_assert(sizeof(T) < max_page_size(),
- "WriteProtected only supports contents up to max_page_size()");
+ static_assert(sizeof(T) < max_android_page_size(),
+ "WriteProtected only supports contents up to max_android_page_size()");
WriteProtected() = default;
BIONIC_DISALLOW_COPY_AND_ASSIGN(WriteProtected);
@@ -89,7 +89,7 @@
// ourselves.
addr = untag_address(addr);
#endif
- if (mprotect(reinterpret_cast<void*>(addr), max_page_size(), prot) == -1) {
+ if (mprotect(reinterpret_cast<void*>(addr), max_android_page_size(), prot) == -1) {
async_safe_fatal("WriteProtected mprotect %x failed: %s", prot, strerror(errno));
}
}
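WriteProtected keeps working the same way, just sized and aligned to the new max_android_page_size() constant: the value is padded to a full page, and the whole page is flipped between PROT_READ and PROT_READ|PROT_WRITE around each mutation. A hedged, standalone sketch of that mechanism (kMaxAndroidPageSize and the other names below are stand-ins, not bionic's):

#include <sys/mman.h>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr size_t kMaxAndroidPageSize = 16384;  // stand-in for max_android_page_size()

// Pad and align the value so it occupies exactly one "max page".
union alignas(kMaxAndroidPageSize) ProtectedInt {
  int value;
  char padding[kMaxAndroidPageSize];
};

static ProtectedInt g_setting;

static void set_protection(int prot) {
  if (mprotect(&g_setting, kMaxAndroidPageSize, prot) == -1) {
    fprintf(stderr, "mprotect(%d) failed: %s\n", prot, strerror(errno));
  }
}

int main() {
  set_protection(PROT_READ | PROT_WRITE);  // open the page for mutation
  g_setting.value = 42;
  set_protection(PROT_READ);               // back to read-only
  printf("value = %d\n", g_setting.value);
  // Writing to g_setting.value here would now fault with SIGSEGV.
  return 0;
}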
diff --git a/libdl/libdl_cfi.cpp b/libdl/libdl_cfi.cpp
index 23cd7f5..8adc342 100644
--- a/libdl/libdl_cfi.cpp
+++ b/libdl/libdl_cfi.cpp
@@ -26,15 +26,15 @@
// dlopen/dlclose.
static struct {
uintptr_t v;
- char padding[max_page_size() - sizeof(v)];
-} shadow_base_storage alignas(max_page_size());
+ char padding[max_android_page_size() - sizeof(v)];
+} shadow_base_storage alignas(max_android_page_size());
// __cfi_init is called by the loader as soon as the shadow is mapped. This may happen very early
// during startup, before libdl.so global constructors, and, on i386, even before __libc_sysinfo is
// initialized. This function should not do any system calls.
extern "C" uintptr_t* __cfi_init(uintptr_t shadow_base) {
shadow_base_storage.v = shadow_base;
- static_assert(sizeof(shadow_base_storage) == max_page_size(), "");
+ static_assert(sizeof(shadow_base_storage) == max_android_page_size(), "");
return &shadow_base_storage.v;
}
diff --git a/linker/Android.bp b/linker/Android.bp
index e1a5a91..1ede380 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -367,7 +367,9 @@
"liblinker_main",
"liblinker_malloc",
- "libc++_static",
+ // Use a version of libc++ built without exceptions, because accessing EH globals uses
+ // ELF TLS, which is not supported in the loader.
+ "libc++_static_noexcept",
"libc_nomalloc",
"libc_dynamic_dispatch",
"libm",
@@ -434,7 +436,7 @@
"linker_debuggerd_android.cpp",
],
static_libs: [
- "libc++demangle",
+ "libc++demangle_noexcept",
"libdebuggerd_handler_fallback",
],
},
diff --git a/tests/time_test.cpp b/tests/time_test.cpp
index ca8e260..baafbf6 100644
--- a/tests/time_test.cpp
+++ b/tests/time_test.cpp
@@ -31,6 +31,8 @@
#include <thread>
#include "SignalUtils.h"
+#include "android-base/file.h"
+#include "android-base/strings.h"
#include "utils.h"
using namespace std::chrono_literals;
@@ -797,21 +799,41 @@
ASSERT_EQ(1, timer_create_NULL_signal_handler_invocation_count);
}
-TEST(time, timer_create_EINVAL) {
- clockid_t invalid_clock = 16;
+static int GetThreadCount() {
+ std::string status;
+ if (android::base::ReadFileToString("/proc/self/status", &status)) {
+ for (const auto& line : android::base::Split(status, "\n")) {
+ int thread_count;
+ if (sscanf(line.c_str(), "Threads: %d", &thread_count) == 1) {
+ return thread_count;
+ }
+ }
+ }
+ return -1;
+}
- // A SIGEV_SIGNAL timer is easy; the kernel does all that.
+TEST(time, timer_create_EINVAL) {
+ const clockid_t kInvalidClock = 16;
+
+ // A SIGEV_SIGNAL timer failure is easy; that's the kernel's problem.
timer_t timer_id;
- ASSERT_EQ(-1, timer_create(invalid_clock, nullptr, &timer_id));
+ ASSERT_EQ(-1, timer_create(kInvalidClock, nullptr, &timer_id));
ASSERT_ERRNO(EINVAL);
- // A SIGEV_THREAD timer is more interesting because we have stuff to clean up.
- sigevent se;
- memset(&se, 0, sizeof(se));
+ // A SIGEV_THREAD timer failure is more interesting because we have a thread
+ // to clean up (https://issuetracker.google.com/340125671).
+ sigevent se = {};
se.sigev_notify = SIGEV_THREAD;
se.sigev_notify_function = NoOpNotifyFunction;
- ASSERT_EQ(-1, timer_create(invalid_clock, &se, &timer_id));
+ ASSERT_EQ(-1, timer_create(kInvalidClock, &se, &timer_id));
ASSERT_ERRNO(EINVAL);
+
+ // timer_create() doesn't guarantee that the thread will be dead _before_
+ // it returns because that would require extra synchronization that's
+ // unnecessary in the normal (successful) case. A timeout here means we
+ // leaked a thread.
+ while (GetThreadCount() > 1) {
+ }
}
TEST(time, timer_create_multiple) {
diff --git a/tests/utils.h b/tests/utils.h
index dcb08f5..3c83b73 100644
--- a/tests/utils.h
+++ b/tests/utils.h
@@ -38,6 +38,7 @@
#endif
#include <atomic>
+#include <iomanip>
#include <string>
#include <regex>
@@ -253,7 +254,7 @@
AssertChildExited(pid, expected_exit_status, &error_msg);
if (expected_output_regex != nullptr) {
if (!std::regex_search(output_, std::regex(expected_output_regex))) {
- FAIL() << "regex " << expected_output_regex << " didn't match " << output_;
+ FAIL() << "regex " << std::quoted(expected_output_regex) << " didn't match " << std::quoted(output_);
}
}
}