Merge changes I3b4e4293,Ice110c6e into main
* changes:
linker: LoadSegments: Load 4KiB ELFs on 16KiB page-sized systems
linker: LoadSegments: Preparatory work for 16KiB App Compat
diff --git a/linker/Android.bp b/linker/Android.bp
index 563cf3d..a06ca29 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -184,6 +184,7 @@
"linker_mapped_file_fragment.cpp",
"linker_note_gnu_property.cpp",
"linker_phdr.cpp",
+ "linker_phdr_16kib_compat.cpp",
"linker_relocate.cpp",
"linker_sdk_versions.cpp",
"linker_soinfo.cpp",
diff --git a/linker/linker.cpp b/linker/linker.cpp
index bcc2500..88d02dc 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -640,6 +640,11 @@
si_->set_gap_start(elf_reader.gap_start());
si_->set_gap_size(elf_reader.gap_size());
si_->set_should_pad_segments(elf_reader.should_pad_segments());
+ si_->set_should_use_16kib_app_compat(elf_reader.should_use_16kib_app_compat());
+ if (si_->should_use_16kib_app_compat()) {
+ si_->set_compat_relro_start(elf_reader.compat_relro_start());
+ si_->set_compat_relro_size(elf_reader.compat_relro_size());
+ }
return true;
}
@@ -3361,7 +3366,8 @@
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
- if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %m", get_realpath());
return false;
}
@@ -3377,7 +3383,8 @@
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
- if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
DL_ERR("can't protect segments for \"%s\": %m", get_realpath());
return false;
}
@@ -3412,9 +3419,18 @@
}
bool soinfo::protect_relro() {
- if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
- DL_ERR("can't enable GNU RELRO protection for \"%s\": %m", get_realpath());
- return false;
+ if (should_use_16kib_app_compat_) {
+ if (phdr_table_protect_gnu_relro_16kib_compat(compat_relro_start_, compat_relro_size_) < 0) {
+ DL_ERR("can't enable COMPAT GNU RELRO protection for \"%s\": %s", get_realpath(),
+ strerror(errno));
+ return false;
+ }
+ } else {
+ if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
+ DL_ERR("can't enable GNU RELRO protection for \"%s\": %m", get_realpath());
+ return false;
+ }
}
return true;
}
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 74cd517..48ed723 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -370,8 +370,9 @@
// Apply appropriate protections here if it is needed.
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
- (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
- somain->should_pad_segments(), &note_gnu_property) < 0)) {
+ (phdr_table_protect_segments(
+ somain->phdr, somain->phnum, somain->load_bias, somain->should_pad_segments(),
+ somain->should_use_16kib_app_compat(), &note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %m", exe_info.path.c_str());
}
}
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index b7db4cd..7691031 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -47,6 +47,7 @@
#include "private/elf_note.h"
#include <android-base/file.h>
+#include <android-base/properties.h>
static int GetTargetElfMachine() {
#if defined(__arm__)
@@ -139,11 +140,6 @@
**/
-#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
-#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
- MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
- MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
-
static const size_t kPageSize = page_size();
/*
@@ -182,6 +178,14 @@
did_read_ = true;
}
+ if (kPageSize == 0x4000 && phdr_table_get_minimum_alignment(phdr_table_, phdr_num_) == 0x1000) {
+ // This prop needs to be read on 16KiB devices for each ELF where min_palign is 4KiB.
+ // It cannot be cached since the developer may toggle app compat on/off.
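+ // (Typically toggled via, e.g., "adb shell setprop bionic.linker.16kb.app_compat.enabled true".)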
+ // This check will be removed once app compat is made the default on 16KiB devices.
+ should_use_16kib_app_compat_ =
+ ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled", false);
+ }
+
return did_read_;
}
@@ -197,8 +201,9 @@
#if defined(__aarch64__)
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
- did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
- should_pad_segments_, &note_gnu_property_) == 0);
+ did_load_ =
+ (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_, should_pad_segments_,
+ should_use_16kib_app_compat_, &note_gnu_property_) == 0);
}
#endif
}
@@ -690,6 +695,13 @@
return false;
}
+ if (should_use_16kib_app_compat_) {
+ // Reserve additional space for aligning the permission boundary in compat loading.
+ // Up to kPageSize - kCompatPageSize of additional space is needed, but the
+ // reservation is done with mmap, which gives kPageSize multiple-sized reservations.
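+ // For example, with kPageSize = 16KiB and kCompatPageSize = 4KiB, up to 12KiB
+ // of slack may be needed; the extra kPageSize reserved below covers it.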
+ load_size_ += kPageSize;
+ }
+
uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
void* start;
@@ -725,6 +737,13 @@
load_start_ = start;
load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
+
+ if (should_use_16kib_app_compat_) {
+ // In compat mode, make the initial mapping RW, since the ELF contents will be
+ // read into it rather than mapped over it.
+ mprotect(reinterpret_cast<void*>(start), load_size_, PROT_READ | PROT_WRITE);
+ }
+
return true;
}
@@ -808,8 +827,15 @@
}
static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- size_t phdr_idx, ElfW(Addr)* p_memsz,
- ElfW(Addr)* p_filesz, bool should_pad_segments) {
+ size_t phdr_idx, ElfW(Addr)* p_memsz,
+ ElfW(Addr)* p_filesz, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
+ // NOTE: Segment extension is only applicable where the ELF's max-page-size > runtime
+ // page size, in order to save kernel VMA slab memory. 16KiB compat mode is the exact
+ // opposite scenario.
+ if (should_use_16kib_app_compat) {
+ return;
+ }
+
const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
const ElfW(Phdr)* next = nullptr;
size_t next_idx = phdr_idx + 1;
@@ -879,6 +905,13 @@
}
void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
+ // NOTE: In 16KiB app compat mode, the ELF mapping is anonymous, meaning that
+ // RW segments are COW-ed from the kernel's zero page. So there is no need to
+ // explicitly zero-fill until the last page's limit.
+ if (should_use_16kib_app_compat_) {
+ return;
+ }
+
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
@@ -898,6 +931,12 @@
}
void ElfReader::DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end) {
+ // NOTE: Padding pages are only applicable where the ELF's max-page-size > runtime page size;
+ // 16KiB compat mode is the exact opposite scenario.
+ if (should_use_16kib_app_compat_) {
+ return;
+ }
+
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
@@ -926,6 +965,12 @@
bool ElfReader::MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
ElfW(Addr) seg_file_end) {
+ // NOTE: We do not need to handle .bss in 16KiB compat mode since the mapping
+ // reservation is anonymous and RW to begin with.
+ if (should_use_16kib_app_compat_) {
+ return true;
+ }
+
// seg_file_end is now the first page address after the file content.
seg_file_end = page_end(seg_file_end);
@@ -952,15 +997,27 @@
}
bool ElfReader::LoadSegments() {
+ // NOTE: The compat (legacy) page size (4096) must be used when aligning
+ // the 4KiB segments for loading in compat mode. The larger 16KiB page size
+ // will lead to overwriting adjacent segments since the ELF's segment(s)
+ // are not 16KiB aligned.
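+ // For example, aligning a segment at p_vaddr 0x5000 down to a 16KiB boundary
+ // yields 0x4000, which can fall inside the preceding 4KiB-aligned segment.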
+ size_t seg_align = should_use_16kib_app_compat_ ? kCompatPageSize : kPageSize;
+
size_t min_palign = phdr_table_get_minimum_alignment(phdr_table_, phdr_num_);
- // Only enforce this on 16 KB systems. Apps may rely on undefined behavior
- // here on 4 KB systems, which is the norm before this change is introduced.
- if (kPageSize >= 16384 && min_palign < kPageSize) {
+ // Only enforce this on 16 KB systems with app compat disabled.
+ // Apps may rely on undefined behavior here on 4 KB systems,
+ // which was the norm before this change was introduced.
+ if (kPageSize >= 16384 && min_palign < kPageSize && !should_use_16kib_app_compat_) {
DL_ERR("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
name_.c_str(), min_palign, kPageSize);
return false;
}
+ if (!Setup16KiBAppCompat()) {
+ DL_ERR("\"%s\" failed to setup 16KiB App Compat", name_.c_str());
+ return false;
+ }
+
for (size_t i = 0; i < phdr_num_; ++i) {
const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -970,13 +1027,14 @@
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
- _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+ _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_,
+ should_use_16kib_app_compat_);
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = seg_start + p_memsz;
- ElfW(Addr) seg_page_end = page_end(seg_end);
+ ElfW(Addr) seg_page_end = align_up(seg_end, seg_align);
ElfW(Addr) seg_file_end = seg_start + p_filesz;
@@ -984,7 +1042,7 @@
ElfW(Addr) file_start = phdr->p_offset;
ElfW(Addr) file_end = file_start + p_filesz;
- ElfW(Addr) file_page_start = page_start(file_start);
+ ElfW(Addr) file_page_start = align_down(file_start, seg_align);
ElfW(Addr) file_length = file_end - file_page_start;
if (file_size_ <= 0) {
@@ -1017,8 +1075,14 @@
}
// Pass the file_length, since it may have been extended by _extend_load_segment_vma().
- if (!MapSegment(i, file_length)) {
- return false;
+ if (should_use_16kib_app_compat_) {
+ if (!CompatMapSegment(i, file_length)) {
+ return false;
+ }
+ } else {
+ if (!MapSegment(i, file_length)) {
+ return false;
+ }
}
}
@@ -1039,7 +1103,7 @@
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int extra_prot_flags,
- bool should_pad_segments) {
+ bool should_pad_segments, bool should_use_16kib_app_compat) {
for (size_t i = 0; i < phdr_count; ++i) {
const ElfW(Phdr)* phdr = &phdr_table[i];
@@ -1049,7 +1113,8 @@
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
- _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+ _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments,
+ should_use_16kib_app_compat);
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
@@ -1088,12 +1153,14 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat,
const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
@@ -1101,7 +1168,8 @@
prot |= PROT_BTI;
}
#endif
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments,
+ should_use_16kib_app_compat);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -1118,20 +1186,22 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
-int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias,
- bool should_pad_segments) {
+int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
}
static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
- bool should_pad_segments) {
+ bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
// Find the index and phdr of the LOAD containing the GNU_RELRO segment
for (size_t index = 0; index < phdr_count; ++index) {
const ElfW(Phdr)* phdr = &phdr_table[index];
@@ -1179,7 +1249,7 @@
// mprotect will only RO protect a part of the extended RW LOAD segment, which
// will leave an extra split RW VMA (the gap).
_extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
*seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
return;
@@ -1192,7 +1262,8 @@
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int prot_flags,
- bool should_pad_segments) {
+ bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -1220,7 +1291,7 @@
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
_extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1246,13 +1317,29 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Were segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode.
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments) {
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
+}
+
+/*
+ * Apply RX protection to the compat relro region of the ELF being loaded in
+ * 16KiB compat mode.
+ *
+ * Input:
+ * start -> start address of the compat relro region.
+ * size -> size of the compat relro region in bytes.
+ * Return:
+ * 0 on success, -1 on failure (error code in errno).
+ */
+int phdr_table_protect_gnu_relro_16kib_compat(ElfW(Addr) start, ElfW(Addr) size) {
+ return mprotect(reinterpret_cast<void*>(start), size, PROT_READ | PROT_EXEC);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 1d6bbe3..2f159f3 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -39,6 +39,13 @@
#include "linker_mapped_file_fragment.h"
#include "linker_note_gnu_property.h"
+#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
+#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
+ MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
+ MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
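+// For example, PFLAGS_TO_PROT(PF_R | PF_X) yields PROT_READ | PROT_EXEC.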
+
+static constexpr size_t kCompatPageSize = 0x1000;
+
class ElfReader {
public:
ElfReader();
@@ -59,6 +66,9 @@
bool is_mapped_by_caller() const { return mapped_by_caller_; }
ElfW(Addr) entry_point() const { return header_.e_entry + load_bias_; }
bool should_pad_segments() const { return should_pad_segments_; }
+ bool should_use_16kib_app_compat() const { return should_use_16kib_app_compat_; }
+ ElfW(Addr) compat_relro_start() const { return compat_relro_start_; }
+ ElfW(Addr) compat_relro_size() const { return compat_relro_size_; }
private:
[[nodiscard]] bool ReadElfHeader();
@@ -69,10 +79,14 @@
[[nodiscard]] bool ReadPadSegmentNote();
[[nodiscard]] bool ReserveAddressSpace(address_space_params* address_space);
[[nodiscard]] bool MapSegment(size_t seg_idx, size_t len);
+ [[nodiscard]] bool CompatMapSegment(size_t seg_idx, size_t len);
void ZeroFillSegment(const ElfW(Phdr)* phdr);
void DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end);
[[nodiscard]] bool MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
ElfW(Addr) seg_file_end);
+ [[nodiscard]] bool IsEligibleFor16KiBAppCompat(ElfW(Addr)* vaddr);
+ [[nodiscard]] bool HasAtMostOneRelroSegment(const ElfW(Phdr)** relro_phdr);
+ [[nodiscard]] bool Setup16KiBAppCompat();
[[nodiscard]] bool LoadSegments();
[[nodiscard]] bool FindPhdr();
[[nodiscard]] bool FindGnuPropertySection();
@@ -123,6 +137,13 @@
// Pad gaps between segments when memory mapping?
bool should_pad_segments_ = false;
+ // Use app compat mode when loading 4KiB max-page-size ELFs on 16KiB page-size devices?
+ bool should_use_16kib_app_compat_ = false;
+
+ // RELRO region for 16KiB compat loading
+ ElfW(Addr) compat_relro_start_ = 0;
+ ElfW(Addr) compat_relro_size_ = 0;
+
// Only used by AArch64 at the moment.
GnuPropertySection note_gnu_property_ __unused;
};
@@ -135,13 +156,18 @@
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat,
const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat);
+
+int phdr_table_protect_gnu_relro_16kib_compat(ElfW(Addr) start, ElfW(Addr) size);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_phdr_16kib_compat.cpp b/linker/linker_phdr_16kib_compat.cpp
new file mode 100644
index 0000000..e084635
--- /dev/null
+++ b/linker/linker_phdr_16kib_compat.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "linker_phdr.h"
+
+#include <linux/prctl.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+#include "linker_debug.h"
+#include "linker_dlwarning.h"
+#include "linker_globals.h"
+
+#include "platform/bionic/macros.h"
+#include "platform/bionic/page.h"
+
+#include <string>
+
+static inline bool segment_contains_prefix(const ElfW(Phdr)* segment, const ElfW(Phdr)* prefix) {
+ return segment && prefix && segment->p_vaddr == prefix->p_vaddr;
+}
+
+/*
+ * Returns true if the ELF contains at most one RELRO segment, and populates
+ * @relro_phdr with the RELRO phdr, or with nullptr if there is none.
+ *
+ * Returns false if more than one RELRO segment is found.
+ */
+bool ElfReader::HasAtMostOneRelroSegment(const ElfW(Phdr)** relro_phdr) {
+ const ElfW(Phdr)* relro = nullptr;
+ for (size_t i = 0; i < phdr_num_; ++i) {
+ const ElfW(Phdr)* phdr = &phdr_table_[i];
+
+ if (phdr->p_type != PT_GNU_RELRO) {
+ continue;
+ }
+
+ if (relro == nullptr) {
+ relro = phdr;
+ } else {
+ return false;
+ }
+ }
+
+ *relro_phdr = relro;
+
+ return true;
+}
+
+/*
+ * In 16KiB compatibility mode, ELFs with the following segment layout
+ * can be loaded successfully:
+ *
+ * ┌────────────┬─────────────────────────┬────────────┐
+ * │ │ │ │
+ * │ (RO|RX)* │ (RW - RELRO prefix)? │ (RW)* │
+ * │ │ │ │
+ * └────────────┴─────────────────────────┴────────────┘
+ *
+ * In other words, compatible layouts have:
+ * - zero or more RO or RX segments;
+ * - followed by zero or one RELRO prefix;
+ * - followed by zero or more RW segments (this can include the RW
+ * suffix from the segment containing the RELRO prefix, if any)
+ *
+ * In 16KiB compat mode, after relocation, the ELF's layout in virtual
+ * memory is as shown below:
+ * ┌──────────────────────────────────────┬────────────┐
+ * │ │ │
+ * │ (RX)? │ (RW)? │
+ * │ │ │
+ * └──────────────────────────────────────┴────────────┘
+ *
+ * In compat mode:
+ * - the RO and RX segments, along with the RELRO prefix, are protected
+ * as RX;
+ * - the RW segments, along with the RW suffix of the RELRO segment,
+ * if any, are protected as RW.
+ *
+ * This allows for the single RX|RW permission boundary to be aligned with
+ * a 16KiB page boundary, since a single page cannot share multiple
+ * permissions.
+ *
+ * IsEligibleFor16KiBAppCompat() identifies compatible ELFs and populates @vaddr
+ * with the boundary between RX|RW portions.
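+ *
+ * For example (hypothetical layout): an RX LOAD covering [0x0, 0x3000),
+ * followed by an RW LOAD at 0x3000 whose RELRO prefix spans [0x3000, 0x4800),
+ * yields *vaddr = align_up(0x4800, kCompatPageSize) = 0x5000.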
+ *
+ * Returns true if the ELF can be loaded in compat mode, else false.
+ */
+bool ElfReader::IsEligibleFor16KiBAppCompat(ElfW(Addr)* vaddr) {
+ const ElfW(Phdr)* relro_phdr = nullptr;
+ if (!HasAtMostOneRelroSegment(&relro_phdr)) {
+ DL_WARN("\"%s\": Compat loading failed: Multiple RELRO segments found", name_.c_str());
+ return false;
+ }
+
+ const ElfW(Phdr)* last_rw = nullptr;
+ const ElfW(Phdr)* first_rw = nullptr;
+
+ for (size_t i = 0; i < phdr_num_; ++i) {
+ const ElfW(Phdr)* curr = &phdr_table_[i];
+ const ElfW(Phdr)* prev = (i > 0) ? &phdr_table_[i - 1] : nullptr;
+
+ if (curr->p_type != PT_LOAD) {
+ continue;
+ }
+
+ int prot = PFLAGS_TO_PROT(curr->p_flags);
+
+ if ((prot & PROT_WRITE) && (prot & PROT_READ)) {
+ if (!first_rw) {
+ first_rw = curr;
+ }
+
+ if (last_rw && last_rw != prev) {
+ DL_WARN("\"%s\": Compat loading failed: ELF contains multiple non-adjacent RW segments",
+ name_.c_str());
+ return false;
+ }
+
+ last_rw = curr;
+ }
+ }
+
+ if (!relro_phdr) {
+ *vaddr = align_down(first_rw->p_vaddr, kCompatPageSize);
+ return true;
+ }
+
+ // The RELRO segment is present, it must be the prefix of the first RW segment.
+ if (!segment_contains_prefix(first_rw, relro_phdr)) {
+ DL_WARN("\"%s\": Compat loading failed: RELRO is not in the first RW segment",
+ name_.c_str());
+ return false;
+ }
+
+ uint64_t end;
+ if (__builtin_add_overflow(relro_phdr->p_vaddr, relro_phdr->p_memsz, &end)) {
+ DL_WARN("\"%s\": Compat loading failed: relro vaddr + memsz overflowed", name_.c_str());
+ return false;
+ }
+
+ *vaddr = align_up(end, kCompatPageSize);
+ return true;
+}
+
+/*
+ * Returns the offset/shift needed to align @addr to a page boundary.
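+ *
+ * For example, with 16KiB pages, an @addr of 0x5000 has a page offset of
+ * 0x1000, so the returned shift is 0x4000 - 0x1000 = 0x3000; the shifted
+ * boundary, 0x5000 + 0x3000 = 0x8000, is 16KiB-aligned.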
+ */
+static inline ElfW(Addr) perm_boundary_offset(const ElfW(Addr) addr) {
+ ElfW(Addr) offset = page_offset(addr);
+
+ return offset ? page_size() - offset : 0;
+}
+
+bool ElfReader::Setup16KiBAppCompat() {
+ if (!should_use_16kib_app_compat_) {
+ return true;
+ }
+
+ ElfW(Addr) rx_rw_boundary; // Permission boundary for compat mode
+ if (!IsEligibleFor16KiBAppCompat(&rx_rw_boundary)) {
+ return false;
+ }
+
+ // Adjust the load_bias to position the RX|RW boundary on a page boundary
+ load_bias_ += perm_boundary_offset(rx_rw_boundary);
+
+ // RW region (.data, .bss ...)
+ ElfW(Addr) rw_start = load_bias_ + rx_rw_boundary;
+ ElfW(Addr) rw_size = load_size_ - (rw_start - reinterpret_cast<ElfW(Addr)>(load_start_));
+
+ CHECK(rw_start % getpagesize() == 0);
+ CHECK(rw_size % getpagesize() == 0);
+
+ // Compat RELRO (RX) region (.text, .data.relro, ...)
+ compat_relro_start_ = reinterpret_cast<ElfW(Addr)>(load_start_);
+ compat_relro_size_ = load_size_ - rw_size;
+
+ // Label the ELF VMA, since compat mode uses anonymous mappings.
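+ // The label appears in /proc/<pid>/maps as, e.g., "[anon:/path/to/libfoo.so (compat loaded)]".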
+ std::string compat_name = name_ + " (compat loaded)";
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, load_start_, load_size_, compat_name.c_str());
+
+ return true;
+}
+
+bool ElfReader::CompatMapSegment(size_t seg_idx, size_t len) {
+ const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];
+
+ // NOTE: The compat (legacy) page size (4096) must be used when aligning
+ // the 4KiB segments for loading (reading). The larger 16KiB page size
+ // will lead to overwriting adjacent segments since the ELF's segment(s)
+ // are not 16KiB aligned.
+
+ void* start = reinterpret_cast<void*>(align_down(phdr->p_vaddr + load_bias_, kCompatPageSize));
+
+ // The ELF could be loaded directly from a zipped APK; the zip offset
+ // must be added to find the segment's offset within the file.
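+ // For example (hypothetical offsets), an ELF at zip offset 0x10000 with
+ // p_offset 0x2800 is read from file offset 0x10000 + align_down(0x2800, 0x1000) = 0x12000.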
+ const ElfW(Addr) offset = file_offset_ + align_down(phdr->p_offset, kCompatPageSize);
+
+ int prot = PFLAGS_TO_PROT(phdr->p_flags);
+
+ CHECK(should_use_16kib_app_compat_);
+
+ // Since the 4KiB max-page-size ELF is not properly aligned, loading it by
+ // directly mmapping the ELF file is not feasible.
+ // Instead, read the ELF contents into the anonymous RW mapping.
+ if (TEMP_FAILURE_RETRY(pread64(fd_, start, len, offset)) == -1) {
+ DL_ERR("Compat loading: \"%s\" failed to read LOAD segment %zu: %m", name_.c_str(), seg_idx);
+ return false;
+ }
+
+ return true;
+}
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index bcb1efc..0470f87 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -188,8 +188,8 @@
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias, relocator.si->should_pad_segments(),
+ relocator.si->should_use_16kib_app_compat()) < 0) {
DL_ERR("can't protect segments for \"%s\": %m", relocator.si->get_realpath());
return false;
}
@@ -198,8 +198,8 @@
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias, relocator.si->should_pad_segments(),
+ relocator.si->should_use_16kib_app_compat()) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %m",
relocator.si->get_realpath());
return false;
diff --git a/linker/linker_soinfo.h b/linker/linker_soinfo.h
index 9a13af2..a776c1f 100644
--- a/linker/linker_soinfo.h
+++ b/linker/linker_soinfo.h
@@ -369,6 +369,17 @@
}
bool should_pad_segments() const { return should_pad_segments_; }
+ void set_should_use_16kib_app_compat(bool should_use_16kib_app_compat) {
+ should_use_16kib_app_compat_ = should_use_16kib_app_compat;
+ }
+ bool should_use_16kib_app_compat() const { return should_use_16kib_app_compat_; }
+
+ void set_compat_relro_start(ElfW(Addr) start) { compat_relro_start_ = start; }
+ ElfW(Addr) compat_relro_start() const { return compat_relro_start_; }
+
+ void set_compat_relro_size(ElfW(Addr) size) { compat_relro_size_ = size; }
+ ElfW(Addr) compat_relro_size() const { return compat_relro_size_; }
+
private:
bool is_image_linked() const;
void set_image_linked();
@@ -455,6 +466,13 @@
// Pad gaps between segments when memory mapping?
bool should_pad_segments_ = false;
+
+ // Use app compat mode when loading 4KiB max-page-size ELFs on 16KiB page-size devices?
+ bool should_use_16kib_app_compat_ = false;
+
+ // RELRO region for 16KiB compat loading
+ ElfW(Addr) compat_relro_start_ = 0;
+ ElfW(Addr) compat_relro_size_ = 0;
};
// This function is used by dlvsym() to calculate hash of sym_ver