linker: LoadSegments: Preparatory work for 16KiB App Compat
Introduce should_use_16kib_app_compat_ to ElfReader and pipe it through
to soinfo.
Introduce seg_align and use align_up()/align_down() to align the segment
start and end, as they offer more flexibility than
page_start()/page_end().
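For reference, a minimal sketch of the distinction (modeled on bionic's
page.h helpers; illustrative, not the exact definitions):
align_up()/align_down() take the alignment as a parameter, whereas
page_start()/page_end() are hard-wired to the runtime page size, which is
what lets LoadSegments() substitute a per-ELF seg_align later:

  // Illustrative only; power-of-two alignments assumed.
  inline uintptr_t align_down(uintptr_t p, size_t align) {
    return p & ~(align - 1);
  }
  inline uintptr_t align_up(uintptr_t p, size_t align) {
    return (p + align - 1) & ~(align - 1);
  }
  // page_start()/page_end() are the same operations with the alignment
  // fixed to the runtime page size:
  //   page_start(x) == align_down(x, kPageSize)
  //   page_end(x)   == align_up(x, kPageSize)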
Use should_use_16kib_app_compat_ to skip steps that won't be needed in
compatibility mode.
No functional change is introduced in this patch.
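Illustratively, the gate added in linker_phdr.cpp (hunk below) reduces to
the following predicate; this is a sketch, with all names taken from the
diff:

  // App compat is only considered on 16KiB-page-size devices, and only for
  // ELFs whose minimum LOAD segment alignment is 4KiB. The property is
  // re-read for each ELF because developers may toggle it at any time.
  should_use_16kib_app_compat_ =
      kPageSize == 0x4000 &&
      phdr_table_get_minimum_alignment(phdr_table_, phdr_num_) == 0x1000 &&
      ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled",
                                       false);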
Bug: 339709616
Test: atest linker-unit-tests
Change-Id: Ice110c6e2ad54a2ca65e70eb119d9e2b7973a963
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
diff --git a/linker/linker.cpp b/linker/linker.cpp
index bcc2500..5146584 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -640,6 +640,7 @@
si_->set_gap_start(elf_reader.gap_start());
si_->set_gap_size(elf_reader.gap_size());
si_->set_should_pad_segments(elf_reader.should_pad_segments());
+ si_->set_should_use_16kib_app_compat(elf_reader.should_use_16kib_app_compat());
return true;
}
@@ -3361,7 +3362,8 @@
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
- if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %m", get_realpath());
return false;
}
@@ -3377,7 +3379,8 @@
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
- if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
DL_ERR("can't protect segments for \"%s\": %m", get_realpath());
return false;
}
@@ -3412,7 +3415,8 @@
}
bool soinfo::protect_relro() {
- if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_,
+ should_use_16kib_app_compat_) < 0) {
DL_ERR("can't enable GNU RELRO protection for \"%s\": %m", get_realpath());
return false;
}
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 74cd517..48ed723 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -370,8 +370,9 @@
// Apply appropriate protections here if it is needed.
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
- (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
- somain->should_pad_segments(), &note_gnu_property) < 0)) {
+ (phdr_table_protect_segments(
+ somain->phdr, somain->phnum, somain->load_bias, somain->should_pad_segments(),
+ somain->should_use_16kib_app_compat(), &note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %m", exe_info.path.c_str());
}
}
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index b7db4cd..2a1c05b 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -47,6 +47,7 @@
#include "private/elf_note.h"
#include <android-base/file.h>
+#include <android-base/properties.h>
static int GetTargetElfMachine() {
#if defined(__arm__)
@@ -182,6 +183,14 @@
did_read_ = true;
}
+ if (kPageSize == 0x4000 && phdr_table_get_minimum_alignment(phdr_table_, phdr_num_) == 0x1000) {
+ // This prop needs to be read on 16KiB devices for each ELF where min_palign is 4KiB.
+ // It cannot be cached since the developer may toggle app compat on/off.
+ // This check will be removed once app compat is made the default on 16KiB devices.
+ should_use_16kib_app_compat_ =
+ ::android::base::GetBoolProperty("bionic.linker.16kb.app_compat.enabled", false);
+ }
+
return did_read_;
}
@@ -197,8 +206,9 @@
#if defined(__aarch64__)
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
- did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
- should_pad_segments_, &note_gnu_property_) == 0);
+ did_load_ =
+ (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_, should_pad_segments_,
+ should_use_16kib_app_compat_, &note_gnu_property_) == 0);
}
#endif
}
@@ -808,8 +818,15 @@
}
static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- size_t phdr_idx, ElfW(Addr)* p_memsz,
- ElfW(Addr)* p_filesz, bool should_pad_segments) {
+ size_t phdr_idx, ElfW(Addr)* p_memsz,
+ ElfW(Addr)* p_filesz, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
+ // NOTE: Segment extension is only applicable where the ELF's max-page-size > runtime page size,
+ // and is done to save kernel VMA slab memory. 16KiB compat mode is the exact opposite scenario.
+ if (should_use_16kib_app_compat) {
+ return;
+ }
+
const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
const ElfW(Phdr)* next = nullptr;
size_t next_idx = phdr_idx + 1;
@@ -879,6 +896,13 @@
}
void ElfReader::ZeroFillSegment(const ElfW(Phdr)* phdr) {
+ // NOTE: In 16KiB app compat mode, the ELF mapping is anonymous, meaning that
+ // RW segments are COW-ed from the kernel's zero page. So there is no need to
+ // explicitly zero-fill until the last page's limit.
+ if (should_use_16kib_app_compat_) {
+ return;
+ }
+
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
@@ -898,6 +922,12 @@
}
void ElfReader::DropPaddingPages(const ElfW(Phdr)* phdr, uint64_t seg_file_end) {
+ // NOTE: Padding pages are only applicable where the ELF's max-page-size > runtime page size;
+ // 16KiB compat mode is the exact opposite scenario.
+ if (should_use_16kib_app_compat_) {
+ return;
+ }
+
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
uint64_t unextended_seg_file_end = seg_start + phdr->p_filesz;
@@ -926,6 +956,12 @@
bool ElfReader::MapBssSection(const ElfW(Phdr)* phdr, ElfW(Addr) seg_page_end,
ElfW(Addr) seg_file_end) {
+ // NOTE: We do not need to handle .bss in 16KiB compat mode since the mapping
+ // reservation is anonymous and RW to begin with.
+ if (should_use_16kib_app_compat_) {
+ return true;
+ }
+
// seg_file_end is now the first page address after the file content.
seg_file_end = page_end(seg_file_end);
@@ -952,10 +988,13 @@
}
bool ElfReader::LoadSegments() {
+ size_t seg_align = kPageSize;
+
size_t min_palign = phdr_table_get_minimum_alignment(phdr_table_, phdr_num_);
- // Only enforce this on 16 KB systems. Apps may rely on undefined behavior
- // here on 4 KB systems, which is the norm before this change is introduced.
- if (kPageSize >= 16384 && min_palign < kPageSize) {
+ // Only enforce this on 16 KB systems with app compat disabled.
+ // Apps may rely on undefined behavior here on 4 KB systems,
+ // which is the norm before this change is introduced.
+ if (kPageSize >= 16384 && min_palign < kPageSize && !should_use_16kib_app_compat_) {
DL_ERR("\"%s\" program alignment (%zu) cannot be smaller than system page size (%zu)",
name_.c_str(), min_palign, kPageSize);
return false;
@@ -970,13 +1009,14 @@
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
- _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+ _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_,
+ should_use_16kib_app_compat_);
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = seg_start + p_memsz;
- ElfW(Addr) seg_page_end = page_end(seg_end);
+ ElfW(Addr) seg_page_end = align_up(seg_end, seg_align);
ElfW(Addr) seg_file_end = seg_start + p_filesz;
@@ -984,7 +1024,7 @@
ElfW(Addr) file_start = phdr->p_offset;
ElfW(Addr) file_end = file_start + p_filesz;
- ElfW(Addr) file_page_start = page_start(file_start);
+ ElfW(Addr) file_page_start = align_down(file_start, seg_align);
ElfW(Addr) file_length = file_end - file_page_start;
if (file_size_ <= 0) {
@@ -1039,7 +1079,7 @@
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int extra_prot_flags,
- bool should_pad_segments) {
+ bool should_pad_segments, bool should_use_16kib_app_compat) {
for (size_t i = 0; i < phdr_count; ++i) {
const ElfW(Phdr)* phdr = &phdr_table[i];
@@ -1049,7 +1089,8 @@
ElfW(Addr) p_memsz = phdr->p_memsz;
ElfW(Addr) p_filesz = phdr->p_filesz;
- _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+ _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments,
+ should_use_16kib_app_compat);
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
@@ -1088,12 +1129,14 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode?
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat,
const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
@@ -1101,7 +1144,8 @@
prot |= PROT_BTI;
}
#endif
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments,
+ should_use_16kib_app_compat);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -1118,20 +1162,22 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Are segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode?
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
-int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias,
- bool should_pad_segments) {
+int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
}
static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
- bool should_pad_segments) {
+ bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
// Find the index and phdr of the LOAD containing the GNU_RELRO segment
for (size_t index = 0; index < phdr_count; ++index) {
const ElfW(Phdr)* phdr = &phdr_table[index];
@@ -1179,7 +1225,7 @@
// mprotect will only RO protect a part of the extended RW LOAD segment, which
// will leave an extra split RW VMA (the gap).
_extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
*seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
return;
@@ -1192,7 +1238,8 @@
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int prot_flags,
- bool should_pad_segments) {
+ bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -1220,7 +1267,7 @@
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
_extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1246,13 +1293,15 @@
* phdr_count -> number of entries in tables
* load_bias -> load bias
* should_pad_segments -> Were segments extended to avoid gaps in the memory map
+ * should_use_16kib_app_compat -> Is the ELF being loaded in 16KiB app compat mode?
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments) {
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
- should_pad_segments);
+ should_pad_segments, should_use_16kib_app_compat);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 1d6bbe3..a30fe33 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -59,6 +59,7 @@
bool is_mapped_by_caller() const { return mapped_by_caller_; }
ElfW(Addr) entry_point() const { return header_.e_entry + load_bias_; }
bool should_pad_segments() const { return should_pad_segments_; }
+ bool should_use_16kib_app_compat() const { return should_use_16kib_app_compat_; }
private:
[[nodiscard]] bool ReadElfHeader();
@@ -123,6 +124,9 @@
// Pad gaps between segments when memory mapping?
bool should_pad_segments_ = false;
+ // Use app compat mode when loading 4KiB max-page-size ELFs on 16KiB page-size devices?
+ bool should_use_16kib_app_compat_ = false;
+
// Only used by AArch64 at the moment.
GnuPropertySection note_gnu_property_ __unused;
};
@@ -135,13 +139,16 @@
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat,
const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ bool should_use_16kib_app_compat);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index bcb1efc..0470f87 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -188,8 +188,8 @@
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias, relocator.si->should_pad_segments(),
+ relocator.si->should_use_16kib_app_compat()) < 0) {
DL_ERR("can't protect segments for \"%s\": %m", relocator.si->get_realpath());
return false;
}
@@ -198,8 +198,8 @@
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias, relocator.si->should_pad_segments(),
+ relocator.si->should_use_16kib_app_compat()) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %m",
relocator.si->get_realpath());
return false;
diff --git a/linker/linker_soinfo.h b/linker/linker_soinfo.h
index 9a13af2..1886f29 100644
--- a/linker/linker_soinfo.h
+++ b/linker/linker_soinfo.h
@@ -369,6 +369,11 @@
}
bool should_pad_segments() const { return should_pad_segments_; }
+ void set_should_use_16kib_app_compat(bool should_use_16kib_app_compat) {
+ should_use_16kib_app_compat_ = should_use_16kib_app_compat;
+ }
+ bool should_use_16kib_app_compat() const { return should_use_16kib_app_compat_; }
+
private:
bool is_image_linked() const;
void set_image_linked();
@@ -455,6 +460,9 @@
// Pad gaps between segments when memory mapping?
bool should_pad_segments_ = false;
+
+ // Use app compat mode when loading 4KiB max-page-size ELFs on 16KiB page-size devices?
+ bool should_use_16kib_app_compat_ = false;
};
// This function is used by dlvsym() to calculate hash of sym_ver