Revert "bionic: loader: Extend LOAD segment VMAs"
Revert submission 2803156-loader_crt_pad_segment
Reason for revert: b/324952273
Reverted changes: /q/submissionid:2803156-loader_crt_pad_segment
Change-Id: I8af115c426c0113914abbf8fbd3e74c0d89408d1
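Note for reviewers, on what is being reverted: the original change extended each file-backed LOAD segment's VMA up to the page-aligned start of the next LOAD segment whenever p_align exceeded the page size, so that ELFs aligned for 16 KiB pages would not leave unmapped gaps between segments on 4 KiB-page kernels. A minimal standalone sketch of that page arithmetic follows; kPageSize and the helper names here are illustrative stand-ins, not the loader's own definitions:

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the loader's page helpers; kPageSize here is a
// hypothetical 4 KiB, while the real linker derives the page size at runtime.
constexpr uint64_t kPageSize = 4096;
constexpr uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
constexpr uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

int main() {
  // Two adjacent PT_LOAD segments from a hypothetical 16 KiB-aligned ELF:
  // the current segment's mapping ends at 0x5000, the next begins at 0x8000.
  uint64_t curr_vaddr = 0x0, curr_memsz = 0x5000;
  uint64_t next_vaddr = 0x8000;

  uint64_t curr_end   = page_end(curr_vaddr + curr_memsz);  // 0x5000
  uint64_t next_start = page_start(next_vaddr);             // 0x8000

  if (curr_end < next_start) {
    // The reverted logic grew p_memsz and p_filesz by this delta so the two
    // file-backed mappings became contiguous, leaving no gap in the VMA map.
    uint64_t extend = next_start - curr_end;
    printf("extend segment by 0x%llx bytes\n",
           static_cast<unsigned long long>(extend));
  }
  return 0;
}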
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 389e77d..724f821 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3354,7 +3354,7 @@
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
- if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
return false;
}
@@ -3370,7 +3370,7 @@
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
- if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+ if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 018a5eb..5f5eba4 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,7 +201,6 @@
const ElfW(Phdr)* phdr;
size_t phdr_count;
ElfW(Addr) entry_point;
- bool should_pad_segments;
};
static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -294,7 +293,6 @@
result.phdr = elf_reader.loaded_phdr();
result.phdr_count = elf_reader.phdr_count();
result.entry_point = elf_reader.entry_point();
- result.should_pad_segments = elf_reader.should_pad_segments();
return result;
}
@@ -368,7 +366,6 @@
somain = si;
si->phdr = exe_info.phdr;
si->phnum = exe_info.phdr_count;
- si->set_should_pad_segments(exe_info.should_pad_segments);
get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->dynamic = nullptr;
@@ -402,7 +399,7 @@
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
(phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
- somain->should_pad_segments(), &note_gnu_property) < 0)) {
+ &note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
strerror(errno));
}
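The hunk above restores the two-argument BTI re-protection call: when the executable's GNU property note declares BTI compatibility, the loader ORs PROT_BTI into each executable segment's protection. A sketch of that extra-flag composition, assuming the arm64 PROT_BTI value (0x10), since <sys/mman.h> only defines it when targeting aarch64:

#include <sys/mman.h>

#ifndef PROT_BTI
#define PROT_BTI 0x10  // arm64 value; defined here only so the sketch builds on other targets
#endif

// Mirrors how phdr_table_protect_segments folds an optional extra flag
// (PROT_BTI when the GNU property note allows it) into a segment's
// PF_*-derived protection before calling mprotect().
static int exec_segment_prot(bool bti_compatible) {
  int prot = PROT_READ | PROT_EXEC;
  if (bti_compatible) {
    prot |= PROT_BTI;  // branch targets must begin with a BTI landing pad
  }
  return prot;
}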
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 1547cae..85cd949 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -196,7 +196,7 @@
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
- should_pad_segments_, &note_gnu_property_) == 0);
+ &note_gnu_property_) == 0);
}
#endif
}
@@ -747,36 +747,6 @@
return true;
}
-static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- size_t phdr_idx, ElfW(Addr)* p_memsz,
- ElfW(Addr)* p_filesz) {
- const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
- const ElfW(Phdr)* next = nullptr;
- size_t next_idx = phdr_idx + 1;
- if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
- next = &phdr_table[next_idx];
- }
-
- // If this is the last LOAD segment, no extension is needed
- if (!next || *p_memsz != *p_filesz) {
- return;
- }
-
- ElfW(Addr) next_start = page_start(next->p_vaddr);
- ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
-
- // If adjacent segment mappings overlap, no extension is needed.
- if (curr_end >= next_start) {
- return;
- }
-
- // Extend the LOAD segment mapping to be contiguous with that of
- // the next LOAD segment.
- ElfW(Addr) extend = next_start - curr_end;
- *p_memsz += extend;
- *p_filesz += extend;
-}
-
bool ElfReader::LoadSegments() {
for (size_t i = 0; i < phdr_num_; ++i) {
const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -785,24 +755,18 @@
continue;
}
- ElfW(Addr) p_memsz = phdr->p_memsz;
- ElfW(Addr) p_filesz = phdr->p_filesz;
- if (phdr->p_align > kPageSize && should_pad_segments_) {
- _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz);
- }
-
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
- ElfW(Addr) seg_end = seg_start + p_memsz;
+ ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
ElfW(Addr) seg_page_start = page_start(seg_start);
ElfW(Addr) seg_page_end = page_end(seg_end);
- ElfW(Addr) seg_file_end = seg_start + p_filesz;
+ ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
// File offsets.
ElfW(Addr) file_start = phdr->p_offset;
- ElfW(Addr) file_end = file_start + p_filesz;
+ ElfW(Addr) file_end = file_start + phdr->p_filesz;
ElfW(Addr) file_page_start = page_start(file_start);
ElfW(Addr) file_length = file_end - file_page_start;
@@ -812,12 +776,12 @@
return false;
}
- if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
+ if (file_end > static_cast<size_t>(file_size_)) {
DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
" p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
reinterpret_cast<void*>(phdr->p_filesz),
- reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
+ reinterpret_cast<void*>(file_end), file_size_);
return false;
}
@@ -857,18 +821,8 @@
// if the segment is writable, and does not end on a page boundary,
// zero-fill it until the page limit.
- //
- // The intention is to zero the partial page that may exist at the
- // end of a file backed mapping. With the extended seg_file_end, this
- // file offset as calculated from the mapping start can overrun the end
- // of the file. However pages in that range cannot be touched by userspace
- // because the kernel will not be able to handle a file map fault past the
- // extent of the file. No need to try zeroing this untouchable region.
- // Zero the partial page at the end of the original unextended seg_file_end.
- ElfW(Addr) seg_file_end_orig = seg_start + phdr->p_filesz;
- if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end_orig) > 0) {
- memset(reinterpret_cast<void*>(seg_file_end_orig), 0,
- kPageSize - page_offset(seg_file_end_orig));
+ if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
+ memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
}
seg_file_end = page_end(seg_file_end);
@@ -901,23 +855,17 @@
* phdr_table_protect_segments and phdr_table_unprotect_segments.
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int extra_prot_flags,
- bool should_pad_segments) {
- for (size_t i = 0; i < phdr_count; ++i) {
- const ElfW(Phdr)* phdr = &phdr_table[i];
+ ElfW(Addr) load_bias, int extra_prot_flags) {
+ const ElfW(Phdr)* phdr = phdr_table;
+ const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+ for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
continue;
}
- ElfW(Addr) p_memsz = phdr->p_memsz;
- ElfW(Addr) p_filesz = phdr->p_filesz;
- if (phdr->p_align > kPageSize && should_pad_segments) {
- _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz);
- }
-
- ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
- ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+ ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+ ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
if ((prot & PROT_WRITE) != 0) {
@@ -952,21 +900,19 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
- * should_pad_segments -> Are segments extended to avoid gaps in the memory map
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments,
- const GnuPropertySection* prop __unused) {
+ ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
if ((prop != nullptr) && prop->IsBTICompatible()) {
prot |= PROT_BTI;
}
#endif
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -982,15 +928,12 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
- * should_pad_segments -> Are segments extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias,
- bool should_pad_segments) {
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
- should_pad_segments);
+ size_t phdr_count, ElfW(Addr) load_bias) {
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
/* Used internally by phdr_table_protect_gnu_relro and
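The restored _phdr_table_set_load_prot walks the PT_LOAD entries, skips writable segments, and mprotects each one's page-aligned range with the protection derived from its p_flags plus any extra bits the caller passes in. A standalone sketch of that PF_* to PROT_* translation; the loader's own version is the PFLAGS_TO_PROT macro, this equivalent is only for illustration:

#include <elf.h>
#include <sys/mman.h>

// Standalone equivalent of the loader's PFLAGS_TO_PROT: map ELF segment
// flag bits onto mmap/mprotect protection bits one-for-one.
static int pflags_to_prot(Elf64_Word p_flags) {
  return ((p_flags & PF_R) ? PROT_READ  : 0) |
         ((p_flags & PF_W) ? PROT_WRITE : 0) |
         ((p_flags & PF_X) ? PROT_EXEC  : 0);
}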
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 239ecb9..e5b87bb 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,11 +128,10 @@
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments,
- const GnuPropertySection* prop = nullptr);
+ ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, bool should_pad_segments);
+ ElfW(Addr) load_bias);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 5b58895..952dade 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,8 +187,7 @@
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
@@ -198,8 +197,7 @@
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias,
- relocator.si->should_pad_segments()) < 0) {
+ relocator.si->load_bias) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
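Taken together, the restored call sites in linker.cpp and linker_relocate.cpp bracket text-relocation processing with an unprotect/patch/reprotect sequence. A self-contained demo of that dance, with one anonymous page standing in for a read-only LOAD segment and a memset standing in for applying a relocation (names and values here are illustrative only):

#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t pagesz = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* seg = mmap(nullptr, pagesz, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (seg == MAP_FAILED) return 1;

  // "unprotect_segments": add PROT_WRITE so the patch can land.
  if (mprotect(seg, pagesz, PROT_READ | PROT_WRITE) != 0) return 1;
  memset(seg, 0xAA, 16);  // stand-in for writing a relocated address

  // "protect_segments": drop back to the segment's original protection.
  if (mprotect(seg, pagesz, PROT_READ) != 0) return 1;

  printf("first patched byte: 0x%02x\n",
         static_cast<unsigned char*>(seg)[0]);
  munmap(seg, pagesz);
  return 0;
}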