Merge "Remove unused define" into main
diff --git a/libc/bionic/android_set_abort_message.cpp b/libc/bionic/android_set_abort_message.cpp
index d5f8cb9..53d7576 100644
--- a/libc/bionic/android_set_abort_message.cpp
+++ b/libc/bionic/android_set_abort_message.cpp
@@ -28,9 +28,13 @@
 
 #include <android/set_abort_message.h>
 
+#include <async_safe/log.h>
+#include <bionic/set_abort_message_internal.h>
+
+#include <bits/stdatomic.h>
 #include <pthread.h>
-#include <stdint.h>
 #include <stddef.h>
+#include <stdint.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <sys/prctl.h>
@@ -56,6 +60,8 @@
               "The in-memory layout of magic_abort_msg_t is not consistent with what automated "
               "tools expect.");
 
+static _Atomic(crash_detail_t*) free_head = nullptr;
+
 [[clang::optnone]]
 static void fill_abort_message_magic(magic_abort_msg_t* new_magic_abort_message) {
   // 128-bit magic for the abort message. Chosen by fair dice roll.
@@ -97,3 +103,65 @@
   strcpy(new_magic_abort_message->msg.msg, msg);
   __libc_shared_globals()->abort_msg = &new_magic_abort_message->msg;
 }
+
+__BIONIC_WEAK_FOR_NATIVE_BRIDGE
+crash_detail_t* android_register_crash_detail(const void* name, size_t name_size, const void* data,
+                                              size_t data_size) {
+  auto populate_crash_detail = [&](crash_detail_t* result) {
+    result->name = reinterpret_cast<const char*>(name);
+    result->name_size = name_size;
+    result->data = reinterpret_cast<const char*>(data);
+    result->data_size = data_size;
+  };
+  // This is an atomic fast path for RAII use-cases where the app keeps creating and deleting
+  // crash details for short periods of time to capture detailed scopes.
+  if (crash_detail_t* head = atomic_load(&free_head)) {
+    while (head != nullptr && !atomic_compare_exchange_strong(&free_head, &head, head->prev_free)) {
+      // intentionally left blank.
+    }
+    if (head) {
+      head->prev_free = nullptr;
+      populate_crash_detail(head);
+      return head;
+    }
+  }
+  ScopedPthreadMutexLocker locker(&__libc_shared_globals()->crash_detail_page_lock);
+  struct crash_detail_page_t* prev = nullptr;
+  struct crash_detail_page_t* page = __libc_shared_globals()->crash_detail_page;
+  if (page != nullptr && page->used == kNumCrashDetails) {
+    prev = page;
+    page = nullptr;
+  }
+  if (page == nullptr) {
+    size_t size = sizeof(crash_detail_page_t);
+    void* map = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+    if (map == MAP_FAILED) {
+      async_safe_format_log(ANDROID_LOG_ERROR, "libc", "failed to allocate crash_detail_page: %m");
+      return nullptr;
+    }
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map, size, "crash details");
+    page = reinterpret_cast<struct crash_detail_page_t*>(map);
+    page->prev = prev;
+    __libc_shared_globals()->crash_detail_page = page;
+  }
+  crash_detail_t* result = &page->crash_details[page->used];
+  populate_crash_detail(result);
+  page->used++;
+  return result;
+}
+
+__BIONIC_WEAK_FOR_NATIVE_BRIDGE
+void android_unregister_crash_detail(crash_detail_t* crash_detail) {
+  if (crash_detail) {
+    if (crash_detail->prev_free) {
+      // Unregistering twice would corrupt the free-list by creating a cycle.
+      return;
+    }
+    crash_detail->data = nullptr;
+    crash_detail->name = nullptr;
+    crash_detail_t* prev = atomic_load(&free_head);
+    do {
+      crash_detail->prev_free = prev;
+    } while (!atomic_compare_exchange_strong(&free_head, &prev, crash_detail));
+  }
+}
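
The free-list above gives android_register_crash_detail a lock-free fast path (a pop from a Treiber stack), so scoped register/unregister pairs only take crash_detail_page_lock on the first allocation. A minimal sketch of the RAII pattern this is optimized for, assuming a hypothetical ScopedCrashDetail wrapper that is not part of this change:

```cpp
// Hypothetical RAII helper around the new API; illustrative only.
#include <android/set_abort_message.h>
#include <string.h>

class ScopedCrashDetail {
 public:
  // name and data must outlive this object (string literals are safe).
  ScopedCrashDetail(const char* name, const char* data)
      : detail_(android_register_crash_detail(name, strlen(name), data, strlen(data))) {}
  ~ScopedCrashDetail() {
    // Registration can fail (returns nullptr), so guard the unregister.
    if (detail_) android_unregister_crash_detail(detail_);
  }
  ScopedCrashDetail(const ScopedCrashDetail&) = delete;
  ScopedCrashDetail& operator=(const ScopedCrashDetail&) = delete;

 private:
  crash_detail_t* detail_;
};

// Usage: each iteration pops and pushes the same free-list node,
// never touching crash_detail_page_lock after the first allocation.
// for (const auto& task : tasks) {
//   ScopedCrashDetail cd("current_task", task.name);
//   run(task);
// }
```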
diff --git a/libc/include/android/set_abort_message.h b/libc/include/android/set_abort_message.h
index 35867ac..e92c6ec 100644
--- a/libc/include/android/set_abort_message.h
+++ b/libc/include/android/set_abort_message.h
@@ -30,13 +30,18 @@
 
 /**
  * @file android/set_abort_message.h
- * @brief The android_set_abort_message() function.
+ * @brief Attach extra information to android crashes.
  */
 
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
 #include <sys/cdefs.h>
 
 __BEGIN_DECLS
 
+typedef struct crash_detail_t crash_detail_t;
+
 /**
  * android_set_abort_message() sets the abort message that will be shown
  * by [debuggerd](https://source.android.com/devices/tech/debug/native-crash).
@@ -46,4 +51,51 @@
  */
 void android_set_abort_message(const char* _Nullable __msg);
 
+/**
+ * Register a new buffer to be logged in tombstones for crashes.
+ *
+ * It will be added both to the crash_detail field of the tombstone proto and
+ * to the tombstone text format.
+ *
+ * Tombstone proto definition:
+ *   https://cs.android.com/android/platform/superproject/main/+/main:system/core/debuggerd/proto/tombstone.proto
+ *
+ * The buffers pointed to by name and data must remain valid until the program
+ * crashes or until android_unregister_crash_detail is called.
+ *
+ * Example usage:
+ *   const char* stageName = "garbage_collection";
+ *   crash_detail_t* cd = android_register_crash_detail("stage", strlen("stage"), stageName, strlen(stageName));
+ *   do_garbage_collection();
+ *   android_unregister_crash_detail(cd);
+ *
+ * If this example crashes in do_garbage_collection, a line will show up in the textual representation of the tombstone:
+ *   Extra crash detail: stage: 'garbage_collection'
+ *
+ * Introduced in API 35.
+ *
+ * \param name identifying name for this extra data.
+ *             This should generally be a human-readable debug string, but it is treated
+ *             as arbitrary bytes because it could be corrupted by the crash.
+ * \param name_size number of bytes of the buffer pointed to by name
+ * \param data a buffer containing the extra detail bytes
+ * \param data_size number of bytes of the buffer pointed to by data
+ *
+ * \return a handle to the extra crash detail for use with android_unregister_crash_detail.
+ */
+crash_detail_t* _Nullable android_register_crash_detail(
+    const void* _Nonnull name, size_t name_size, const void* _Nonnull data, size_t data_size) __INTRODUCED_IN(35);
+
+/**
+ * Unregister a crash detail so it is no longer logged in tombstones.
+ *
+ * After this function returns, the buffers that crash_detail was constructed
+ * from no longer need to remain valid.
+ *
+ * Introduced in API 35.
+ *
+ * \param crash_detail the crash_detail that should be removed.
+ */
+void android_unregister_crash_detail(crash_detail_t* _Nonnull crash_detail) __INTRODUCED_IN(35);
+
 __END_DECLS
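
Expanded into a compilable form, the doc comment's example might look like the sketch below; do_garbage_collection is the placeholder from the documentation, and the null check reflects that registration can fail:

```cpp
#include <android/set_abort_message.h>
#include <string.h>

extern void do_garbage_collection();  // placeholder from the doc comment above

void collect() {
  const char* stageName = "garbage_collection";
  // String literals stay valid until unregistration, as required.
  crash_detail_t* cd = android_register_crash_detail(
      "stage", strlen("stage"), stageName, strlen(stageName));
  // A crash here logs: Extra crash detail: stage: 'garbage_collection'
  do_garbage_collection();
  if (cd) android_unregister_crash_detail(cd);
}
```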
diff --git a/libc/libc.map.txt b/libc/libc.map.txt
index 156e9ee..ecdb25c 100644
--- a/libc/libc.map.txt
+++ b/libc/libc.map.txt
@@ -1586,6 +1586,8 @@
 
 LIBC_V { # introduced=VanillaIceCream
   global:
+    android_register_crash_detail;
+    android_unregister_crash_detail;
     epoll_pwait2;
     epoll_pwait2_64;
     localtime_rz;
diff --git a/libc/platform/bionic/set_abort_message_internal.h b/libc/platform/bionic/set_abort_message_internal.h
new file mode 100644
index 0000000..4dff3ac
--- /dev/null
+++ b/libc/platform/bionic/set_abort_message_internal.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <android/set_abort_message.h>
+#include <stddef.h>
+#include <sys/cdefs.h>
+
+struct crash_detail_t {
+  const char* name;
+  size_t name_size;
+  const char* data;
+  size_t data_size;
+  crash_detail_t* prev_free;
+};
+
+constexpr auto kNumCrashDetails = 128;
+
+struct crash_detail_page_t {
+  struct crash_detail_page_t* prev;
+  size_t used;
+  struct crash_detail_t crash_details[kNumCrashDetails];
+};
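
A back-of-the-envelope check of this layout, assuming LP64 (8-byte pointers and size_t) and the platform include path; these asserts are illustrative and not part of the header:

```cpp
#include <bionic/set_abort_message_internal.h>  // assumes the platform include path

// Five word-sized fields per crash detail: 40 bytes on LP64.
static_assert(sizeof(crash_detail_t) == 5 * sizeof(void*));
// prev + used header, then 128 inline entries: 16 + 128 * 40 = 5136 bytes,
// so the mmap in android_register_crash_detail spans two 4 KiB pages.
static_assert(sizeof(crash_detail_page_t) ==
              2 * sizeof(void*) + kNumCrashDetails * sizeof(crash_detail_t));
```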
diff --git a/libc/private/bionic_globals.h b/libc/private/bionic_globals.h
index 08a61f0..6f1e389 100644
--- a/libc/private/bionic_globals.h
+++ b/libc/private/bionic_globals.h
@@ -84,6 +84,7 @@
 __LIBC_HIDDEN__ extern WriteProtected<libc_globals> __libc_globals;
 
 struct abort_msg_t;
+struct crash_detail_page_t;
 namespace gwp_asan {
 struct AllocatorState;
 struct AllocationMetadata;
@@ -138,6 +139,8 @@
   int64_t heap_tagging_upgrade_timer_sec = 0;
 
   void (*memtag_stack_dlopen_callback)() = nullptr;
+  pthread_mutex_t crash_detail_page_lock = PTHREAD_MUTEX_INITIALIZER;
+  crash_detail_page_t* crash_detail_page = nullptr;
 };
 
 __LIBC_HIDDEN__ libc_shared_globals* __libc_shared_globals();
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 60c8e31..724f821 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3354,7 +3354,7 @@
                               "\"%s\" has text relocations",
                               get_realpath());
     add_dlwarning(get_realpath(), "text relocations");
-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
       return false;
     }
@@ -3370,7 +3370,7 @@
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3408,7 +3408,7 @@
 }
 
 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;
diff --git a/linker/linker_debuggerd_android.cpp b/linker/linker_debuggerd_android.cpp
index 444da78..9ded143 100644
--- a/linker/linker_debuggerd_android.cpp
+++ b/linker/linker_debuggerd_android.cpp
@@ -45,6 +45,7 @@
       .scudo_ring_buffer = __libc_shared_globals()->scudo_ring_buffer,
       .scudo_ring_buffer_size = __libc_shared_globals()->scudo_ring_buffer_size,
       .scudo_stack_depot_size = __libc_shared_globals()->scudo_stack_depot_size,
+      .crash_detail_page = __libc_shared_globals()->crash_detail_page,
   };
 }
 
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 018a5eb..5f5eba4 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,7 +201,6 @@
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
-  bool should_pad_segments;
 };
 
 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -294,7 +293,6 @@
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
-  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }
 
@@ -368,7 +366,6 @@
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
-  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -402,7 +399,7 @@
     auto note_gnu_property = GnuPropertySection(somain);
     if (note_gnu_property.IsBTICompatible() &&
         (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                     somain->should_pad_segments(), &note_gnu_property) < 0)) {
+                                     &note_gnu_property) < 0)) {
       __linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
                      strerror(errno));
     }
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 6c501bc..82b37a4 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -196,7 +196,7 @@
     // For Armv8.5-A loaded executable segments may require PROT_BTI.
     if (note_gnu_property_.IsBTICompatible()) {
       did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                               should_pad_segments_, &note_gnu_property_) == 0);
+                                               &note_gnu_property_) == 0);
     }
 #endif
   }
@@ -728,8 +728,9 @@
     // at most 1 PT_NOTE mapped at anytime during this search.
     MappedFileFragment note_fragment;
     if (!note_fragment.Map(fd_, file_offset_, phdr->p_offset, phdr->p_memsz)) {
-      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %zu, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
-             name_.c_str(), phdr->p_memsz, fd_, page_start(file_offset_ + phdr->p_offset));
+      DL_ERR("\"%s\": PT_NOTE mmap(nullptr, %p, PROT_READ, MAP_PRIVATE, %d, %p) failed: %m",
+             name_.c_str(), reinterpret_cast<void*>(phdr->p_memsz), fd_,
+             reinterpret_cast<void*>(page_start(file_offset_ + phdr->p_offset)));
       return false;
     }
 
@@ -755,36 +756,6 @@
   return true;
 }
 
-static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                             size_t phdr_idx, ElfW(Addr)* p_memsz,
-                                             ElfW(Addr)* p_filesz) {
-  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
-  const ElfW(Phdr)* next = nullptr;
-  size_t next_idx = phdr_idx + 1;
-  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
-    next = &phdr_table[next_idx];
-  }
-
-  // If this is the last LOAD segment, no extension is needed
-  if (!next || *p_memsz != *p_filesz) {
-    return;
-  }
-
-  ElfW(Addr) next_start = page_start(next->p_vaddr);
-  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
-
-  // If adjacent segment mappings overlap, no extension is needed.
-  if (curr_end >= next_start) {
-    return;
-  }
-
-  // Extend the LOAD segment mapping to be contiguous with that of
-  // the next LOAD segment.
-  ElfW(Addr) extend = next_start - curr_end;
-  *p_memsz += extend;
-  *p_filesz += extend;
-}
-
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -793,24 +764,18 @@
       continue;
     }
 
-    ElfW(Addr) p_memsz = phdr->p_memsz;
-    ElfW(Addr) p_filesz = phdr->p_filesz;
-    if (phdr->p_align > kPageSize && should_pad_segments_) {
-      _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz);
-    }
-
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end = seg_start + p_memsz;
+    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;
 
     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);
 
-    ElfW(Addr) seg_file_end = seg_start + p_filesz;
+    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;
 
     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end = file_start + p_filesz;
+    ElfW(Addr) file_end   = file_start + phdr->p_filesz;
 
     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -820,12 +785,12 @@
       return false;
     }
 
-    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
+    if (file_end > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
           " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
           name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
           reinterpret_cast<void*>(phdr->p_filesz),
-          reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
+          reinterpret_cast<void*>(file_end), file_size_);
       return false;
     }
 
@@ -865,18 +830,8 @@
 
     // if the segment is writable, and does not end on a page boundary,
     // zero-fill it until the page limit.
-    //
-    // The intention is to zero the partial page at that may exist at the
-    // end of a file backed mapping. With the extended seg_file_end, this
-    // file offset as calculated from the mapping start can overrun the end
-    // of the file. However pages in that range cannot be touched by userspace
-    // because the kernel will not be able to handle a file map fault past the
-    // extent of the file. No need to try zeroing this untouchable region.
-    // Zero the partial page at the end of the original unextended seg_file_end.
-    ElfW(Addr) seg_file_end_orig = seg_start + phdr->p_filesz;
-    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end_orig) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end_orig), 0,
-             kPageSize - page_offset(seg_file_end_orig));
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
+      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
     }
 
     seg_file_end = page_end(seg_file_end);
@@ -909,23 +864,17 @@
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags,
-                                     bool should_pad_segments) {
-  for (size_t i = 0; i < phdr_count; ++i) {
-    const ElfW(Phdr)* phdr = &phdr_table[i];
+                                     ElfW(Addr) load_bias, int extra_prot_flags) {
+  const ElfW(Phdr)* phdr = phdr_table;
+  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
+  for (; phdr < phdr_limit; phdr++) {
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }
 
-    ElfW(Addr) p_memsz = phdr->p_memsz;
-    ElfW(Addr) p_filesz = phdr->p_filesz;
-    if (phdr->p_align > kPageSize && should_pad_segments) {
-      _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz);
-    }
-
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -960,21 +909,19 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, bool should_pad_segments,
-                                const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
 }
 
 /* Change the protection of all loaded segments in memory to writable.
@@ -990,53 +937,19 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias,
-                                  bool should_pad_segments) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
-                                   should_pad_segments);
-}
-
-static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
-                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end) {
-  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
-  for (size_t index = 0; index < phdr_count; ++index) {
-    const ElfW(Phdr)* phdr = &phdr_table[index];
-
-    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
-      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
-      // LOAD segment mem size, we need to protect only a partial region of the
-      // LOAD segment and therefore cannot avoid a VMA split.
-      if (relro_phdr->p_memsz < phdr->p_memsz) {
-        break;
-      }
-
-      ElfW(Addr) p_memsz = phdr->p_memsz;
-      ElfW(Addr) p_filesz = phdr->p_filesz;
-
-      // Attempt extending the VMA (mprotect range). Without extending the range
-      // mprotect will only RO protect a part of the extend RW LOAD segment, which will
-      // leave an extra split RW VMA (the gap).
-      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz);
-
-      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
-
-      break;
-    }
-  }
+                                  size_t phdr_count, ElfW(Addr) load_bias) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
 }
 
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags,
-                                          bool should_pad_segments) {
+                                          ElfW(Addr) load_bias, int prot_flags) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
@@ -1061,16 +974,8 @@
     //       the program is likely to fail at runtime. So in effect the
     //       linker must only emit a PT_GNU_RELRO segment if it ensures
     //       that it starts on a page boundary.
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz + load_bias);
-
-    // Before extending the RO protection, we need to ensure that the segments were extended
-    // by bionic, because the kernel won't map gaps so it usually contains unrelated
-    // mappings which will be incorrectly protected as RO likely leading to
-    // segmentation fault.
-    if (phdr->p_align > kPageSize && should_pad_segments) {
-      _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end);
-    }
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -1095,14 +1000,12 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias, bool should_pad_segments) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
-                                        should_pad_segments);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
+                                 size_t phdr_count, ElfW(Addr) load_bias) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
 }
 
 /* Serialize the GNU relro segments to the given file descriptor. This can be
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 4deed33..e5b87bb 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,14 +128,13 @@
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
 
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, bool should_pad_segments,
-                                const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
 
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias, bool should_pad_segments);
+                                  ElfW(Addr) load_bias);
 
 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias, bool should_pad_segments);
+                                 ElfW(Addr) load_bias);
 
 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);
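
For reference, a self-contained sketch of the page-rounding arithmetic these functions rely on, assuming 4 KiB pages (bionic's page_start/page_end helpers behave like this, and some targets use larger pages):

```cpp
#include <cstdint>

constexpr uintptr_t kPageSize = 4096;  // assumption; not all targets use 4 KiB

constexpr uintptr_t page_start(uintptr_t addr) { return addr & ~(kPageSize - 1); }
constexpr uintptr_t page_offset(uintptr_t addr) { return addr & (kPageSize - 1); }
constexpr uintptr_t page_end(uintptr_t addr) { return page_start(addr + kPageSize - 1); }

// Since load_bias is page-aligned, rounding before or after adding it is
// equivalent, which is why the reverted code can hoist it out of page_start():
//   page_start(p_vaddr) + load_bias == page_start(p_vaddr + load_bias)
static_assert(page_start(0x1234) == 0x1000);
static_assert(page_end(0x1234) == 0x2000);
static_assert(page_start(0x1234) + 0x5000 == page_start(0x1234 + 0x5000));
```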
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 5b58895..952dade 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,8 +187,7 @@
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias,
-                                    relocator.si->should_pad_segments()) < 0) {
+                                    relocator.si->load_bias) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -198,8 +197,7 @@
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias,
-                                      relocator.si->should_pad_segments()) < 0) {
+                                      relocator.si->load_bias) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index 6883da9..d078e50 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,7 +31,6 @@
 #include <android-base/test_utils.h>
 
 #include <sys/mman.h>
-#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 #include <sys/wait.h>
@@ -2047,11 +2046,6 @@
                                                              -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
 
-  struct stat file_stat;
-  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
-  ASSERT_EQ(ret, 0) << "Failed to stat library";
-  size_t file_size = file_stat.st_size;
-
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2059,13 +2053,7 @@
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    size_t seg_size = size;
-    // See comment on file map fault in ElfReader::LoadSegments()
-    // bionic/linker/linker_phdr.cpp
-    if (rec.offset + size > file_size) {
-      seg_size = file_size - rec.offset;
-    }
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), seg_size);
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
     mprotect(map, size, rec.perms);
   }