Merge changes from topic "loader_crt_pad_segment" into main

* changes:
  bionic: loader: Extend GNU_RELRO protection
  bionic: loader: Extend LOAD segment VMAs
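
The underlying problem: when an ELF is built with p_align greater than
the runtime page size (e.g. 16KiB-aligned segments loaded on a 4KiB-page
kernel), the page-aligned mappings of consecutive LOAD segments leave
unmapped gaps in between, fragmenting the process's memory map. When the
ELF opts in (should_pad_segments), the loader now extends each LOAD
mapping up to the page-aligned start of the next LOAD segment, and the
segment-protection and GNU_RELRO paths are taught about the extended
ranges so the mprotect() calls cover the same VMAs that were mapped.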
diff --git a/linker/linker.cpp b/linker/linker.cpp
index a12388c..d680206 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3346,7 +3346,7 @@
                               "\"%s\" has text relocations",
                               get_realpath());
     add_dlwarning(get_realpath(), "text relocations");
-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
       return false;
     }
@@ -3362,7 +3362,7 @@
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3400,7 +3400,7 @@
 }
 
 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 5f5eba4..018a5eb 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,6 +201,7 @@
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
+  bool should_pad_segments;
 };
 
 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
+  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }
 
@@ -366,6 +368,7 @@
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
+  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -399,7 +402,7 @@
     auto note_gnu_property = GnuPropertySection(somain);
     if (note_gnu_property.IsBTICompatible() &&
         (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                     &note_gnu_property) < 0)) {
+                                     somain->should_pad_segments(), &note_gnu_property) < 0)) {
       __linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
                      strerror(errno));
     }
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 8925e62..af0ef1d 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -196,7 +196,7 @@
     // For Armv8.5-A loaded executable segments may require PROT_BTI.
     if (note_gnu_property_.IsBTICompatible()) {
       did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                               &note_gnu_property_) == 0);
+                                               should_pad_segments_, &note_gnu_property_) == 0);
     }
 #endif
   }
@@ -748,6 +748,36 @@
   return true;
 }
 
+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                             size_t phdr_idx, ElfW(Addr)* p_memsz,
+                                             ElfW(Addr)* p_filesz) {
+  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+  const ElfW(Phdr)* next = nullptr;
+  size_t next_idx = phdr_idx + 1;
+  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+    next = &phdr_table[next_idx];
+  }
+
+  // If this is the last LOAD segment, or it has a zero-fill tail (p_memsz != p_filesz), no extension is needed.
+  if (!next || *p_memsz != *p_filesz) {
+    return;
+  }
+
+  ElfW(Addr) next_start = page_start(next->p_vaddr);
+  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+  // If the adjacent mappings are already contiguous (or overlap), no extension is needed.
+  if (curr_end >= next_start) {
+    return;
+  }
+
+  // Extend the LOAD segment mapping to be contiguous with that of
+  // the next LOAD segment.
+  ElfW(Addr) extend = next_start - curr_end;
+  *p_memsz += extend;
+  *p_filesz += extend;
+}
+
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -756,18 +786,24 @@
       continue;
     }
 
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    if (phdr->p_align > kPageSize && should_pad_segments_) {
+      _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz);
+    }
+
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;
+    ElfW(Addr) seg_end = seg_start + p_memsz;
 
     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);
 
-    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;
+    ElfW(Addr) seg_file_end = seg_start + p_filesz;
 
     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end   = file_start + phdr->p_filesz;
+    ElfW(Addr) file_end = file_start + p_filesz;
 
     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -777,12 +813,12 @@
       return false;
     }
 
-    if (file_end > static_cast<size_t>(file_size_)) {
+    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
           " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
           name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
           reinterpret_cast<void*>(phdr->p_filesz),
-          reinterpret_cast<void*>(file_end), file_size_);
+          reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
       return false;
     }
 
@@ -822,8 +858,18 @@
 
     // if the segment is writable, and does not end on a page boundary,
     // zero-fill it until the page limit.
-    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+    //
+    // The intention is to zero the partial page that may exist at the end
+    // of a file-backed mapping. With the extended seg_file_end, this file
+    // offset, as calculated from the mapping start, can overrun the end of
+    // the file. However, pages in that range cannot be touched by userspace
+    // because the kernel cannot handle a file map fault past the extent of
+    // the file, so there is no need to zero this untouchable region. Instead,
+    // zero the partial page at the end of the original, unextended seg_file_end.
+    ElfW(Addr) seg_file_end_orig = seg_start + phdr->p_filesz;
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end_orig) > 0) {
+      memset(reinterpret_cast<void*>(seg_file_end_orig), 0,
+             kPageSize - page_offset(seg_file_end_orig));
     }
 
     seg_file_end = page_end(seg_file_end);
@@ -856,17 +902,23 @@
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags) {
-  const ElfW(Phdr)* phdr = phdr_table;
-  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+                                     ElfW(Addr) load_bias, int extra_prot_flags,
+                                     bool should_pad_segments) {
+  for (size_t i = 0; i < phdr_count; ++i) {
+    const ElfW(Phdr)* phdr = &phdr_table[i];
 
-  for (; phdr < phdr_limit; phdr++) {
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }
 
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) p_memsz = phdr->p_memsz;
+    ElfW(Addr) p_filesz = phdr->p_filesz;
+    if (phdr->p_align > kPageSize && should_pad_segments) {
+      _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz);
+    }
+
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
 
     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -901,19 +953,21 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
 }
 
 /* Change the protection of all loaded segments in memory to writable.
@@ -929,19 +983,53 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+                                  size_t phdr_count, ElfW(Addr) load_bias,
+                                  bool should_pad_segments) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+                                   should_pad_segments);
+}
+
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end) {
+  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+  for (size_t index = 0; index < phdr_count; ++index) {
+    const ElfW(Phdr)* phdr = &phdr_table[index];
+
+    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+      // LOAD segment mem size, we need to protect only a partial region of the
+      // LOAD segment and therefore cannot avoid a VMA split.
+      if (relro_phdr->p_memsz < phdr->p_memsz) {
+        break;
+      }
+
+      ElfW(Addr) p_memsz = phdr->p_memsz;
+      ElfW(Addr) p_filesz = phdr->p_filesz;
+
+      // Attempt to extend the VMA (mprotect range). Without extending the range,
+      // mprotect will RO-protect only part of the extended RW LOAD segment, which
+      // will leave an extra split RW VMA (the gap).
+      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz);
+
+      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+
+      break;
+    }
+  }
 }
 
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags) {
+                                          ElfW(Addr) load_bias, int prot_flags,
+                                          bool should_pad_segments) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
@@ -966,8 +1054,16 @@
     //       the program is likely to fail at runtime. So in effect the
     //       linker must only emit a PT_GNU_RELRO segment if it ensures
     //       that it starts on a page boundary.
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz + load_bias);
+
+    // Before extending the RO protection, we need to ensure that the segments were
+    // extended by bionic: the kernel won't map the gaps, so the gap region usually
+    // contains unrelated mappings, which would be incorrectly protected as RO and
+    // likely lead to a segmentation fault.
+    if (phdr->p_align > kPageSize && should_pad_segments) {
+      _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end);
+    }
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -992,12 +1088,14 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
-                                 size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                 ElfW(Addr) load_bias, bool should_pad_segments) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+                                        should_pad_segments);
 }
 
 /* Serialize the GNU relro segments to the given file descriptor. This can be
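To make the extension arithmetic concrete, here is a minimal standalone
sketch of what _extend_load_segment_vma computes, assuming a 4KiB runtime
page size and made-up segment addresses (the real loader reads these from
the program header table):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Assumed 4KiB runtime page size for this sketch.
    constexpr uint64_t kPageSize = 4096;

    constexpr uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
    constexpr uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

    int main() {
      // Two hypothetical LOAD segments from a 16KiB-aligned (p_align = 0x4000) ELF.
      uint64_t curr_vaddr = 0x0, curr_filesz = 0x1234;  // fully file-backed segment
      uint64_t next_vaddr = 0x8000;                     // next LOAD segment

      uint64_t curr_end = page_end(curr_vaddr + curr_filesz);  // 0x2000
      uint64_t next_start = page_start(next_vaddr);            // 0x8000

      if (curr_end < next_start) {
        // Without padding, [0x2000, 0x8000) is an unmapped gap between the two
        // mappings. Extending p_memsz/p_filesz by the difference makes the
        // mappings contiguous, so no gap VMA is left behind.
        uint64_t extend = next_start - curr_end;
        printf("extend mapping by 0x%" PRIx64 " bytes\n", extend);
      }
      return 0;
    }
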
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index e5b87bb..4deed33 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,13 +128,14 @@
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
 
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, bool should_pad_segments,
+                                const GnuPropertySection* prop = nullptr);
 
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias);
+                                  ElfW(Addr) load_bias, bool should_pad_segments);
 
 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias);
+                                 ElfW(Addr) load_bias, bool should_pad_segments);
 
 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade..5b58895 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,7 +187,8 @@
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias) < 0) {
+                                    relocator.si->load_bias,
+                                    relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -197,7 +198,8 @@
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias) < 0) {
+                                      relocator.si->load_bias,
+                                      relocator.si->should_pad_segments()) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index d078e50..6883da9 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,6 +31,7 @@
 #include <android-base/test_utils.h>
 
 #include <sys/mman.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 #include <sys/wait.h>
@@ -2046,6 +2047,11 @@
                                                              -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
 
+  struct stat file_stat;
+  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+  ASSERT_EQ(ret, 0) << "Failed to stat library";
+  size_t file_size = file_stat.st_size;
+
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,13 @@
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+    size_t seg_size = size;
+    // See the comment on file map faults past the end of the file in
+    // ElfReader::LoadSegments(), bionic/linker/linker_phdr.cpp.
+    if (rec.offset + size > file_size) {
+      seg_size = file_size - rec.offset;
+    }
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), seg_size);
     mprotect(map, size, rec.perms);
   }
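
The test-side change mirrors the loader: with padded segments, a recorded
mapping's file offset plus its VMA size can exceed the backing file's size,
and reading those pages would fault (the kernel cannot service a file-map
fault past the end of the file). A minimal sketch of the clamping rule,
with safe_copy_size as a hypothetical helper name:

    #include <algorithm>
    #include <cstddef>

    // Number of bytes that are safe to read from a file-backed mapping that
    // may have been extended past the end of the underlying file.
    static size_t safe_copy_size(size_t map_size, size_t file_offset, size_t file_size) {
      if (file_offset >= file_size) return 0;  // mapping lies entirely past EOF
      return std::min(map_size, file_size - file_offset);
    }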