RELAND: bionic: loader: Extend GNU_RELRO protection

If the LOAD segment VMAs are extended to prevent creating additional
VMAs, the protection extent of the GNU_RELRO segment must also be
updated to match. Otherwise, the partial mprotect will reintroduce
an additional VMA due to the split protections.
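
As a minimal standalone sketch (illustrative only, not part of this
patch), partially write-protecting a mapping is enough to force the
kernel to split the VMA:

// vma_split.cpp -- illustrative only, not part of this patch.
// Shows that write-protecting part of a mapping splits one VMA into two.
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = sysconf(_SC_PAGESIZE);

  // One 2-page anonymous RW mapping: the kernel tracks a single VMA.
  char* p = static_cast<char*>(mmap(nullptr, 2 * page, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (p == MAP_FAILED) return 1;

  // Changing the protection of only the first page forces a split:
  // [p, p+page) becomes r--p, [p+page, p+2*page) stays rw-p -- two VMAs.
  if (mprotect(p, page, PROT_READ) == -1) return 1;

  printf("split mapping at %p; see /proc/%d/maps\n",
         static_cast<void*>(p), static_cast<int>(getpid()));
  pause();  // keep the process alive so the maps can be inspected
  return 0;
}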

Update the GNU_RELRO protection range when the ELF was loaded by the
bionic loader. Take care not to attempt any fixup for ELFs not loaded
by us (e.g. ELFs loaded by the kernel), since these don't have the
extended VMA fix to begin with.
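
For illustration, the intended call-site usage looks like this (a
hedged sketch; these exact call sites are not part of this diff):

// ELF mapped by the bionic loader with segment padding applied:
phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias,
                             /*should_pad_segments=*/true);

// ELF mapped by the kernel (no extended VMAs), so no fixup is attempted:
phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias,
                             /*should_pad_segments=*/false);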

Consider a system with a 4KB page size and ELF files with 64KB
segment alignment, e.g.:

$ readelf -Wl /system/lib64/bootstrap/libc.so | grep 'Type\|LOAD'

Type           Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
LOAD           0x000000 0x0000000000000000 0x0000000000000000 0x0441a8 0x0441a8 R   0x10000
LOAD           0x0441b0 0x00000000000541b0 0x00000000000541b0 0x091860 0x091860 R E 0x10000
LOAD           0x0d5a10 0x00000000000f5a10 0x00000000000f5a10 0x003d40 0x003d40 RW  0x10000
LOAD           0x0d9760 0x0000000000109760 0x0000000000109760 0x0005c0 0x459844 RW  0x10000

Before this patch:

$ cat /proc/1/maps | grep -A1 libc.so

7f468f069000-7f468f0bd000 r--p 00000000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f468f0bd000-7f468f15e000 r-xp 00044000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f468f15e000-7f468f163000 r--p 000d5000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f468f163000-7f468f172000 rw-p 000da000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f468f172000-7f468f173000 rw-p 000d9000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f468f173000-7f468f5c4000 rw-p 00000000 00:00 0                          [anon:.bss]

1 extra RW VMA at offset 0x000da000 (3 RW mappings in total)

After this patch:

$ cat /proc/1/maps | grep -A1 libc.so

7f5a50225000-7f5a50279000 r--p 00000000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f5a50279000-7f5a5031a000 r-xp 00044000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f5a5031a000-7f5a5032e000 r--p 000d5000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f5a5032e000-7f5a5032f000 rw-p 000d9000 fe:09 20635520                   /system/lib64/bootstrap/libc.so
7f5a5032f000-7f5a50780000 rw-p 00000000 00:00 0                          [anon:.bss]

The extra RW VMA at offset 0x000da000 is removed (2 RW mappings in total)
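
The numbers above follow from the extension arithmetic (a worked
sketch, assuming the extension runs to page_start of the next LOAD, as
_extend_load_segment_vma does for padded segments):

// RELRO lies in the third LOAD: p_vaddr = 0xf5a10, p_memsz = 0x3d40;
// the next LOAD begins at p_vaddr = 0x109760.
//
// Unextended (before):
//   seg_page_end = page_end(0xf5a10 + 0x3d40) = page_end(0xf9750) = 0xfa000
//   The padded RW LOAD spans 0xf5000..0x109000, so protecting only up to
//   0xfa000 splits it, leaving the extra rw-p VMA 0xfa000..0x109000
//   (offset 0x000da000 above).
//
// Extended (after):
//   p_memsz grows to page_start(0x109760) - 0xf5a10 = 0x109000 - 0xf5a10
//   seg_page_end = page_end(0xf5a10 + (0x109000 - 0xf5a10)) = 0x109000
//   mprotect now covers 0xf5000..0x109000, i.e. the whole r--p VMA
//   7f5a5031a000-7f5a5032e000 (load bias 0x7f5a50225000).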

Bug: 316403210
Bug: 300367402
Bug: 307803052
Bug: 312550202
Test: atest -c linker-unit-tests
Test: atest -c bionic-unit-tests
Change-Id: I9cd04574190ef4c727308363a8cb1120c36e53e0
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 07b54c5..821f30d 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -1010,11 +1010,71 @@
                                    should_pad_segments);
 }
 
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+                                              bool should_pad_segments) {
+  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+  for (size_t index = 0; index < phdr_count; ++index) {
+    const ElfW(Phdr)* phdr = &phdr_table[index];
+
+    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+      // LOAD segment mem size, we need to protect only a partial region of the
+      // LOAD segment and therefore cannot avoid a VMA split.
+      //
+      // Note: Don't check the page-aligned mem sizes since the extended protection
+      // may incorrectly write protect non-relocation data.
+      //
+      // Example:
+      //
+      //               |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
+      //       ----------------------------------------------------------------
+      //               |            |        |             |        |
+      //        SEG X  |     RO     |   RO   |     RW      |        |   SEG Y
+      //               |            |        |             |        |
+      //       ----------------------------------------------------------------
+      //                            |        |             |
+      //                            |        |             |
+      //                            |        |             |
+      //                    relro_vaddr   relro_vaddr   relro_vaddr
+      //                    (load_vaddr)       +            +
+      //                                  relro_memsz   load_memsz
+      //
+      //       ----------------------------------------------------------------
+      //               |         PAGE        |         PAGE         |
+      //       ----------------------------------------------------------------
+      //                                     |       Potential      |
+      //                                     |----- Extended RO ----|
+      //                                     |      Protection      |
+      //
+      // If the check below used page-aligned mem sizes, it would incorrectly
+      // write-protect the 3K RW part of the LOAD segment containing the GNU_RELRO.
+      if (relro_phdr->p_memsz < phdr->p_memsz) {
+        return;
+      }
+
+      ElfW(Addr) p_memsz = phdr->p_memsz;
+      ElfW(Addr) p_filesz = phdr->p_filesz;
+
+      // Attempt to extend the VMA (mprotect range). Without extending the range,
+      // mprotect would RO-protect only part of the extended RW LOAD segment, which
+      // would leave an extra split RW VMA (the gap).
+      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+                               should_pad_segments);
+
+      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+      return;
+    }
+  }
+}
+
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags) {
+                                          ElfW(Addr) load_bias, int prot_flags,
+                                          bool should_pad_segments) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
@@ -1041,6 +1101,8 @@
     //       that it starts on a page boundary.
     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+                               should_pad_segments);
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -1065,12 +1127,14 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
+ *   should_pad_segments -> whether segments were extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
-                                 size_t phdr_count, ElfW(Addr) load_bias) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+                                 ElfW(Addr) load_bias, bool should_pad_segments) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+                                        should_pad_segments);
 }
 
 /* Serialize the GNU relro segments to the given file descriptor. This can be