Pagetable-friendly shared library address randomization.

Add inaccessible gaps between shared libraries to make it harder for
attackers to defeat ASLR by random probing.
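
A minimal sketch of what such a gap looks like at the mmap level
(illustrative only; map_gap and its placement policy are not the
linker's actual code):

  #include <sys/mman.h>

  // Reserve an inaccessible region: an attacker probe that lands here
  // faults with SIGSEGV instead of quietly finding a mapped library.
  void* map_gap(size_t gap_size) {
    void* gap = mmap(nullptr, gap_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return gap == MAP_FAILED ? nullptr : gap;
  }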

To avoid excessive page table bloat, only insert a gap when a library
is about to cross a huge page boundary, effectively allowing several
smaller libraries to be lumped together within one huge page.
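
A sketch of that boundary check under assumed constants (2 MiB huge
pages; should_insert_gap is a hypothetical helper, not the linker's
real reservation path):

  #include <cstddef>
  #include <cstdint>

  constexpr size_t kHugePageSize = 2 * 1024 * 1024;  // 2 MiB

  // A gap only pays for its page-table cost when the new library would
  // spill into a fresh huge page; libraries that fit within the current
  // huge page are packed together with no gap between them.
  bool should_insert_gap(uintptr_t load_start, size_t load_size) {
    return load_start / kHugePageSize !=
           (load_start + load_size - 1) / kHugePageSize;
  }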

Bug: 158113540
Test: look at /proc/$$/maps
Change-Id: I39c0100b81f72447e8b3c6faafa561111492bf8c
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 5d1cfc2..4cb48f5 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -49,6 +49,8 @@
   size_t phdr_count() const { return phdr_num_; }
   ElfW(Addr) load_start() const { return reinterpret_cast<ElfW(Addr)>(load_start_); }
   size_t load_size() const { return load_size_; }
+  ElfW(Addr) gap_start() const { return reinterpret_cast<ElfW(Addr)>(gap_start_); }
+  size_t gap_size() const { return gap_size_; }
   ElfW(Addr) load_bias() const { return load_bias_; }
   const ElfW(Phdr)* loaded_phdr() const { return loaded_phdr_; }
   const ElfW(Dyn)* dynamic() const { return dynamic_; }
@@ -96,6 +98,10 @@
   void* load_start_;
   // Size in bytes of reserved address space.
   size_t load_size_;
+  // First page of inaccessible gap mapping reserved for this DSO.
+  void* gap_start_;
+  // Size in bytes of the gap mapping.
+  size_t gap_size_;
   // Load bias.
   ElfW(Addr) load_bias_;
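
For context, a hedged sketch of how a caller might consume the new
gap_start()/gap_size() accessors; the soinfo-side fields and the
unload-time munmap are assumptions about the rest of the change, not
part of this hunk:

  // Hypothetical caller-side bookkeeping (field names assumed):
  // remember the PROT_NONE gap next to the load range so it can be
  // released when the DSO itself is unloaded.
  si->gap_start = elf_reader.gap_start();
  si->gap_size = elf_reader.gap_size();

  // Later, on unload: return the gap to the kernel along with the
  // library mapping.
  if (si->gap_size != 0) {
    munmap(reinterpret_cast<void*>(si->gap_start), si->gap_size);
  }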