Merge "Make memtag_handle_longjmp precise" into main
diff --git a/benchmarks/Android.bp b/benchmarks/Android.bp
index 82bca21..d1cdfec 100644
--- a/benchmarks/Android.bp
+++ b/benchmarks/Android.bp
@@ -58,7 +58,7 @@
         "stdio_benchmark.cpp",
         "stdlib_benchmark.cpp",
         "string_benchmark.cpp",
-        "syscall_mmap_benchmark.cpp",
+        "syscall_mm_benchmark.cpp",
         "time_benchmark.cpp",
         "unistd_benchmark.cpp",
         "wctype_benchmark.cpp",
diff --git a/benchmarks/ctype_benchmark.cpp b/benchmarks/ctype_benchmark.cpp
index eab0133..c6c23b0 100644
--- a/benchmarks/ctype_benchmark.cpp
+++ b/benchmarks/ctype_benchmark.cpp
@@ -19,65 +19,78 @@
 #include <benchmark/benchmark.h>
 #include "util.h"
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y1, isalnum('A'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y2, isalnum('a'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y3, isalnum('0'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_n, isalnum('_'));
+// Avoid optimization.
+volatile int A = 'A';
+volatile int a = 'a';
+volatile int X = 'X';
+volatile int x = 'x';
+volatile int backspace = '\b';
+volatile int del = '\x7f';
+volatile int space = ' ';
+volatile int tab = '\t';
+volatile int zero = '0';
+volatile int underscore = '_';
+volatile int top_bit_set = 0x88;
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_y1, isalpha('A'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_y2, isalpha('a'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_n, isalpha('_'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y1, isalnum(A));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y2, isalnum(a));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_y3, isalnum(zero));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalnum_n, isalnum(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isascii_y, isascii('x'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isascii_n, isascii(0x88));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_y1, isalpha(A));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_y2, isalpha(a));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isalpha_n, isalpha(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_y1, isblank(' '));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_y2, isblank('\t'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_n, isblank('_'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isascii_y, isascii(x));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isascii_n, isascii(top_bit_set));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_y1, iscntrl('\b'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_y2, iscntrl('\x7f'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_n, iscntrl('_'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_y1, isblank(space));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_y2, isblank(tab));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isblank_n, isblank(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isdigit_y, iscntrl('0'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isdigit_n, iscntrl('_'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_y1, iscntrl(backspace));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_y2, iscntrl(del));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_iscntrl_n, iscntrl(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y1, isgraph('A'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y2, isgraph('a'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y3, isgraph('0'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y4, isgraph('_'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_n, isgraph(' '));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isdigit_y, isdigit(zero));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isdigit_n, isdigit(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_islower_y, islower('x'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_islower_n, islower('X'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y1, isgraph(A));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y2, isgraph(a));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y3, isgraph(zero));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_y4, isgraph(underscore));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isgraph_n, isgraph(space));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y1, isprint('A'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y2, isprint('a'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y3, isprint('0'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y4, isprint('_'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y5, isprint(' '));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_n, isprint('\b'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_islower_y, islower(x));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_islower_n, islower(X));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_ispunct_y, ispunct('_'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_ispunct_n, ispunct('A'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y1, isprint(A));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y2, isprint(a));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y3, isprint(zero));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y4, isprint(underscore));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_y5, isprint(space));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isprint_n, isprint(backspace));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_y1, isspace(' '));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_y2, isspace('\t'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_n, isspace('A'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_ispunct_y, ispunct(underscore));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_ispunct_n, ispunct(A));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isupper_y, isupper('X'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isupper_n, isupper('x'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_y1, isspace(space));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_y2, isspace(tab));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isspace_n, isspace(A));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y1, isxdigit('0'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y2, isxdigit('a'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y3, isxdigit('A'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_n, isxdigit('_'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isupper_y, isupper(X));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isupper_n, isupper(x));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toascii_y, isascii('x'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toascii_n, isascii(0x88));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y1, isxdigit(zero));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y2, isxdigit(a));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_y3, isxdigit(A));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_isxdigit_n, isxdigit(underscore));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_tolower_y, tolower('X'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_tolower_n, tolower('x'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toascii_y, toascii(x));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toascii_n, toascii(top_bit_set));
 
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toupper_y, toupper('x'));
-BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toupper_n, toupper('X'));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_tolower_y, tolower(X));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_tolower_n, tolower(x));
+
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toupper_y, toupper(x));
+BIONIC_TRIVIAL_BENCHMARK(BM_ctype_toupper_n, toupper(X));
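
The switch above from character literals to volatile int arguments keeps the compiler from folding each classification call into a compile-time constant, so the benchmarks actually time the libc routines. A minimal sketch of the difference (not part of the patch):

  #include <ctype.h>

  volatile int c = 'A';

  int folded()   { return isalnum('A'); }  // the compiler may reduce this to a constant
  int measured() { return isalnum(c); }    // must load c on every call, so isalnum() itself is timed
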
diff --git a/benchmarks/suites/syscall.xml b/benchmarks/suites/syscall.xml
index c253a3f..94b1cf5 100644
--- a/benchmarks/suites/syscall.xml
+++ b/benchmarks/suites/syscall.xml
@@ -49,4 +49,72 @@
   <iterations>10</iterations>
   <args>AT_MULTI_PAGE_SIZES</args>
 </fn>
+<fn>
+  <name>BM_syscall_mmap_anon_mmap_only</name>
+  <iterations>10</iterations>
+  <args>AT_MULTI_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_anon_munmap_only</name>
+  <iterations>10</iterations>
+  <args>AT_MULTI_PAGE_SIZES</args>
+</fn>
 
+<!-- madvise tests -->
+<fn>
+  <name>BM_syscall_mmap_anon_madvise_dontneed</name>
+  <iterations>10</iterations>
+  <args>AT_MULTI_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_anon_madvise_pageout</name>
+  <iterations>10</iterations>
+  <args>AT_MULTI_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_anon_madvise_free</name>
+  <iterations>10</iterations>
+  <args>AT_MULTI_PAGE_SIZES</args>
+</fn>
+
+<!-- mprotect tests -->
+<fn>
+  <name>BM_syscall_mmap_anon_mprotect_rw_to_rd</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_anon_mprotect_rw_to_none</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_anon_mprotect_rd_to_none</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_file_mprotect_rw_to_rd</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_file_mprotect_rw_to_none</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_file_mprotect_none_to_rw</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_file_mprotect_none_to_rd</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
+<fn>
+  <name>BM_syscall_mmap_file_mprotect_rd_to_none</name>
+  <iterations>10</iterations>
+  <args>AT_All_PAGE_SIZES</args>
+</fn>
diff --git a/benchmarks/syscall_mmap_benchmark.cpp b/benchmarks/syscall_mm_benchmark.cpp
similarity index 66%
rename from benchmarks/syscall_mmap_benchmark.cpp
rename to benchmarks/syscall_mm_benchmark.cpp
index 34ffa2e..04c4bea 100644
--- a/benchmarks/syscall_mmap_benchmark.cpp
+++ b/benchmarks/syscall_mm_benchmark.cpp
@@ -38,6 +38,12 @@
   int64_t size;
 };
 
+struct MprotectParams {
+  int from_prot;
+  int to_prot;
+  int64_t size;
+};
+
 template <BenchmarkType type>
 void MmapBenchmarkImpl(benchmark::State& state, const struct MmapParams& params, int fd,
                        void* area = nullptr) {
@@ -307,3 +313,152 @@
   MadviseBenchmark(state, params, MADV_FREE);
 }
 BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_madvise_free, "AT_MULTI_PAGE_SIZES");
+
+void MprotectBenchmark(benchmark::State& state, const struct MprotectParams& params, void* addr) {
+  for (auto _ : state) {
+    state.PauseTiming();
+    /*
+     * Guarantee that physical memory pages are allocated for this region to prevent
+     * segmentation fault when using mprotect to change permissions.
+     */
+    if (params.from_prot & PROT_WRITE) {
+      MakeAllocationResident(addr, params.size, page_sz);
+    }
+    state.ResumeTiming();
+
+    if (mprotect(addr, params.size, params.to_prot) != 0) {
+      state.SkipWithError(android::base::StringPrintf("mprotect failed: %m"));
+      break;
+    }
+
+    state.PauseTiming();
+    // Revert back to the original protection
+    int res = mprotect(addr, params.size, params.from_prot);
+    state.ResumeTiming();
+    if (res != 0) {
+      state.SkipWithError(
+          android::base::StringPrintf("mprotect failed to revert to original prot: %m"));
+      break;
+    }
+  }
+}
+
+static void MprotectBenchmarkWithMmapAnon(benchmark::State& state,
+                                          const struct MprotectParams& params) {
+  void* addr = mmap(nullptr, params.size, params.from_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+  if (addr == MAP_FAILED) {
+    state.SkipWithError(android::base::StringPrintf("mmap failed: %m"));
+    return;
+  }
+
+  MprotectBenchmark(state, params, addr);
+
+  if (munmap(addr, params.size) != 0)
+    state.SkipWithError(android::base::StringPrintf("munmap failed: %m"));
+}
+
+static void BM_syscall_mmap_anon_mprotect_rw_to_rd(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ | PROT_WRITE,
+      .to_prot = PROT_READ,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapAnon(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_rd, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_anon_mprotect_rw_to_none(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ | PROT_WRITE,
+      .to_prot = PROT_NONE,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapAnon(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_none, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_anon_mprotect_rd_to_none(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ,
+      .to_prot = PROT_NONE,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapAnon(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rd_to_none, "AT_All_PAGE_SIZES");
+
+static void MprotectBenchmarkWithMmapFile(benchmark::State& state,
+                                          const struct MprotectParams& params) {
+  TemporaryFile tf;
+
+  if (tf.fd < 0) {
+    state.SkipWithError(android::base::StringPrintf("failed to create a temporary file: %m"));
+    return;
+  }
+
+  if (params.size > 0 && ftruncate(tf.fd, params.size)) {
+    state.SkipWithError(android::base::StringPrintf("ftruncate failed: %m"));
+    return;
+  }
+
+  void* addr = mmap(nullptr, params.size, params.from_prot, MAP_PRIVATE, tf.fd, 0);
+  if (addr == MAP_FAILED) {
+    state.SkipWithError(android::base::StringPrintf("mmap failed: %m"));
+    return;
+  }
+
+  MprotectBenchmark(state, params, addr);
+
+  if (munmap(addr, params.size) != 0)
+    state.SkipWithError(android::base::StringPrintf("munmap failed: %m"));
+}
+
+static void BM_syscall_mmap_file_mprotect_rw_to_rd(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ | PROT_WRITE,
+      .to_prot = PROT_READ,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapFile(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_rd, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_file_mprotect_rw_to_none(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ | PROT_WRITE,
+      .to_prot = PROT_NONE,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapFile(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_none, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_file_mprotect_none_to_rw(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_NONE,
+      .to_prot = PROT_READ | PROT_WRITE,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapFile(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rw, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_file_mprotect_none_to_rd(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_NONE,
+      .to_prot = PROT_READ,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapFile(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rd, "AT_All_PAGE_SIZES");
+
+static void BM_syscall_mmap_file_mprotect_rd_to_none(benchmark::State& state) {
+  struct MprotectParams params = {
+      .from_prot = PROT_READ,
+      .to_prot = PROT_NONE,
+      .size = state.range(0),
+  };
+  MprotectBenchmarkWithMmapFile(state, params);
+}
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rd_to_none, "AT_All_PAGE_SIZES");
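
Outside the google-benchmark harness, the operation that BM_syscall_mmap_anon_mprotect_rw_to_rd times looks roughly like the standalone sketch below; touching the mapping first plays the role of the MakeAllocationResident() helper used above (assumed here to simply make the pages resident):

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <time.h>

  int main() {
    const size_t size = 16 * 4096;
    void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) return 1;
    memset(addr, 1, size);  // make the pages resident before timing

    struct timespec begin, end;
    clock_gettime(CLOCK_MONOTONIC, &begin);
    if (mprotect(addr, size, PROT_READ) != 0) return 1;  // the timed rw -> rd transition
    clock_gettime(CLOCK_MONOTONIC, &end);

    long long ns = (end.tv_sec - begin.tv_sec) * 1000000000LL + (end.tv_nsec - begin.tv_nsec);
    printf("mprotect(rw->rd, %zu bytes): %lld ns\n", size, ns);
    munmap(addr, size);
    return 0;
  }
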
diff --git a/libc/bionic/fdsan.cpp b/libc/bionic/fdsan.cpp
index 84d2c94..0b0678b 100644
--- a/libc/bionic/fdsan.cpp
+++ b/libc/bionic/fdsan.cpp
@@ -219,6 +219,8 @@
       return "ZipArchive";
     case ANDROID_FDSAN_OWNER_TYPE_NATIVE_HANDLE:
       return "native_handle_t";
+    case ANDROID_FDSAN_OWNER_TYPE_PARCEL:
+      return "Parcel";
 
     case ANDROID_FDSAN_OWNER_TYPE_GENERIC_00:
     default:
diff --git a/libc/bionic/system_property_set.cpp b/libc/bionic/system_property_set.cpp
index f7999db..6e49bce 100644
--- a/libc/bionic/system_property_set.cpp
+++ b/libc/bionic/system_property_set.cpp
@@ -49,21 +49,34 @@
 #include "private/ScopedFd.h"
 
 static const char property_service_socket[] = "/dev/socket/" PROP_SERVICE_NAME;
+static const char property_service_for_system_socket[] =
+    "/dev/socket/" PROP_SERVICE_FOR_SYSTEM_NAME;
 static const char* kServiceVersionPropertyName = "ro.property_service.version";
 
 class PropertyServiceConnection {
  public:
-  PropertyServiceConnection() : last_error_(0) {
+  PropertyServiceConnection(const char* name) : last_error_(0) {
     socket_.reset(::socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0));
     if (socket_.get() == -1) {
       last_error_ = errno;
       return;
     }
 
-    const size_t namelen = strlen(property_service_socket);
+    // If we're trying to set "sys.powerctl" from a privileged process, use the special
+    // socket. Because this socket is only accessible to privileged processes, it can't
+    // be DoSed directly by malicious apps. (The shell user should be able to reboot,
+    // though, so we don't just always use the special socket for "sys.powerctl".)
+    // See b/262237198 for context
+    const char* socket = property_service_socket;
+    if (strcmp(name, "sys.powerctl") == 0 &&
+        access(property_service_for_system_socket, W_OK) == 0) {
+      socket = property_service_for_system_socket;
+    }
+
+    const size_t namelen = strlen(socket);
     sockaddr_un addr;
     memset(&addr, 0, sizeof(addr));
-    strlcpy(addr.sun_path, property_service_socket, sizeof(addr.sun_path));
+    strlcpy(addr.sun_path, socket, sizeof(addr.sun_path));
     addr.sun_family = AF_LOCAL;
     socklen_t alen = namelen + offsetof(sockaddr_un, sun_path) + 1;
 
@@ -176,7 +189,7 @@
 };
 
 static int send_prop_msg(const prop_msg* msg) {
-  PropertyServiceConnection connection;
+  PropertyServiceConnection connection(msg->name);
   if (!connection.IsValid()) {
     return connection.GetLastError();
   }
@@ -269,7 +282,7 @@
     // New protocol only allows long values for ro. properties only.
     if (strlen(value) >= PROP_VALUE_MAX && strncmp(key, "ro.", 3) != 0) return -1;
     // Use proper protocol
-    PropertyServiceConnection connection;
+    PropertyServiceConnection connection(key);
     if (!connection.IsValid()) {
       errno = connection.GetLastError();
       async_safe_format_log(ANDROID_LOG_WARN, "libc",
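
Callers are unaffected by the new socket: a privileged process still sets the property through the normal API, and the routing decision happens inside PropertyServiceConnection. A hedged usage sketch (the helper name is illustrative):

  #include <sys/system_properties.h>

  int request_reboot() {
    // From a privileged process this now travels over
    // /dev/socket/property_service_for_system when that socket is accessible;
    // everything else keeps using /dev/socket/property_service.
    return __system_property_set("sys.powerctl", "reboot");
  }
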
diff --git a/libc/include/android/fdsan.h b/libc/include/android/fdsan.h
index 3de0649..4540498 100644
--- a/libc/include/android/fdsan.h
+++ b/libc/include/android/fdsan.h
@@ -126,6 +126,9 @@
 
   /* native_handle_t */
   ANDROID_FDSAN_OWNER_TYPE_NATIVE_HANDLE = 13,
+
+  /* android::Parcel */
+  ANDROID_FDSAN_OWNER_TYPE_PARCEL = 14,
 };
 
 /*
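
A sketch (assumed usage, not taken from the Parcel change itself) of how an owner such as android::Parcel might tag a file descriptor with the new ANDROID_FDSAN_OWNER_TYPE_PARCEL value using the existing fdsan API; the helper names are illustrative:

  #include <android/fdsan.h>
  #include <stdint.h>

  void adopt_fd(int fd, void* parcel) {
    uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
                                                  reinterpret_cast<uintptr_t>(parcel));
    android_fdsan_exchange_owner_tag(fd, /*expected_tag=*/0, tag);  // a stray close() elsewhere is now flagged
  }

  void release_fd(int fd, void* parcel) {
    uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
                                                  reinterpret_cast<uintptr_t>(parcel));
    android_fdsan_close_with_tag(fd, tag);  // closes fd, verifying the tag matches
  }
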
diff --git a/libc/include/bits/fcntl.h b/libc/include/bits/fcntl.h
index 597aa6e..ee5a6e1 100644
--- a/libc/include/bits/fcntl.h
+++ b/libc/include/bits/fcntl.h
@@ -43,6 +43,6 @@
  *
  * The return value depends on the operation.
  */
-int fcntl(int __fd, int __cmd, ...);
+int fcntl(int __fd, int __op, ...);
 
 __END_DECLS
diff --git a/libc/include/bits/ioctl.h b/libc/include/bits/ioctl.h
index fd31a58..260eb7d 100644
--- a/libc/include/bits/ioctl.h
+++ b/libc/include/bits/ioctl.h
@@ -40,7 +40,7 @@
 /**
  * [ioctl(2)](http://man7.org/linux/man-pages/man2/ioctl.2.html) operates on device files.
  */
-int ioctl(int __fd, int __request, ...);
+int ioctl(int __fd, int __op, ...);
 
 /*
  * Work around unsigned -> signed conversion warnings: many common ioctl
@@ -57,7 +57,7 @@
  */
 #if !defined(BIONIC_IOCTL_NO_SIGNEDNESS_OVERLOAD)
 /* enable_if(1) just exists to break overloading ties. */
-int ioctl(int __fd, unsigned __request, ...) __overloadable __enable_if(1, "") __RENAME(ioctl);
+int ioctl(int __fd, unsigned __op, ...) __overloadable __enable_if(1, "") __RENAME(ioctl);
 #endif
 
 __END_DECLS
diff --git a/libc/include/bits/lockf.h b/libc/include/bits/lockf.h
index ec6e53c..d9f5987 100644
--- a/libc/include/bits/lockf.h
+++ b/libc/include/bits/lockf.h
@@ -56,12 +56,12 @@
  *
  * See also flock().
  */
-int lockf(int __fd, int __cmd, off_t __length) __RENAME_IF_FILE_OFFSET64(lockf64) __INTRODUCED_IN(24);
+int lockf(int __fd, int __op, off_t __length) __RENAME_IF_FILE_OFFSET64(lockf64) __INTRODUCED_IN(24);
 
 /**
  * Like lockf() but allows using a 64-bit length
  * even from a 32-bit process without `_FILE_OFFSET_BITS=64`.
  */
-int lockf64(int __fd, int __cmd, off64_t __length) __INTRODUCED_IN(24);
+int lockf64(int __fd, int __op, off64_t __length) __INTRODUCED_IN(24);
 
 __END_DECLS
diff --git a/libc/include/sys/_system_properties.h b/libc/include/sys/_system_properties.h
index 079c825..078e857 100644
--- a/libc/include/sys/_system_properties.h
+++ b/libc/include/sys/_system_properties.h
@@ -41,6 +41,7 @@
 __BEGIN_DECLS
 
 #define PROP_SERVICE_NAME "property_service"
+#define PROP_SERVICE_FOR_SYSTEM_NAME "property_service_for_system"
 #define PROP_DIRNAME "/dev/__properties__"
 
 #define PROP_MSG_SETPROP 1
diff --git a/libc/include/sys/msg.h b/libc/include/sys/msg.h
index ad481a0..26071b1 100644
--- a/libc/include/sys/msg.h
+++ b/libc/include/sys/msg.h
@@ -46,7 +46,7 @@
 typedef __kernel_ulong_t msglen_t;
 
 /** Not useful on Android; disallowed by SELinux. */
-int msgctl(int __msg_id, int __cmd, struct msqid_ds* _Nullable __buf) __INTRODUCED_IN(26);
+int msgctl(int __msg_id, int __op, struct msqid_ds* _Nullable __buf) __INTRODUCED_IN(26);
 /** Not useful on Android; disallowed by SELinux. */
 int msgget(key_t __key, int __flags) __INTRODUCED_IN(26);
 /** Not useful on Android; disallowed by SELinux. */
diff --git a/libc/include/sys/prctl.h b/libc/include/sys/prctl.h
index ff03c33..1c80415 100644
--- a/libc/include/sys/prctl.h
+++ b/libc/include/sys/prctl.h
@@ -45,6 +45,6 @@
  *
  * Returns -1 and sets `errno` on failure; success values vary by option.
  */
-int prctl(int __option, ...);
+int prctl(int __op, ...);
 
 __END_DECLS
diff --git a/libc/include/sys/ptrace.h b/libc/include/sys/ptrace.h
index 022fc3a..66b30a1 100644
--- a/libc/include/sys/ptrace.h
+++ b/libc/include/sys/ptrace.h
@@ -59,7 +59,7 @@
 #define PT_GETSIGINFO PTRACE_GETSIGINFO
 #define PT_SETSIGINFO PTRACE_SETSIGINFO
 
-long ptrace(int __request, ...);
+long ptrace(int __op, ...);
 
 __END_DECLS
 
diff --git a/libc/include/sys/quota.h b/libc/include/sys/quota.h
index 79c653d..37f8925 100644
--- a/libc/include/sys/quota.h
+++ b/libc/include/sys/quota.h
@@ -51,6 +51,6 @@
  *
  * Available since API level 26.
  */
-int quotactl(int __cmd, const char* _Nullable __special, int __id, char* __BIONIC_COMPLICATED_NULLNESS __addr) __INTRODUCED_IN(26);
+int quotactl(int __op, const char* _Nullable __special, int __id, char* __BIONIC_COMPLICATED_NULLNESS __addr) __INTRODUCED_IN(26);
 
 __END_DECLS
diff --git a/libc/include/sys/reboot.h b/libc/include/sys/reboot.h
index 156d947..5d9e1a7 100644
--- a/libc/include/sys/reboot.h
+++ b/libc/include/sys/reboot.h
@@ -55,6 +55,6 @@
  * Does not return on successful reboot, returns 0 if CAD was successfully enabled/disabled,
  * and returns -1 and sets `errno` on failure.
  */
-int reboot(int __cmd);
+int reboot(int __op);
 
 __END_DECLS
diff --git a/libc/include/sys/sem.h b/libc/include/sys/sem.h
index f4256e2..5682282 100644
--- a/libc/include/sys/sem.h
+++ b/libc/include/sys/sem.h
@@ -51,7 +51,7 @@
   void* _Nullable __pad;
 };
 
-int semctl(int __sem_id, int __sem_num, int __cmd, ...) __INTRODUCED_IN(26);
+int semctl(int __sem_id, int __sem_num, int __op, ...) __INTRODUCED_IN(26);
 int semget(key_t __key, int __sem_count, int __flags) __INTRODUCED_IN(26);
 int semop(int __sem_id, struct sembuf* _Nonnull __ops, size_t __op_count) __INTRODUCED_IN(26);
 
diff --git a/libc/include/sys/shm.h b/libc/include/sys/shm.h
index 9d58046..fb6f20c 100644
--- a/libc/include/sys/shm.h
+++ b/libc/include/sys/shm.h
@@ -49,7 +49,7 @@
 /** Not useful on Android; disallowed by SELinux. */
 void* _Nonnull shmat(int __shm_id, const void* _Nullable __addr, int __flags) __INTRODUCED_IN(26);
 /** Not useful on Android; disallowed by SELinux. */
-int shmctl(int __shm_id, int __cmd, struct shmid_ds* _Nullable __buf) __INTRODUCED_IN(26);
+int shmctl(int __shm_id, int __op, struct shmid_ds* _Nullable __buf) __INTRODUCED_IN(26);
 /** Not useful on Android; disallowed by SELinux. */
 int shmdt(const void* _Nonnull __addr) __INTRODUCED_IN(26);
 /** Not useful on Android; disallowed by SELinux. */
diff --git a/libc/include/time.h b/libc/include/time.h
index 45c5c34..f448851 100644
--- a/libc/include/time.h
+++ b/libc/include/time.h
@@ -365,13 +365,14 @@
 
 /**
  * [clock_nanosleep(2)](http://man7.org/linux/man-pages/man2/clock_nanosleep.2.html)
- * sleeps for the given time as measured by the given clock.
+ * sleeps for the given time (or until the given time if the TIMER_ABSTIME flag
+ * is used), as measured by the given clock.
  *
 * Returns 0 on success, and returns an error number on failure.
  * If the sleep was interrupted by a signal, the return value will be `EINTR`
  * and `remainder` will be the amount of time remaining.
  */
-int clock_nanosleep(clockid_t __clock, int __flags, const struct timespec* _Nonnull __duration, struct timespec* _Nullable __remainder);
+int clock_nanosleep(clockid_t __clock, int __flags, const struct timespec* _Nonnull __time, struct timespec* _Nullable __remainder);
 
 /**
  * [clock_settime(2)](http://man7.org/linux/man-pages/man2/clock_settime.2.html)
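
A small sketch of the absolute-time case the reworded comment describes: with TIMER_ABSTIME the call sleeps until the given deadline, so a retry after `EINTR` still targets the original wake-up time and `remainder` is unused:

  #include <errno.h>
  #include <time.h>

  int sleep_until_ms_from_now(long ms) {
    struct timespec deadline;
    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += ms / 1000;
    deadline.tv_nsec += (ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L) { deadline.tv_sec++; deadline.tv_nsec -= 1000000000L; }

    int rc;
    do {
      rc = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, nullptr);
    } while (rc == EINTR);
    return rc;  // 0 on success, an error number on failure
  }
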
diff --git a/libc/include/unistd.h b/libc/include/unistd.h
index b94197e..ee772a5 100644
--- a/libc/include/unistd.h
+++ b/libc/include/unistd.h
@@ -89,7 +89,11 @@
 
 /**
  * _Fork() creates a new process. _Fork() differs from fork() in that it does
- * not run any handlers set by pthread_atfork().
+ * not run any handlers set by pthread_atfork(). In addition to any user-defined
+ * ones, bionic uses pthread_atfork() handlers to ensure consistency of its own
+ * state, so the child should only call
+ * [POSIX async-safe](https://man7.org/linux/man-pages/man7/signal-safety.7.html)
+ * functions.
  *
  * Returns 0 in the child, the pid of the child in the parent,
  * and returns -1 and sets `errno` on failure.
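
A minimal sketch of the constraint the expanded comment describes: because the child produced by _Fork() has skipped every pthread_atfork() handler, bionic's included, it should limit itself to async-signal-safe calls such as execve() and _exit():

  #include <unistd.h>

  pid_t spawn(const char* path, char* const argv[], char* const envp[]) {
    pid_t pid = _Fork();
    if (pid == 0) {
      execve(path, argv, envp);  // async-signal-safe
      _exit(127);                // also async-signal-safe; don't call exit(), malloc(), or printf() here
    }
    return pid;  // child's pid in the parent, or -1 with errno set
  }
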
diff --git a/linker/Android.bp b/linker/Android.bp
index 55daf22..0533ae9 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -116,7 +116,6 @@
         "libziparchive",
         "libbase",
         "libz",
-        "libprocinfo", // For procinfo::MappedFileSize()
 
         "libasync_safe",
 
@@ -574,7 +573,6 @@
         "libasync_safe",
         "libbase",
         "liblog_for_runtime_apex",
-        "libprocinfo", // For procinfo::MappedFileSize()
     ],
 
     data_libs: [
diff --git a/linker/linker.cpp b/linker/linker.cpp
index e54a524..b0caedd 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -3364,7 +3364,7 @@
                               "\"%s\" has text relocations",
                               get_realpath());
     add_dlwarning(get_realpath(), "text relocations");
-    if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
       return false;
     }
@@ -3380,7 +3380,7 @@
 #if !defined(__LP64__)
   if (has_text_relocations) {
     // All relocations are done, we can protect our segments back to read-only.
-    if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              get_realpath(), strerror(errno));
       return false;
@@ -3418,7 +3418,7 @@
 }
 
 bool soinfo::protect_relro() {
-  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
+  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
     DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
            get_realpath(), strerror(errno));
     return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 1860ccc..d6592af 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,7 +201,6 @@
   const ElfW(Phdr)* phdr;
   size_t phdr_count;
   ElfW(Addr) entry_point;
-  bool should_pad_segments;
 };
 
 static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -294,7 +293,6 @@
   result.phdr = elf_reader.loaded_phdr();
   result.phdr_count = elf_reader.phdr_count();
   result.entry_point = elf_reader.entry_point();
-  result.should_pad_segments = elf_reader.should_pad_segments();
   return result;
 }
 
@@ -368,7 +366,6 @@
   somain = si;
   si->phdr = exe_info.phdr;
   si->phnum = exe_info.phdr_count;
-  si->set_should_pad_segments(exe_info.should_pad_segments);
   get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
   si->size = phdr_table_get_load_size(si->phdr, si->phnum);
   si->dynamic = nullptr;
@@ -402,7 +399,7 @@
     auto note_gnu_property = GnuPropertySection(somain);
     if (note_gnu_property.IsBTICompatible() &&
         (phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
-                                     somain->should_pad_segments(), &note_gnu_property) < 0)) {
+                                     &note_gnu_property) < 0)) {
       __linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
                      strerror(errno));
     }
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 821f30d..82b37a4 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -46,8 +46,6 @@
 #include "private/CFIShadow.h" // For kLibraryAlignment
 #include "private/elf_note.h"
 
-#include <procinfo/process_map.h>
-
 static int GetTargetElfMachine() {
 #if defined(__arm__)
   return EM_ARM;
@@ -198,7 +196,7 @@
     // For Armv8.5-A loaded executable segments may require PROT_BTI.
     if (note_gnu_property_.IsBTICompatible()) {
       did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
-                                               should_pad_segments_, &note_gnu_property_) == 0);
+                                               &note_gnu_property_) == 0);
     }
 #endif
   }
@@ -758,41 +756,6 @@
   return true;
 }
 
-static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                             size_t phdr_idx, ElfW(Addr)* p_memsz,
-                                             ElfW(Addr)* p_filesz, bool should_pad_segments) {
-  const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
-  const ElfW(Phdr)* next = nullptr;
-  size_t next_idx = phdr_idx + 1;
-
-  if (phdr->p_align == kPageSize || !should_pad_segments) {
-    return;
-  }
-
-  if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
-    next = &phdr_table[next_idx];
-  }
-
-  // If this is the last LOAD segment, no extension is needed
-  if (!next || *p_memsz != *p_filesz) {
-    return;
-  }
-
-  ElfW(Addr) next_start = page_start(next->p_vaddr);
-  ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
-
-  // If adjacent segment mappings overlap, no extension is needed.
-  if (curr_end >= next_start) {
-    return;
-  }
-
-  // Extend the LOAD segment mapping to be contiguous with that of
-  // the next LOAD segment.
-  ElfW(Addr) extend = next_start - curr_end;
-  *p_memsz += extend;
-  *p_filesz += extend;
-}
-
 bool ElfReader::LoadSegments() {
   for (size_t i = 0; i < phdr_num_; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -801,22 +764,18 @@
       continue;
     }
 
-    ElfW(Addr) p_memsz = phdr->p_memsz;
-    ElfW(Addr) p_filesz = phdr->p_filesz;
-    _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
-
     // Segment addresses in memory.
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
-    ElfW(Addr) seg_end = seg_start + p_memsz;
+    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;
 
     ElfW(Addr) seg_page_start = page_start(seg_start);
     ElfW(Addr) seg_page_end = page_end(seg_end);
 
-    ElfW(Addr) seg_file_end = seg_start + p_filesz;
+    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;
 
     // File offsets.
     ElfW(Addr) file_start = phdr->p_offset;
-    ElfW(Addr) file_end = file_start + p_filesz;
+    ElfW(Addr) file_end   = file_start + phdr->p_filesz;
 
     ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
@@ -826,12 +785,12 @@
       return false;
     }
 
-    if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
+    if (file_end > static_cast<size_t>(file_size_)) {
       DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
           " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
           name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
           reinterpret_cast<void*>(phdr->p_filesz),
-          reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
+          reinterpret_cast<void*>(file_end), file_size_);
       return false;
     }
 
@@ -869,25 +828,10 @@
       }
     }
 
-    // if the segment is writable, and its memory map extends beyond
-    // the segment contents on file (p_filesz); zero-fill it until the
-    // end of the mapping backed by the file, rounded to the next
-    // page boundary; as this portion of the mapping corresponds to either
-    // garbage (partial page at the end) or data from other segments.
-    //
-    // If any part of the mapping extends beyond the file size there is
-    // no need to zero it since that region is not touchable by userspace
-    // and attempting to do so will causes the kernel to throw a SIGBUS.
-    //
-    // See: system/libprocinfo/include/procinfo/process_map_size.h
-    uint64_t file_backed_size = ::android::procinfo::MappedFileSize(seg_page_start,
-                                page_end(seg_page_start + file_length),
-                                file_offset_ + file_page_start, file_size_);
-    // _seg_file_end = unextended seg_file_end
-    uint64_t _seg_file_end = seg_start + phdr->p_filesz;
-    uint64_t zero_fill_len = file_backed_size - (_seg_file_end - seg_page_start);
-    if ((phdr->p_flags & PF_W) != 0 && zero_fill_len > 0) {
-      memset(reinterpret_cast<void*>(_seg_file_end), 0, zero_fill_len);
+    // if the segment is writable, and does not end on a page boundary,
+    // zero-fill it until the page limit.
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
+      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
     }
 
     seg_file_end = page_end(seg_file_end);
@@ -920,21 +864,17 @@
  * phdr_table_protect_segments and phdr_table_unprotect_segments.
  */
 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                     ElfW(Addr) load_bias, int extra_prot_flags,
-                                     bool should_pad_segments) {
-  for (size_t i = 0; i < phdr_count; ++i) {
-    const ElfW(Phdr)* phdr = &phdr_table[i];
+                                     ElfW(Addr) load_bias, int extra_prot_flags) {
+  const ElfW(Phdr)* phdr = phdr_table;
+  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
+  for (; phdr < phdr_limit; phdr++) {
     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
       continue;
     }
 
-    ElfW(Addr) p_memsz = phdr->p_memsz;
-    ElfW(Addr) p_filesz = phdr->p_filesz;
-    _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
-
-    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
-    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -969,21 +909,19 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  *   prop        -> GnuPropertySection or nullptr
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, bool should_pad_segments,
-                                const GnuPropertySection* prop __unused) {
+                                ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
   int prot = 0;
 #if defined(__aarch64__)
   if ((prop != nullptr) && prop->IsBTICompatible()) {
     prot |= PROT_BTI;
   }
 #endif
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
 }
 
 /* Change the protection of all loaded segments in memory to writable.
@@ -999,82 +937,19 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Are segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
-                                  size_t phdr_count, ElfW(Addr) load_bias,
-                                  bool should_pad_segments) {
-  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
-                                   should_pad_segments);
-}
-
-static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
-                                              const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                              ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
-                                              bool should_pad_segments) {
-  // Find the index and phdr of the LOAD containing the GNU_RELRO segment
-  for (size_t index = 0; index < phdr_count; ++index) {
-    const ElfW(Phdr)* phdr = &phdr_table[index];
-
-    if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
-      // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
-      // LOAD segment mem size, we need to protect only a partial region of the
-      // LOAD segment and therefore cannot avoid a VMA split.
-      //
-      // Note: Don't check the page-aligned mem sizes since the extended protection
-      // may incorrectly write protect non-relocation data.
-      //
-      // Example:
-      //
-      //               |---- 3K ----|-- 1K --|---- 3K ---- |-- 1K --|
-      //       ----------------------------------------------------------------
-      //               |            |        |             |        |
-      //        SEG X  |     RO     |   RO   |     RW      |        |   SEG Y
-      //               |            |        |             |        |
-      //       ----------------------------------------------------------------
-      //                            |        |             |
-      //                            |        |             |
-      //                            |        |             |
-      //                    relro_vaddr   relro_vaddr   relro_vaddr
-      //                    (load_vaddr)       +            +
-      //                                  relro_memsz   load_memsz
-      //
-      //       ----------------------------------------------------------------
-      //               |         PAGE        |         PAGE         |
-      //       ----------------------------------------------------------------
-      //                                     |       Potential      |
-      //                                     |----- Extended RO ----|
-      //                                     |      Protection      |
-      //
-      // If the check below uses  page aligned mem sizes it will cause incorrect write
-      // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
-      if (relro_phdr->p_memsz < phdr->p_memsz) {
-        return;
-      }
-
-      ElfW(Addr) p_memsz = phdr->p_memsz;
-      ElfW(Addr) p_filesz = phdr->p_filesz;
-
-      // Attempt extending the VMA (mprotect range). Without extending the range,
-      // mprotect will only RO protect a part of the extended RW LOAD segment, which
-      // will leave an extra split RW VMA (the gap).
-      _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
-                               should_pad_segments);
-
-      *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
-      return;
-    }
-  }
+                                  size_t phdr_count, ElfW(Addr) load_bias) {
+  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
 }
 
 /* Used internally by phdr_table_protect_gnu_relro and
  * phdr_table_unprotect_gnu_relro.
  */
 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                          ElfW(Addr) load_bias, int prot_flags,
-                                          bool should_pad_segments) {
+                                          ElfW(Addr) load_bias, int prot_flags) {
   const ElfW(Phdr)* phdr = phdr_table;
   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
 
@@ -1101,8 +976,6 @@
     //       that it starts on a page boundary.
     ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
     ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
-    _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
-                               should_pad_segments);
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -1127,14 +1000,12 @@
  *   phdr_table  -> program header table
  *   phdr_count  -> number of entries in tables
  *   load_bias   -> load bias
- *   should_pad_segments -> Were segments extended to avoid gaps in the memory map
  * Return:
  *   0 on success, -1 on failure (error code in errno).
  */
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias, bool should_pad_segments) {
-  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
-                                        should_pad_segments);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
+                                 size_t phdr_count, ElfW(Addr) load_bias) {
+  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
 }
 
 /* Serialize the GNU relro segments to the given file descriptor. This can be
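
With the segment-extension logic removed, the mprotect() range for each PT_LOAD (and for PT_GNU_RELRO) is again derived purely from that header's own p_vaddr and p_memsz, rounded out to page boundaries. Spelled out with concrete numbers, mirroring the linker's page_start()/page_end() helpers (4 KiB pages assumed):

  #include <stdint.h>

  constexpr uint64_t kPageSize = 4096;
  constexpr uint64_t page_start(uint64_t addr) { return addr & ~(kPageSize - 1); }
  constexpr uint64_t page_end(uint64_t addr) { return page_start(addr + kPageSize - 1); }

  // A PT_LOAD with p_vaddr = 0x1234 and p_memsz = 0x2000 is protected from
  // page_start(0x1234) = 0x1000 to page_end(0x1234 + 0x2000) = 0x4000, i.e. three pages.
  static_assert(page_start(0x1234) == 0x1000, "");
  static_assert(page_end(0x1234 + 0x2000) == 0x4000, "");
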
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index 4deed33..e5b87bb 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,14 +128,13 @@
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
 
 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                ElfW(Addr) load_bias, bool should_pad_segments,
-                                const GnuPropertySection* prop = nullptr);
+                                ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
 
 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                  ElfW(Addr) load_bias, bool should_pad_segments);
+                                  ElfW(Addr) load_bias);
 
 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
-                                 ElfW(Addr) load_bias, bool should_pad_segments);
+                                 ElfW(Addr) load_bias);
 
 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 5b58895..952dade 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,8 +187,7 @@
   auto protect_segments = [&]() {
     // Make .text executable.
     if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
-                                    relocator.si->load_bias,
-                                    relocator.si->should_pad_segments()) < 0) {
+                                    relocator.si->load_bias) < 0) {
       DL_ERR("can't protect segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
@@ -198,8 +197,7 @@
   auto unprotect_segments = [&]() {
     // Make .text writable.
     if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
-                                      relocator.si->load_bias,
-                                      relocator.si->should_pad_segments()) < 0) {
+                                      relocator.si->load_bias) < 0) {
       DL_ERR("can't unprotect loadable segments for \"%s\": %s",
              relocator.si->get_realpath(), strerror(errno));
       return false;
diff --git a/tests/Android.bp b/tests/Android.bp
index 4e9192e..0f4a942 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -785,7 +785,6 @@
     ],
     static_libs: [
         "libbase",
-        "libprocinfo",
     ],
     include_dirs: [
         "bionic/libc",
@@ -835,6 +834,7 @@
         "cfi_test_helper",
         "cfi_test_helper2",
         "elftls_dlopen_ie_error_helper",
+        "elftls_dtv_resize_helper",
         "exec_linker_helper",
         "exec_linker_helper_lib",
         "heap_tagging_async_helper",
@@ -937,6 +937,8 @@
         "libtest_elftls_dynamic_filler_1",
         "libtest_elftls_dynamic_filler_2",
         "libtest_elftls_dynamic_filler_3",
+        "libtest_elftls_dynamic_filler_4",
+        "libtest_elftls_dynamic_filler_5",
         "libtest_elftls_shared_var",
         "libtest_elftls_shared_var_ie",
         "libtest_elftls_tprel",
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index b702725..d078e50 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,7 +31,6 @@
 #include <android-base/test_utils.h>
 
 #include <sys/mman.h>
-#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 #include <sys/wait.h>
@@ -2047,11 +2046,6 @@
                                                              -1, 0));
   ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
 
-  struct stat file_stat;
-  int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
-  ASSERT_EQ(ret, 0) << "Failed to stat library";
-  size_t file_size = file_stat.st_size;
-
   for (const auto& rec : maps_to_copy) {
     uintptr_t offset = rec.addr_start - addr_start;
     size_t size = rec.addr_end - rec.addr_start;
@@ -2059,11 +2053,7 @@
     void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
     ASSERT_TRUE(map != MAP_FAILED);
-    // Attempting the below memcpy from a portion of the map that is off the end of
-    // the backing file will cause the kernel to throw a SIGBUS
-    size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
-                                                       rec.offset, file_size);
-    memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
+    memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
     mprotect(map, size, rec.perms);
   }
 
diff --git a/tests/elftls_dl_test.cpp b/tests/elftls_dl_test.cpp
index e2fa3a0..bcb2b40 100644
--- a/tests/elftls_dl_test.cpp
+++ b/tests/elftls_dl_test.cpp
@@ -29,10 +29,9 @@
 #include <dlfcn.h>
 #include <link.h>
 
-#include <android-base/file.h>
-#include <android-base/test_utils.h>
 #include <gtest/gtest.h>
 
+#include <string>
 #include <thread>
 
 #include "gtest_globals.h"
@@ -155,71 +154,11 @@
 
 TEST(elftls_dl, dtv_resize) {
 #if defined(__BIONIC__)
-#define LOAD_LIB(soname) ({                           \
-    auto lib = dlopen(soname, RTLD_LOCAL | RTLD_NOW); \
-    ASSERT_NE(nullptr, lib);                          \
-    reinterpret_cast<int(*)()>(dlsym(lib, "bump"));   \
-  })
-
-  auto dtv = []() -> TlsDtv* { return __get_tcb_dtv(__get_bionic_tcb()); };
-
-  static_assert(sizeof(TlsDtv) == 3 * sizeof(void*),
-                "This test assumes that the Dtv has a 3-word header");
-
-  // Initially there are 4 modules (5 w/ hwasan):
-  //  - the main test executable
-  //  - libc
-  //  - libtest_elftls_shared_var
-  //  - libtest_elftls_tprel
-  //  - w/ hwasan: libclang_rt.hwasan
-
-  // The initial DTV is an empty DTV with no generation and a size of 0.
-  TlsDtv* zero_dtv = dtv();
-  ASSERT_EQ(0u, zero_dtv->count);
-  ASSERT_EQ(nullptr, zero_dtv->next);
-  ASSERT_EQ(kTlsGenerationNone, zero_dtv->generation);
-
-  // Load module 5 (6 w/ hwasan).
-  auto func1 = LOAD_LIB("libtest_elftls_dynamic_filler_1.so");
-  ASSERT_EQ(101, func1());
-
-  // After loading one module, the DTV should be initialized to the next
-  // power-of-2 size (including the header).
-  TlsDtv* initial_dtv = dtv();
-  ASSERT_EQ(running_with_hwasan() ? 13u : 5u, dtv()->count);
-  ASSERT_EQ(zero_dtv, initial_dtv->next);
-  ASSERT_LT(0u, initial_dtv->generation);
-
-  // Load module 6 (7 w/ hwasan).
-  auto func2 = LOAD_LIB("libtest_elftls_dynamic_filler_2.so");
-  ASSERT_EQ(102, func1());
-
-#if defined(__aarch64__)
-  // The arm64 TLSDESC resolver doesn't update the DTV if it is new enough for
-  // the given access.
-  ASSERT_EQ(running_with_hwasan() ? 13u : 5u, dtv()->count);
-#else
-  // __tls_get_addr updates the DTV anytime the generation counter changes.
-  ASSERT_EQ(13u, dtv()->count);
-#endif
-
-  ASSERT_EQ(201, func2());
-  TlsDtv* new_dtv = dtv();
-  if (!running_with_hwasan()) {
-    ASSERT_NE(initial_dtv, new_dtv);
-    ASSERT_EQ(initial_dtv, new_dtv->next);
-  }
-  ASSERT_EQ(13u, new_dtv->count);
-
-  // Load module 7 (8 w/ hwasan).
-  auto func3 = LOAD_LIB("libtest_elftls_dynamic_filler_3.so");
-  ASSERT_EQ(103, func1());
-  ASSERT_EQ(202, func2());
-  ASSERT_EQ(301, func3());
-
-  ASSERT_EQ(new_dtv, dtv());
-
-#undef LOAD_LIB
+  std::string helper = GetTestlibRoot() + "/elftls_dtv_resize_helper";
+  chmod(helper.c_str(), 0755);  // TODO: "x" lost in CTS, b/34945607
+  ExecTestHelper eth;
+  eth.SetArgs({helper.c_str(), nullptr});
+  eth.Run([&]() { execve(helper.c_str(), eth.GetArgs(), eth.GetEnv()); }, 0, nullptr);
 #else
   GTEST_SKIP() << "test doesn't apply to glibc";
 #endif
diff --git a/tests/libs/Android.bp b/tests/libs/Android.bp
index 06ee132..f640552 100644
--- a/tests/libs/Android.bp
+++ b/tests/libs/Android.bp
@@ -123,6 +123,39 @@
     ],
 }
 
+cc_test_library {
+    name: "libtest_elftls_dynamic_filler_4",
+    defaults: ["bionic_testlib_defaults"],
+    srcs: ["elftls_dynamic_filler.cpp"],
+    cflags: [
+        "-DTLS_FILLER=400",
+    ],
+}
+
+cc_test_library {
+    name: "libtest_elftls_dynamic_filler_5",
+    defaults: ["bionic_testlib_defaults"],
+    srcs: ["elftls_dynamic_filler.cpp"],
+    cflags: [
+        "-DTLS_FILLER=500",
+    ],
+}
+
+cc_test {
+    name: "elftls_dtv_resize_helper",
+    defaults: [
+        "bionic_testlib_defaults",
+        "bionic_targets_only",
+    ],
+    srcs: ["elftls_dtv_resize_helper.cpp"],
+    include_dirs: [
+        "bionic/libc",
+    ],
+    static_libs: [
+        "libbase",
+    ],
+}
+
 // -----------------------------------------------------------------------------
 // Library to test gnu-styled hash
 // -----------------------------------------------------------------------------
diff --git a/tests/libs/elftls_dtv_resize_helper.cpp b/tests/libs/elftls_dtv_resize_helper.cpp
new file mode 100644
index 0000000..340d5df
--- /dev/null
+++ b/tests/libs/elftls_dtv_resize_helper.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <functional>
+#include <iostream>
+
+#include "bionic/pthread_internal.h"
+
+constexpr bool kDumpModulesForDebugging = false;
+
+// The old external/libcxx doesn't have operator<< for nullptr.
+// TODO(b/175635923): Remove this hack after upgrading libc++.
+template <class T>
+T fix_nullptr(T&& arg) {
+  return arg;
+}
+void* fix_nullptr(nullptr_t arg) {
+  return static_cast<void*>(arg);
+}
+
+template <class Val1, class Val2, class Compare>
+void check(int line, const char* val1_expr, Val1&& val1, const char* val2_expr, Val2&& val2,
+           Compare compare) {
+  if (!compare(val1, val2)) {
+    std::cerr << __FILE__ << ":" << line << ": assertion failed: LHS(" << val1_expr << ") is "
+              << fix_nullptr(val1) << ", RHS(" << val2_expr << ") is " << fix_nullptr(val2) << "\n"
+              << std::flush;
+    abort();
+  }
+}
+
+#define ASSERT_EQ(val1, val2) check(__LINE__, #val1, val1, #val2, val2, std::equal_to())
+#define ASSERT_NE(val1, val2) check(__LINE__, #val1, val1, #val2, val2, std::not_equal_to())
+#define ASSERT_LT(val1, val2) check(__LINE__, #val1, val1, #val2, val2, std::less())
+#define ASSERT_LE(val1, val2) check(__LINE__, #val1, val1, #val2, val2, std::less_equal())
+
+static size_t highest_loaded_modid() {
+  size_t result = 0;
+  auto update_result = [](struct dl_phdr_info* info, size_t size __unused, void* data) {
+    size_t& result = *reinterpret_cast<size_t*>(data);
+    if (kDumpModulesForDebugging) {
+      fprintf(stderr, "module %s: TLS modid %zu\n", info->dlpi_name, info->dlpi_tls_modid);
+    }
+    result = std::max(result, info->dlpi_tls_modid);
+    return 0;
+  };
+  dl_iterate_phdr(update_result, &result);
+  return result;
+}
+
+static TlsDtv* dtv() {
+  return __get_tcb_dtv(__get_bionic_tcb());
+}
+
+static size_t highest_modid_in_dtv() {
+  TlsDtv* current_dtv = dtv();
+  size_t result = 0;
+  for (size_t i = 0; i < current_dtv->count; ++i) {
+    if (current_dtv->modules[i] != nullptr) {
+      result = __tls_module_idx_to_id(i);
+    }
+  }
+  return result;
+}
+
+// Unused, but ensures that the test executable has a TLS segment. With a
+// new-enough libc++_static.a, the test executable will tend to has a TLS
+// segment to hold the libc++ EH globals pointer.
+__thread int g_tls_var_placeholder = 42;
+
+int main() {
+  // Prevent this TLS variable from being optimized away.
+  ASSERT_EQ(42, g_tls_var_placeholder);
+
+  auto load_lib = [](const char* soname) {
+    void* lib = dlopen(soname, RTLD_LOCAL | RTLD_NOW);
+    ASSERT_NE(nullptr, lib);
+    auto func = reinterpret_cast<int (*)()>(dlsym(lib, "bump"));
+    ASSERT_NE(nullptr, func);
+    return func;
+  };
+
+  static_assert(sizeof(TlsDtv) == 3 * sizeof(void*),
+                "This test assumes that the Dtv has a 3-word header");
+
+  // Initially there are 2-4 modules:
+  //  - 1: test executable
+  //  - 2: libc
+  //  - 3: libc++ (when using a new-enough libc++)
+  //  - 4: libclang_rt.hwasan (when running with HWASan)
+  size_t first_filler_modid = highest_loaded_modid() + 1;
+  ASSERT_LE(2, highest_loaded_modid());
+  ASSERT_LE(highest_loaded_modid(), 4);
+
+  // The initial DTV is an empty DTV with no generation and a size of 0.
+  TlsDtv* zero_dtv = dtv();
+  ASSERT_EQ(0u, zero_dtv->count);
+  ASSERT_EQ(nullptr, zero_dtv->next);
+  ASSERT_EQ(kTlsGenerationNone, zero_dtv->generation);
+
+  // Load a module. The DTV is still empty unless the TLS variable is accessed.
+  auto func1 = load_lib("libtest_elftls_dynamic_filler_1.so");
+  ASSERT_EQ(zero_dtv, dtv());
+  ASSERT_EQ(first_filler_modid, highest_loaded_modid());
+
+  // After accessing a TLS variable, the DTV should be initialized. It should be
+  // 8 words in size, with a 5-entry capacity.
+  ASSERT_EQ(101, func1());
+  TlsDtv* initial_dtv = dtv();
+  ASSERT_EQ(5u, dtv()->count);
+  ASSERT_EQ(zero_dtv, initial_dtv->next);
+  ASSERT_LT(0u, initial_dtv->generation);
+  ASSERT_EQ(first_filler_modid, highest_modid_in_dtv());
+  ASSERT_NE(nullptr, initial_dtv->modules[__tls_module_id_to_idx(first_filler_modid)]);
+
+  size_t current_generation = initial_dtv->generation;
+
+  // Fill the rest of the DTV up. (i.e. Ensure that exactly 5 modules with TLS
+  // segments are loaded.)
+  auto fill_entry = [&](size_t modid, const char* soname, int tls_var_value) {
+    if (highest_modid_in_dtv() == modid - 1) {
+      auto func = load_lib(soname);
+
+      // Loading the module doesn't affect the DTV yet.
+      ASSERT_EQ(initial_dtv, dtv());
+      ASSERT_EQ(modid, highest_loaded_modid());
+      ASSERT_EQ(modid - 1, highest_modid_in_dtv());
+      ASSERT_EQ(current_generation, initial_dtv->generation);
+
+      // Access the TLS variable, which will allocate it in the DTV.
+      ASSERT_EQ(tls_var_value, func());
+
+      // Verify allocation and a bumped generation.
+      ASSERT_EQ(initial_dtv, dtv());
+      ASSERT_EQ(modid, highest_modid_in_dtv());
+      ASSERT_LT(current_generation, initial_dtv->generation);
+      current_generation = initial_dtv->generation;
+    }
+  };
+
+  fill_entry(4u, "libtest_elftls_dynamic_filler_2.so", 201);
+  fill_entry(5u, "libtest_elftls_dynamic_filler_3.so", 301);
+  ASSERT_EQ(5u, highest_modid_in_dtv());
+
+  // Load module 6, which will require doubling the size of the DTV.
+  auto func4 = load_lib("libtest_elftls_dynamic_filler_4.so");
+  ASSERT_EQ(6u, highest_loaded_modid());
+  ASSERT_EQ(5u, highest_modid_in_dtv());
+  ASSERT_EQ(initial_dtv, dtv());
+
+  // Access a TLS variable from the first filler module.
+  ASSERT_EQ(102, func1());
+  ASSERT_EQ(5u, highest_modid_in_dtv());
+#if defined(__aarch64__)
+  // The arm64 TLSDESC resolver doesn't update the DTV if it is new enough for
+  // the given access.
+  ASSERT_EQ(initial_dtv, dtv());
+  ASSERT_EQ(5u, dtv()->count);
+  ASSERT_EQ(current_generation, dtv()->generation);
+#else
+  // __tls_get_addr updates the DTV anytime the generation counter changes, but
+  // the highest modid in the DTV is still 5, because module 6 hasn't been
+  // allocated yet.
+  ASSERT_NE(initial_dtv, dtv());
+  ASSERT_EQ(13u, dtv()->count);
+  ASSERT_LT(current_generation, dtv()->generation);
+#endif
+
+  // Accessing the TLS variable in the latest module will always expand the DTV.
+  ASSERT_EQ(401, func4());
+  TlsDtv* new_dtv = dtv();
+  ASSERT_NE(initial_dtv, new_dtv);
+  ASSERT_EQ(initial_dtv, new_dtv->next);
+  ASSERT_EQ(13u, new_dtv->count);
+  ASSERT_LT(current_generation, new_dtv->generation);
+  ASSERT_EQ(6u, highest_modid_in_dtv());
+  current_generation = new_dtv->generation;
+
+  // Load one more filler, module 7.
+  auto func5 = load_lib("libtest_elftls_dynamic_filler_5.so");
+  ASSERT_EQ(103, func1());
+  ASSERT_EQ(402, func4());
+  ASSERT_EQ(6u, highest_modid_in_dtv());
+  ASSERT_EQ(501, func5());
+  ASSERT_EQ(7u, highest_modid_in_dtv());
+
+  // Verify that no new DTV has been allocated.
+  ASSERT_EQ(new_dtv, dtv());
+  ASSERT_EQ(13u, new_dtv->count);
+  ASSERT_LT(current_generation, new_dtv->generation);
+
+  return 0;
+}