Remove PAGE_SIZE call sites.

To enable experiments with non-4KiB page sizes, introduce
an inline page_size() function that returns the runtime page size
where PAGE_SIZE is not 4096, and a constant 4096 otherwise.
This should ensure that there are no changes to the generated code on
unaffected platforms.
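
For illustration, a minimal sketch of what such inline helpers might
look like (an assumption for this description only: a
getauxval(AT_PAGESZ)-based runtime query; the real definitions live in
platform/bionic/page.h and may differ in detail):

    // Hypothetical sketch, not the actual platform/bionic/page.h contents.
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/auxv.h>

    inline size_t page_size() {
    #if defined(PAGE_SIZE) && PAGE_SIZE == 4096
      // Constant-folds to 4096 on existing 4KiB platforms, so the
      // generated code does not change there.
      return PAGE_SIZE;
    #else
      // Queried once from the kernel via the auxiliary vector
      // (assumption: AT_PAGESZ is available at this point).
      static const size_t size = getauxval(AT_PAGESZ);
      return size;
    #endif
    }

    // Replacements for the PAGE_START/PAGE_OFFSET/PAGE_END macros:
    // round an address down to a page boundary, take its offset within
    // the page, and round it up to the next page boundary.
    inline uintptr_t page_start(uintptr_t addr) {
      return addr & ~(page_size() - 1);
    }

    inline uintptr_t page_offset(uintptr_t addr) {
      return addr & (page_size() - 1);
    }

    inline uintptr_t page_end(uintptr_t addr) {
      return page_start(addr + page_size() - 1);
    }

On 4KiB platforms the #if branch keeps every call site a compile-time
constant, so callers below (e.g. the PAGE_SIZE/PAGE_START replacements
in linker_phdr.cpp) should compile to the same code as before.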

Test: source build/envsetup.sh
      lunch aosp_cf_arm64_16k_phone-userdebug
      m -j32 installclean
      m -j32
Test: launch_cvd \
  -kernel_path /path/to/out/android14-5.15/dist/Image \
  -initramfs_path /path/to/out/android14-5.15/dist/initramfs.img \
  -userdata_format=ext4
Bug: 277272383
Bug: 230790254
Change-Id: Ic0ed98b67f7c6b845804b90a4e16649f2fc94028
diff --git a/linker/linker.cpp b/linker/linker.cpp
index 17b574f..5c26944 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -962,7 +962,7 @@
   }
 
   // Check if it is properly stored
-  if (entry.method != kCompressStored || (entry.offset % PAGE_SIZE) != 0) {
+  if (entry.method != kCompressStored || (entry.offset % page_size()) != 0) {
     close(fd);
     return -1;
   }
@@ -1171,7 +1171,7 @@
          "load_library(ns=%s, task=%s, flags=0x%x, realpath=%s, search_linked_namespaces=%d)",
          ns->get_name(), name, rtld_flags, realpath.c_str(), search_linked_namespaces);
 
-  if ((file_offset % PAGE_SIZE) != 0) {
+  if ((file_offset % page_size()) != 0) {
     DL_OPEN_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
     return false;
   }
diff --git a/linker/linker_cfi.cpp b/linker/linker_cfi.cpp
index 6bc2615..247a25d 100644
--- a/linker/linker_cfi.cpp
+++ b/linker/linker_cfi.cpp
@@ -50,8 +50,8 @@
   ShadowWrite(uint16_t* s, uint16_t* e) {
     shadow_start = reinterpret_cast<char*>(s);
     shadow_end = reinterpret_cast<char*>(e);
-    aligned_start = reinterpret_cast<char*>(PAGE_START(reinterpret_cast<uintptr_t>(shadow_start)));
-    aligned_end = reinterpret_cast<char*>(PAGE_END(reinterpret_cast<uintptr_t>(shadow_end)));
+    aligned_start = reinterpret_cast<char*>(page_start(reinterpret_cast<uintptr_t>(shadow_start)));
+    aligned_end = reinterpret_cast<char*>(page_end(reinterpret_cast<uintptr_t>(shadow_end)));
     tmp_start =
         reinterpret_cast<char*>(mmap(nullptr, aligned_end - aligned_start, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
@@ -204,7 +204,7 @@
   shadow_start = reinterpret_cast<uintptr_t* (*)(uintptr_t)>(cfi_init)(p);
   CHECK(shadow_start != nullptr);
   CHECK(*shadow_start == p);
-  mprotect(shadow_start, PAGE_SIZE, PROT_READ);
+  mprotect(shadow_start, page_size(), PROT_READ);
   return true;
 }
 
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index 26e2228..f7c8ea9 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -580,8 +580,8 @@
     }
 
     ElfW(Addr) seg_start = phdr->p_vaddr + si->load_bias;
-    ElfW(Addr) seg_page_end = PAGE_END(seg_start + phdr->p_memsz);
-    ElfW(Addr) seg_file_end = PAGE_END(seg_start + phdr->p_filesz);
+    ElfW(Addr) seg_page_end = page_end(seg_start + phdr->p_memsz);
+    ElfW(Addr) seg_file_end = page_end(seg_start + phdr->p_filesz);
 
     if (seg_page_end > seg_file_end) {
       prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
diff --git a/linker/linker_mapped_file_fragment.cpp b/linker/linker_mapped_file_fragment.cpp
index cbe5f66..6be1b2b 100644
--- a/linker/linker_mapped_file_fragment.cpp
+++ b/linker/linker_mapped_file_fragment.cpp
@@ -29,6 +29,7 @@
 #include "linker_mapped_file_fragment.h"
 #include "linker_debug.h"
 #include "linker_utils.h"
+#include "platform/bionic/page.h"
 
 #include <inttypes.h>
 #include <stdlib.h>
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 73cfced..97ae709 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -116,7 +116,7 @@
   can only memory-map at page boundaries, this means that the bias is
   computed as:
 
-       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
+       load_bias = phdr0_load_address - page_start(phdr0->p_vaddr)
 
   (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
           possible wrap around UINT32_MAX for possible large p_vaddr values).
@@ -124,11 +124,11 @@
   And that the phdr0_load_address must start at a page boundary, with
   the segment's real content starting at:
 
-       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
+       phdr0_load_address + page_offset(phdr0->p_vaddr)
 
   Note that ELF requires the following condition to make the mmap()-ing work:
 
-      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
+      page_offset(phdr0->p_vaddr) == page_offset(phdr0->p_offset)
 
   The load_bias must be added to any p_vaddr value read from the ELF file to
   determine the corresponding memory address.
@@ -529,8 +529,8 @@
     min_vaddr = 0;
   }
 
-  min_vaddr = PAGE_START(min_vaddr);
-  max_vaddr = PAGE_END(max_vaddr);
+  min_vaddr = page_start(min_vaddr);
+  max_vaddr = page_end(max_vaddr);
 
   if (out_min_vaddr != nullptr) {
     *out_min_vaddr = min_vaddr;
@@ -545,7 +545,7 @@
 // program header table. Used to determine whether the file should be loaded at
 // a specific virtual address alignment for use with huge pages.
 size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
-  size_t maximum_alignment = PAGE_SIZE;
+  size_t maximum_alignment = page_size();
 
   for (size_t i = 0; i < phdr_count; ++i) {
     const ElfW(Phdr)* phdr = &phdr_table[i];
@@ -563,7 +563,7 @@
 #if defined(__LP64__)
   return maximum_alignment;
 #else
-  return PAGE_SIZE;
+  return page_size();
 #endif
 }
 
@@ -574,7 +574,7 @@
   int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
   // Reserve enough space to properly align the library's start address.
   mapping_align = std::max(mapping_align, start_align);
-  if (mapping_align == PAGE_SIZE) {
+  if (mapping_align == page_size()) {
     void* mmap_ptr = mmap(nullptr, size, PROT_NONE, mmap_flags, -1, 0);
     if (mmap_ptr == MAP_FAILED) {
       return nullptr;
@@ -593,7 +593,7 @@
   constexpr size_t kMaxGapUnits = 32;
   // Allocate enough space so that the end of the desired region aligned up is still inside the
   // mapping.
-  size_t mmap_size = align_up(size, mapping_align) + mapping_align - PAGE_SIZE;
+  size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
   uint8_t* mmap_ptr =
       reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
   if (mmap_ptr == MAP_FAILED) {
@@ -610,7 +610,7 @@
     mapping_align = std::max(mapping_align, kGapAlignment);
     gap_size =
         kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
-    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - PAGE_SIZE;
+    mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
     mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
     if (mmap_ptr == MAP_FAILED) {
       return nullptr;
@@ -665,12 +665,12 @@
              load_size_ - address_space->reserved_size, load_size_, name_.c_str());
       return false;
     }
-    size_t start_alignment = PAGE_SIZE;
+    size_t start_alignment = page_size();
     if (get_transparent_hugepages_supported() && get_application_target_sdk_version() >= 31) {
       size_t maximum_alignment = phdr_table_get_maximum_alignment(phdr_table_, phdr_num_);
       // Limit alignment to PMD size as other alignments reduce the number of
       // bits available for ASLR for no benefit.
-      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : PAGE_SIZE;
+      start_alignment = maximum_alignment == kPmdSize ? kPmdSize : page_size();
     }
     start = ReserveWithAlignmentPadding(load_size_, kLibraryAlignment, start_alignment, &gap_start_,
                                         &gap_size_);
@@ -706,8 +706,8 @@
     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
     ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;
 
-    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
-    ElfW(Addr) seg_page_end   = PAGE_END(seg_end);
+    ElfW(Addr) seg_page_start = page_start(seg_start);
+    ElfW(Addr) seg_page_end = page_end(seg_end);
 
     ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;
 
@@ -715,7 +715,7 @@
     ElfW(Addr) file_start = phdr->p_offset;
     ElfW(Addr) file_end   = file_start + phdr->p_filesz;
 
-    ElfW(Addr) file_page_start = PAGE_START(file_start);
+    ElfW(Addr) file_page_start = page_start(file_start);
     ElfW(Addr) file_length = file_end - file_page_start;
 
     if (file_size_ <= 0) {
@@ -768,11 +768,11 @@
 
     // if the segment is writable, and does not end on a page boundary,
     // zero-fill it until the page limit.
-    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
-      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
+    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
+      memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
     }
 
-    seg_file_end = PAGE_END(seg_file_end);
+    seg_file_end = page_end(seg_file_end);
 
     // seg_file_end is now the first page address after the file
     // content. If seg_end is larger, we need to zero anything
@@ -811,8 +811,8 @@
       continue;
     }
 
-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
     if ((prot & PROT_WRITE) != 0) {
@@ -912,8 +912,8 @@
     //       the program is likely to fail at runtime. So in effect the
     //       linker must only emit a PT_GNU_RELRO segment if it ensures
     //       that it starts on a page boundary.
-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                        seg_page_end - seg_page_start,
@@ -972,8 +972,8 @@
       continue;
     }
 
-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
     ssize_t size = seg_page_end - seg_page_start;
 
     ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
@@ -1035,8 +1035,8 @@
       continue;
     }
 
-    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
-    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+    ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
+    ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
 
     char* file_base = static_cast<char*>(temp_mapping) + *file_offset;
     char* mem_base = reinterpret_cast<char*>(seg_page_start);
@@ -1053,15 +1053,15 @@
     while (match_offset < size) {
       // Skip over dissimilar pages.
       while (match_offset < size &&
-             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
-        match_offset += PAGE_SIZE;
+             memcmp(mem_base + match_offset, file_base + match_offset, page_size()) != 0) {
+        match_offset += page_size();
       }
 
       // Count similar pages.
       size_t mismatch_offset = match_offset;
       while (mismatch_offset < size &&
-             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
-        mismatch_offset += PAGE_SIZE;
+             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, page_size()) == 0) {
+        mismatch_offset += page_size();
       }
 
       // Map over similar pages.
diff --git a/linker/linker_utils.cpp b/linker/linker_utils.cpp
index 29110ed..cd03eed 100644
--- a/linker/linker_utils.cpp
+++ b/linker/linker_utils.cpp
@@ -169,12 +169,6 @@
   return true;
 }
 
-constexpr off64_t kPageMask = ~static_cast<off64_t>(PAGE_SIZE-1);
-
-off64_t page_start(off64_t offset) {
-  return offset & kPageMask;
-}
-
 bool safe_add(off64_t* out, off64_t a, size_t b) {
   CHECK(a >= 0);
   if (static_cast<uint64_t>(INT64_MAX - a) < b) {
@@ -185,10 +179,6 @@
   return true;
 }
 
-size_t page_offset(off64_t offset) {
-  return static_cast<size_t>(offset & (PAGE_SIZE-1));
-}
-
 void split_path(const char* path, const char* delimiters,
                 std::vector<std::string>* paths) {
   if (path != nullptr && path[0] != 0) {
diff --git a/linker/linker_utils.h b/linker/linker_utils.h
index 5073b10..3049e53 100644
--- a/linker/linker_utils.h
+++ b/linker/linker_utils.h
@@ -55,7 +55,5 @@
 
 std::string dirname(const char* path);
 
-off64_t page_start(off64_t offset);
-size_t page_offset(off64_t offset);
 bool safe_add(off64_t* out, off64_t a, size_t b);
 bool is_first_stage_init();
diff --git a/linker/linker_utils_test.cpp b/linker/linker_utils_test.cpp
index 44907da..0b881d4 100644
--- a/linker/linker_utils_test.cpp
+++ b/linker/linker_utils_test.cpp
@@ -33,6 +33,7 @@
 #include <gtest/gtest.h>
 
 #include "linker_utils.h"
+#include "platform/bionic/page.h"
 
 TEST(linker_utils, format_string) {
   std::vector<std::pair<std::string, std::string>> params = {{ "LIB", "lib32"}, { "SDKVER", "42"}};
@@ -104,9 +105,9 @@
 }
 
 TEST(linker_utils, page_start) {
-  ASSERT_EQ(0x0001000, page_start(0x0001000));
-  ASSERT_EQ(0x3002000, page_start(0x300222f));
-  ASSERT_EQ(0x6001000, page_start(0x6001fff));
+  ASSERT_EQ(0x0001000U, page_start(0x0001000));
+  ASSERT_EQ(0x3002000U, page_start(0x300222f));
+  ASSERT_EQ(0x6001000U, page_start(0x6001fff));
 }
 
 TEST(linker_utils, page_offset) {