<bionic/macros.h>: replace align_up()/align_down() with clang's builtins.
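
Clang's alignment builtins (__builtin_align_up, __builtin_align_down,
__builtin_is_aligned) accept both integers and pointers, require a
power-of-two alignment, and return the same type as their first argument,
so the pointer call sites below need no casts. A minimal standalone sketch
of the behavior these call sites rely on (illustration only, not part of
this change):

  #include <cstddef>
  #include <cstdio>

  int main() {
    // Round an integer up/down to a multiple of a power-of-two alignment.
    printf("%zu\n", __builtin_align_up(size_t{13}, 8));    // 16
    printf("%zu\n", __builtin_align_down(size_t{13}, 8));  // 8

    // For pointers the result keeps the operand's type, e.g. char* in,
    // char* out; no reinterpret_cast needed at the call site.
    alignas(16) char buf[64];
    char* p = __builtin_align_up(buf + 1, 16);
    printf("%d\n", __builtin_is_aligned(p, 16));  // 1
    return 0;
  }
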
Change-Id: If39cf028e9a22fc2008ee9d1ba683a4d0d038325
diff --git a/linker/linker_note_gnu_property.cpp b/linker/linker_note_gnu_property.cpp
index 082a604..d221b8d 100644
--- a/linker/linker_note_gnu_property.cpp
+++ b/linker/linker_note_gnu_property.cpp
@@ -137,7 +137,7 @@
// Loop on program property array.
const ElfW(Prop)* property = reinterpret_cast<const ElfW(Prop)*>(&note_nhdr->n_desc[offset]);
const ElfW(Word) property_size =
- align_up(sizeof(ElfW(Prop)) + property->pr_datasz, sizeof(ElfW(Addr)));
+ __builtin_align_up(sizeof(ElfW(Prop)) + property->pr_datasz, sizeof(ElfW(Addr)));
if ((note_nhdr->nhdr.n_descsz - offset) < property_size) {
DL_ERR_AND_LOG(
"\"%s\" .note.gnu.property: property descriptor size is "
diff --git a/linker/linker_note_gnu_property_test.cpp b/linker/linker_note_gnu_property_test.cpp
index 960118c..2a5eddc 100644
--- a/linker/linker_note_gnu_property_test.cpp
+++ b/linker/linker_note_gnu_property_test.cpp
@@ -107,7 +107,7 @@
template <typename T>
bool push(ElfW(Word) pr_type, ElfW(Word) pr_datasz, const T* pr_data) {
// Must be aligned.
- const uintptr_t addition = align_up(pr_datasz, sizeof(ElfW(Addr)));
+ const uintptr_t addition = __builtin_align_up(pr_datasz, sizeof(ElfW(Addr)));
if ((offset() + addition) > kMaxSectionSize) {
return false;
}
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 8bcd76c..f3b0f3d 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -606,23 +606,22 @@
// page size of the platform.
#if defined(__LP64__)
constexpr size_t kGapAlignment = 2 * 1024 * 1024;
-#else
- constexpr size_t kGapAlignment = 0;
#endif
// Maximum gap size, in the units of kGapAlignment.
constexpr size_t kMaxGapUnits = 32;
// Allocate enough space so that the end of the desired region aligned up is still inside the
// mapping.
- size_t mmap_size = align_up(size, mapping_align) + mapping_align - page_size();
+ size_t mmap_size = __builtin_align_up(size, mapping_align) + mapping_align - page_size();
uint8_t* mmap_ptr =
reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
if (mmap_ptr == MAP_FAILED) {
return nullptr;
}
size_t gap_size = 0;
- size_t first_byte = reinterpret_cast<size_t>(align_up(mmap_ptr, mapping_align));
- size_t last_byte = reinterpret_cast<size_t>(align_down(mmap_ptr + mmap_size, mapping_align) - 1);
- if (kGapAlignment && first_byte / kGapAlignment != last_byte / kGapAlignment) {
+ size_t first_byte = reinterpret_cast<size_t>(__builtin_align_up(mmap_ptr, mapping_align));
+ size_t last_byte = reinterpret_cast<size_t>(__builtin_align_down(mmap_ptr + mmap_size, mapping_align) - 1);
+#if defined(__LP64__)
+ if (first_byte / kGapAlignment != last_byte / kGapAlignment) {
// This library crosses a 2MB boundary and will fragment a new huge page.
// Let's take advantage of that and insert a random number of inaccessible huge pages before that
// to improve address randomization and make it harder to locate this library code by probing.
@@ -630,23 +629,24 @@
mapping_align = std::max(mapping_align, kGapAlignment);
gap_size =
kGapAlignment * (is_first_stage_init() ? 1 : arc4random_uniform(kMaxGapUnits - 1) + 1);
- mmap_size = align_up(size + gap_size, mapping_align) + mapping_align - page_size();
+ mmap_size = __builtin_align_up(size + gap_size, mapping_align) + mapping_align - page_size();
mmap_ptr = reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
if (mmap_ptr == MAP_FAILED) {
return nullptr;
}
}
+#endif

- uint8_t *gap_end, *gap_start;
+ uint8_t* gap_end = mmap_ptr + mmap_size;
+#if defined(__LP64__)
if (gap_size) {
-   gap_end = align_down(mmap_ptr + mmap_size, kGapAlignment);
-   gap_start = gap_end - gap_size;
- } else {
-   gap_start = gap_end = mmap_ptr + mmap_size;
+   gap_end = __builtin_align_down(gap_end, kGapAlignment);
}
+#endif
+ uint8_t* gap_start = gap_end - gap_size;

- uint8_t* first = align_up(mmap_ptr, mapping_align);
- uint8_t* last = align_down(gap_start, mapping_align) - size;
+ uint8_t* first = __builtin_align_up(mmap_ptr, mapping_align);
+ uint8_t* last = __builtin_align_down(gap_start, mapping_align) - size;

// arc4random* is not available in first stage init because /dev/urandom hasn't yet been
// created. Don't randomize then.
@@ -1017,7 +1017,7 @@
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = seg_start + p_memsz;

- ElfW(Addr) seg_page_end = align_up(seg_end, seg_align);
+ ElfW(Addr) seg_page_end = __builtin_align_up(seg_end, seg_align);

ElfW(Addr) seg_file_end = seg_start + p_filesz;

@@ -1025,7 +1025,7 @@
ElfW(Addr) file_start = phdr->p_offset;
ElfW(Addr) file_end = file_start + p_filesz;

- ElfW(Addr) file_page_start = align_down(file_start, seg_align);
+ ElfW(Addr) file_page_start = __builtin_align_down(file_start, seg_align);
ElfW(Addr) file_length = file_end - file_page_start;

if (file_size_ <= 0) {
diff --git a/linker/linker_phdr_16kib_compat.cpp b/linker/linker_phdr_16kib_compat.cpp
index bad20ba..d3783cf 100644
--- a/linker/linker_phdr_16kib_compat.cpp
+++ b/linker/linker_phdr_16kib_compat.cpp
@@ -158,7 +158,7 @@
}

if (!relro_phdr) {
- *vaddr = align_down(first_rw->p_vaddr, kCompatPageSize);
+ *vaddr = __builtin_align_down(first_rw->p_vaddr, kCompatPageSize);
return true;
}

@@ -175,7 +175,7 @@
return false;
}

- *vaddr = align_up(end, kCompatPageSize);
+ *vaddr = __builtin_align_up(end, kCompatPageSize);
return true;
}

@@ -227,11 +227,11 @@
// will lead to overwriting adjacent segments since the ELF's segment(s)
// are not 16KiB aligned.

- void* start = reinterpret_cast<void*>(align_down(phdr->p_vaddr + load_bias_, kCompatPageSize));
+ void* start = reinterpret_cast<void*>(__builtin_align_down(phdr->p_vaddr + load_bias_, kCompatPageSize));

// The ELF could be being loaded directly from a zipped APK,
// the zip offset must be added to find the segment offset.
- const ElfW(Addr) offset = file_offset_ + align_down(phdr->p_offset, kCompatPageSize);
+ const ElfW(Addr) offset = file_offset_ + __builtin_align_down(phdr->p_offset, kCompatPageSize);

CHECK(should_use_16kib_app_compat_);
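
Note on the gap logic in the linker_phdr.cpp hunk above: the check
first_byte / kGapAlignment != last_byte / kGapAlignment detects whether the
reserved region straddles a 2MiB boundary, since integer division groups
addresses into kGapAlignment-sized buckets. A standalone sketch with
made-up addresses (illustration only, not part of the change):

  #include <cstddef>
  #include <cstdio>

  int main() {
    constexpr size_t kGapAlignment = 2 * 1024 * 1024;  // 2MiB, as on LP64
    // Hypothetical first/last bytes of a reserved mapping that straddles
    // the 2MiB line at 0x200000.
    size_t first_byte = 0x1f0000;
    size_t last_byte = 0x210000;
    // Differing quotients mean the region crosses a bucket boundary, so
    // the linker would insert a randomized run of inaccessible gap pages.
    if (first_byte / kGapAlignment != last_byte / kGapAlignment) {
      printf("crosses a 2MiB boundary: gap pages would be inserted\n");
    }
    return 0;
  }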