Merge "Make tests/libs/CHECK.h work on the host" into main
diff --git a/benchmarks/bionic_benchmarks.cpp b/benchmarks/bionic_benchmarks.cpp
index c0f956b..81f1842 100644
--- a/benchmarks/bionic_benchmarks.cpp
+++ b/benchmarks/bionic_benchmarks.cpp
@@ -523,39 +523,39 @@
int page_sz = getpagesize();
std::vector<int> sub_page_sizes = {page_sz / 2, page_sz / 4, page_sz / 8};
- std::vector<int> multi_page_sizes = {page_sz, page_sz * 2, page_sz * 3, page_sz * 10,
- page_sz * 100};
+ std::vector<int> multi_page_sizes = {page_sz, page_sz * 2, page_sz * 3, page_sz * 10,
+ page_sz * 25, page_sz * 50, page_sz * 75, page_sz * 100};
std::vector<int> all_page_sizes(sub_page_sizes);
all_page_sizes.insert(all_page_sizes.end(), multi_page_sizes.begin(), multi_page_sizes.end());
- std::map<std::string, args_vector_t> args_shorthand {
- {"AT_COMMON_SIZES", GetArgs(kCommonSizes)},
- {"AT_SMALL_SIZES", GetArgs(kSmallSizes)},
- {"AT_MEDIUM_SIZES", GetArgs(kMediumSizes)},
- {"AT_LARGE_SIZES", GetArgs(kLargeSizes)},
- {"AT_ALL_SIZES", GetArgs(all_sizes)},
- {"AT_SUB_PAGE_SIZES", GetArgs(sub_page_sizes)},
- {"AT_MULTI_PAGE_SIZES", GetArgs(multi_page_sizes)},
- {"AT_All_PAGE_SIZES", GetArgs(all_page_sizes)},
+ std::map<std::string, args_vector_t> args_shorthand{
+ {"AT_COMMON_SIZES", GetArgs(kCommonSizes)},
+ {"AT_SMALL_SIZES", GetArgs(kSmallSizes)},
+ {"AT_MEDIUM_SIZES", GetArgs(kMediumSizes)},
+ {"AT_LARGE_SIZES", GetArgs(kLargeSizes)},
+ {"AT_ALL_SIZES", GetArgs(all_sizes)},
+ {"AT_SUB_PAGE_SIZES", GetArgs(sub_page_sizes)},
+ {"AT_MULTI_PAGE_SIZES", GetArgs(multi_page_sizes)},
+ {"AT_ALL_PAGE_SIZES", GetArgs(all_page_sizes)},
- {"AT_ALIGNED_ONEBUF", GetArgs(kCommonSizes, 0)},
- {"AT_ALIGNED_ONEBUF_SMALL", GetArgs(kSmallSizes, 0)},
- {"AT_ALIGNED_ONEBUF_MEDIUM", GetArgs(kMediumSizes, 0)},
- {"AT_ALIGNED_ONEBUF_LARGE", GetArgs(kLargeSizes, 0)},
- {"AT_ALIGNED_ONEBUF_ALL", GetArgs(all_sizes, 0)},
+ {"AT_ALIGNED_ONEBUF", GetArgs(kCommonSizes, 0)},
+ {"AT_ALIGNED_ONEBUF_SMALL", GetArgs(kSmallSizes, 0)},
+ {"AT_ALIGNED_ONEBUF_MEDIUM", GetArgs(kMediumSizes, 0)},
+ {"AT_ALIGNED_ONEBUF_LARGE", GetArgs(kLargeSizes, 0)},
+ {"AT_ALIGNED_ONEBUF_ALL", GetArgs(all_sizes, 0)},
- {"AT_ALIGNED_TWOBUF", GetArgs(kCommonSizes, 0, 0)},
- {"AT_ALIGNED_TWOBUF_SMALL", GetArgs(kSmallSizes, 0, 0)},
- {"AT_ALIGNED_TWOBUF_MEDIUM", GetArgs(kMediumSizes, 0, 0)},
- {"AT_ALIGNED_TWOBUF_LARGE", GetArgs(kLargeSizes, 0, 0)},
- {"AT_ALIGNED_TWOBUF_ALL", GetArgs(all_sizes, 0, 0)},
+ {"AT_ALIGNED_TWOBUF", GetArgs(kCommonSizes, 0, 0)},
+ {"AT_ALIGNED_TWOBUF_SMALL", GetArgs(kSmallSizes, 0, 0)},
+ {"AT_ALIGNED_TWOBUF_MEDIUM", GetArgs(kMediumSizes, 0, 0)},
+ {"AT_ALIGNED_TWOBUF_LARGE", GetArgs(kLargeSizes, 0, 0)},
+ {"AT_ALIGNED_TWOBUF_ALL", GetArgs(all_sizes, 0, 0)},
- // Do not exceed 512. that is about the largest number of properties
- // that can be created with the current property area size.
- {"NUM_PROPS", args_vector_t{ {1}, {4}, {16}, {64}, {128}, {256}, {512} }},
+ // Do not exceed 512; that is about the largest number of properties
+ // that can be created with the current property area size.
+ {"NUM_PROPS", args_vector_t{{1}, {4}, {16}, {64}, {128}, {256}, {512}}},
- {"MATH_COMMON", args_vector_t{ {0}, {1}, {2}, {3} }},
- {"MATH_SINCOS_COMMON", args_vector_t{ {0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} }},
+ {"MATH_COMMON", args_vector_t{{0}, {1}, {2}, {3}}},
+ {"MATH_SINCOS_COMMON", args_vector_t{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}},
};
args_vector_t args_onebuf;
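For context on how these shorthands are consumed: each <args> string in a suite XML (see syscall.xml below) is looked up in args_shorthand and expanded into concrete benchmark arguments. A minimal sketch of that lookup, assuming args_vector_t is a vector of argument lists, with a hypothetical ResolveArgs helper standing in for the real driver code:

  #include <cstdint>
  #include <map>
  #include <string>
  #include <vector>

  using args_vector_t = std::vector<std::vector<int64_t>>;

  // Hypothetical helper: expand an <args> shorthand such as
  // "AT_ALL_PAGE_SIZES" into its concrete argument lists. Returns nullptr
  // for unknown strings so the caller can fall back to literal parsing.
  static const args_vector_t* ResolveArgs(
      const std::map<std::string, args_vector_t>& args_shorthand,
      const std::string& args_string) {
    auto it = args_shorthand.find(args_string);
    return it != args_shorthand.end() ? &it->second : nullptr;
  }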
diff --git a/benchmarks/suites/syscall.xml b/benchmarks/suites/syscall.xml
index 94b1cf5..5a0f904 100644
--- a/benchmarks/suites/syscall.xml
+++ b/benchmarks/suites/syscall.xml
@@ -2,42 +2,42 @@
<fn>
<name>BM_syscall_mmap_anon_rw</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_noreserve</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_none</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_rw_fixed</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_none_fixed</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_rd_priv</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_rw_shared</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_rw_priv_fixed_start</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_rw_priv_fixed_mid</name>
@@ -81,40 +81,40 @@
<fn>
<name>BM_syscall_mmap_anon_mprotect_rw_to_rd</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_mprotect_rw_to_none</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_anon_mprotect_rd_to_none</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_mprotect_rw_to_rd</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_mprotect_rw_to_none</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_mprotect_none_to_rw</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_mprotect_none_to_rd</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
<fn>
<name>BM_syscall_mmap_file_mprotect_rd_to_none</name>
<iterations>10</iterations>
- <args>AT_All_PAGE_SIZES</args>
+ <args>AT_ALL_PAGE_SIZES</args>
</fn>
diff --git a/benchmarks/syscall_mm_benchmark.cpp b/benchmarks/syscall_mm_benchmark.cpp
index 04c4bea..1f22f31 100644
--- a/benchmarks/syscall_mm_benchmark.cpp
+++ b/benchmarks/syscall_mm_benchmark.cpp
@@ -39,8 +39,8 @@
};
struct MprotectParams {
- int from_prot;
- int to_prot;
+ int initial_prot;
+ int mprotect_prot;
int64_t size;
};
@@ -134,7 +134,7 @@
MmapBenchmark(state, params, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_rw, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_rw, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_anon_noreserve(benchmark::State& state) {
struct MmapParams params = {
@@ -145,7 +145,7 @@
MmapBenchmark(state, params, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_noreserve, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_noreserve, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_anon_none(benchmark::State& state) {
struct MmapParams params = {
@@ -156,7 +156,7 @@
MmapBenchmark(state, params, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_none, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_none, "AT_ALL_PAGE_SIZES");
// anon fixed mmap
static void BM_syscall_mmap_anon_rw_fixed(benchmark::State& state) {
@@ -168,7 +168,7 @@
MmapFixedBenchmark(state, params, -1, params.size, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_rw_fixed, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_rw_fixed, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_anon_none_fixed(benchmark::State& state) {
struct MmapParams params = {
@@ -179,7 +179,7 @@
MmapFixedBenchmark(state, params, -1, params.size, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_none_fixed, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_none_fixed, "AT_ALL_PAGE_SIZES");
// file mmap
static void BM_syscall_mmap_file_rd_priv(benchmark::State& state) {
@@ -191,7 +191,7 @@
MmapFileBenchmark(state, params, params.size, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rd_priv, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rd_priv, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_rw_shared(benchmark::State& state) {
struct MmapParams params = {
@@ -202,7 +202,7 @@
MmapFileBenchmark(state, params, params.size, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rw_shared, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rw_shared, "AT_ALL_PAGE_SIZES");
// file fixed mmap
static void BM_syscall_mmap_file_rw_priv_fixed_start(benchmark::State& state) {
@@ -215,7 +215,7 @@
// allocate 3x area and map at the start
MmapFileBenchmark(state, params, params.size * 3, 0);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rw_priv_fixed_start, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_rw_priv_fixed_start, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_rw_priv_fixed_mid(benchmark::State& state) {
struct MmapParams params = {
@@ -321,19 +321,19 @@
* Guarantee that physical memory pages are allocated for this region to prevent
* a segmentation fault when using mprotect to change permissions.
*/
- if (params.from_prot & PROT_WRITE) {
+ if (params.initial_prot & PROT_WRITE) {
MakeAllocationResident(addr, params.size, page_sz);
}
state.ResumeTiming();
- if (mprotect(addr, params.size, params.to_prot) != 0) {
+ if (mprotect(addr, params.size, params.mprotect_prot) != 0) {
state.SkipWithError(android::base::StringPrintf("mprotect failed: %m"));
break;
}
state.PauseTiming();
// Revert back to the original protection
- int res = mprotect(addr, params.size, params.from_prot);
+ int res = mprotect(addr, params.size, params.initial_prot);
state.ResumeTiming();
if (res != 0) {
state.SkipWithError(
@@ -345,7 +345,7 @@
static void MprotectBenchmarkWithMmapAnon(benchmark::State& state,
const struct MprotectParams& params) {
- void* addr = mmap(nullptr, params.size, params.from_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ void* addr = mmap(nullptr, params.size, params.initial_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
if (addr == MAP_FAILED) {
state.SkipWithError(android::base::StringPrintf("mmap failed: %m"));
return;
@@ -359,33 +359,33 @@
static void BM_syscall_mmap_anon_mprotect_rw_to_rd(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ | PROT_WRITE,
- .to_prot = PROT_READ,
+ .initial_prot = PROT_READ | PROT_WRITE,
+ .mprotect_prot = PROT_READ,
.size = state.range(0),
};
MprotectBenchmarkWithMmapAnon(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_rd, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_rd, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_anon_mprotect_rw_to_none(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ | PROT_WRITE,
- .to_prot = PROT_NONE,
+ .initial_prot = PROT_READ | PROT_WRITE,
+ .mprotect_prot = PROT_NONE,
.size = state.range(0),
};
MprotectBenchmarkWithMmapAnon(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_none, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rw_to_none, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_anon_mprotect_rd_to_none(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ,
- .to_prot = PROT_NONE,
+ .initial_prot = PROT_READ,
+ .mprotect_prot = PROT_NONE,
.size = state.range(0),
};
MprotectBenchmarkWithMmapAnon(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rd_to_none, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_anon_mprotect_rd_to_none, "AT_ALL_PAGE_SIZES");
static void MprotectBenchmarkWithMmapFile(benchmark::State& state,
const struct MprotectParams& params) {
@@ -401,7 +401,7 @@
return;
}
- void* addr = mmap(nullptr, params.size, params.from_prot, MAP_PRIVATE, tf.fd, 0);
+ void* addr = mmap(nullptr, params.size, params.initial_prot, MAP_PRIVATE, tf.fd, 0);
if (addr == MAP_FAILED) {
state.SkipWithError(android::base::StringPrintf("mmap failed: %m"));
return;
@@ -415,50 +415,50 @@
static void BM_syscall_mmap_file_mprotect_rw_to_rd(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ | PROT_WRITE,
- .to_prot = PROT_READ,
+ .initial_prot = PROT_READ | PROT_WRITE,
+ .mprotect_prot = PROT_READ,
.size = state.range(0),
};
MprotectBenchmarkWithMmapFile(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_rd, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_rd, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_mprotect_rw_to_none(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ | PROT_WRITE,
- .to_prot = PROT_NONE,
+ .initial_prot = PROT_READ | PROT_WRITE,
+ .mprotect_prot = PROT_NONE,
.size = state.range(0),
};
MprotectBenchmarkWithMmapFile(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_none, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rw_to_none, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_mprotect_none_to_rw(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_NONE,
- .to_prot = PROT_READ | PROT_WRITE,
+ .initial_prot = PROT_NONE,
+ .mprotect_prot = PROT_READ | PROT_WRITE,
.size = state.range(0),
};
MprotectBenchmarkWithMmapFile(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rw, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rw, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_mprotect_none_to_rd(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_NONE,
- .to_prot = PROT_READ,
+ .initial_prot = PROT_NONE,
+ .mprotect_prot = PROT_READ,
.size = state.range(0),
};
MprotectBenchmarkWithMmapFile(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rd, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_none_to_rd, "AT_ALL_PAGE_SIZES");
static void BM_syscall_mmap_file_mprotect_rd_to_none(benchmark::State& state) {
struct MprotectParams params = {
- .from_prot = PROT_READ,
- .to_prot = PROT_NONE,
+ .initial_prot = PROT_READ,
+ .mprotect_prot = PROT_NONE,
.size = state.range(0),
};
MprotectBenchmarkWithMmapFile(state, params);
}
-BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rd_to_none, "AT_All_PAGE_SIZES");
+BIONIC_BENCHMARK_WITH_ARG(BM_syscall_mmap_file_mprotect_rd_to_none, "AT_ALL_PAGE_SIZES");
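All of the mprotect benchmarks above share one shape: map a region with initial_prot, time only the mprotect() to mprotect_prot, and revert the protection outside the timed region. A condensed sketch of that pattern, assuming Google Benchmark and omitting the error handling and MakeAllocationResident step shown in the diff:

  #include <sys/mman.h>
  #include <benchmark/benchmark.h>

  static void MprotectSketch(benchmark::State& state, const MprotectParams& params) {
    // The region starts with the protection the benchmark transitions from.
    void* addr = mmap(nullptr, params.size, params.initial_prot,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    for (auto _ : state) {
      mprotect(addr, params.size, params.mprotect_prot);  // the timed transition
      state.PauseTiming();
      mprotect(addr, params.size, params.initial_prot);   // untimed revert
      state.ResumeTiming();
    }
    munmap(addr, params.size);
  }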
diff --git a/libc/Android.bp b/libc/Android.bp
index 7098ab1..4020ede 100644
--- a/libc/Android.bp
+++ b/libc/Android.bp
@@ -2061,11 +2061,10 @@
cc_object {
name: "crtbrand",
- // crtbrand.c needs <stdint.h> and a #define for the platform SDK version.
local_include_dirs: [
- "include",
"private", // crtbrand.S depends on private/bionic_asm_note.h
],
+ // crtbrand.S needs to know the platform SDK version.
product_variables: {
platform_sdk_version: {
asflags: ["-DPLATFORM_SDK_VERSION=%d"],
@@ -2092,7 +2091,6 @@
cc_object {
name: "crtend_so",
local_include_dirs: [
- "include",
"private", // crtend_so.S depends on private/bionic_asm_arm64.h
],
srcs: ["arch-common/bionic/crtend_so.S"],
@@ -2146,7 +2144,6 @@
// name clash between gcc and bionic.
name: "crtend_android",
local_include_dirs: [
- "include",
"private", // crtend.S depends on private/bionic_asm_arm64.h
],
srcs: ["arch-common/bionic/crtend.S"],
@@ -2157,7 +2154,6 @@
cc_object {
name: "crt_pad_segment",
local_include_dirs: [
- "include",
"private", // crt_pad_segment.S depends on private/bionic_asm_note.h
],
srcs: ["arch-common/bionic/crt_pad_segment.S"],
diff --git a/libc/include/android/versioning.h b/libc/include/android/versioning.h
index 08fe45d..c5adc02 100644
--- a/libc/include/android/versioning.h
+++ b/libc/include/android/versioning.h
@@ -76,3 +76,9 @@
#define __VERSIONER_FORTIFY_INLINE
#endif // defined(__BIONIC_VERSIONER)
+
+// Vendor modules do not follow SDK versioning. Ignore NDK guards for vendor modules.
+#if defined(__ANDROID_VENDOR__)
+#undef __BIONIC_AVAILABILITY
+#define __BIONIC_AVAILABILITY(x)
+#endif // defined(__ANDROID_VENDOR__)
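For reference, __BIONIC_AVAILABILITY is the macro underneath the NDK availability annotations such as __INTRODUCED_IN(), so defining it to nothing makes every guarded declaration look unconditionally available to vendor code. A reduced illustration; the real header expands to a clang availability attribute, and the declaration below is hypothetical:

  // Simplified: __INTRODUCED_IN normally routes through __BIONIC_AVAILABILITY,
  // which attaches __attribute__((__availability__(android, introduced=N))).
  #define __INTRODUCED_IN(api_level) __BIONIC_AVAILABILITY(introduced=api_level)

  // Hypothetical guarded declaration: with the vendor #undef above,
  // __BIONIC_AVAILABILITY(x) expands to nothing, the attribute disappears,
  // and no unguarded-availability diagnostics fire in vendor builds.
  int some_new_libc_function(int fd) __INTRODUCED_IN(35);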
diff --git a/linker/Android.bp b/linker/Android.bp
index 0533ae9..f87a92e 100644
--- a/linker/Android.bp
+++ b/linker/Android.bp
@@ -573,6 +573,7 @@
"libasync_safe",
"libbase",
"liblog_for_runtime_apex",
+ "libprocinfo", // For procinfo::MappedFileSize()
],
data_libs: [
diff --git a/linker/linker.cpp b/linker/linker.cpp
index b0caedd..81869b3 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -2866,11 +2866,12 @@
TlsSegment tls_segment;
if (__bionic_get_tls_segment(phdr, phnum, load_bias, &tls_segment)) {
+ // The loader does not (currently) support ELF TLS, so it shouldn't have
+ // a TLS segment.
+ CHECK(!relocating_linker && "TLS not supported in loader");
if (!__bionic_check_tls_alignment(&tls_segment.alignment)) {
- if (!relocating_linker) {
- DL_ERR("TLS segment alignment in \"%s\" is not a power of 2: %zu",
- get_realpath(), tls_segment.alignment);
- }
+ DL_ERR("TLS segment alignment in \"%s\" is not a power of 2: %zu", get_realpath(),
+ tls_segment.alignment);
return false;
}
tls_ = std::make_unique<soinfo_tls>();
@@ -3364,7 +3365,7 @@
"\"%s\" has text relocations",
get_realpath());
add_dlwarning(get_realpath(), "text relocations");
- if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_unprotect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s", get_realpath(), strerror(errno));
return false;
}
@@ -3380,7 +3381,7 @@
#if !defined(__LP64__)
if (has_text_relocations) {
// All relocations are done, we can protect our segments back to read-only.
- if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_protect_segments(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
@@ -3418,7 +3419,7 @@
}
bool soinfo::protect_relro() {
- if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
+ if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias, should_pad_segments_) < 0) {
DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
get_realpath(), strerror(errno));
return false;
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index d6592af..c9dcfa3 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -201,6 +201,7 @@
const ElfW(Phdr)* phdr;
size_t phdr_count;
ElfW(Addr) entry_point;
+ bool should_pad_segments;
};
static ExecutableInfo get_executable_info(const char* arg_path) {
@@ -293,6 +294,7 @@
result.phdr = elf_reader.loaded_phdr();
result.phdr_count = elf_reader.phdr_count();
result.entry_point = elf_reader.entry_point();
+ result.should_pad_segments = elf_reader.should_pad_segments();
return result;
}
@@ -366,6 +368,7 @@
somain = si;
si->phdr = exe_info.phdr;
si->phnum = exe_info.phdr_count;
+ si->set_should_pad_segments(exe_info.should_pad_segments);
get_elf_base_from_phdr(si->phdr, si->phnum, &si->base, &si->load_bias);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->dynamic = nullptr;
@@ -399,7 +402,7 @@
auto note_gnu_property = GnuPropertySection(somain);
if (note_gnu_property.IsBTICompatible() &&
(phdr_table_protect_segments(somain->phdr, somain->phnum, somain->load_bias,
- &note_gnu_property) < 0)) {
+ somain->should_pad_segments(), &note_gnu_property) < 0)) {
__linker_error("error: can't protect segments for \"%s\": %s", exe_info.path.c_str(),
strerror(errno));
}
@@ -493,6 +496,12 @@
}
si->increment_ref_count();
}
+
+ // Exit early for ldd. We don't want to run the code that was loaded, so skip
+ // the constructor calls. Skip CFI setup because it would call __cfi_init in
+ // libdl.so.
+ if (g_is_ldd) _exit(EXIT_SUCCESS);
+
#if defined(__aarch64__)
// This has to happen after the find_libraries, which will have collected any possible
// libraries that request memtag_stack in the dynamic section.
@@ -826,8 +835,6 @@
ElfW(Addr) start_address = linker_main(args, exe_to_load);
- if (g_is_ldd) _exit(EXIT_SUCCESS);
-
INFO("[ Jumping to _start (%p)... ]", reinterpret_cast<void*>(start_address));
// Return the address that the calling assembly stub should jump to.
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp
index 82b37a4..074012d 100644
--- a/linker/linker_phdr.cpp
+++ b/linker/linker_phdr.cpp
@@ -196,7 +196,7 @@
// For Armv8.5-A loaded executable segments may require PROT_BTI.
if (note_gnu_property_.IsBTICompatible()) {
did_load_ = (phdr_table_protect_segments(phdr_table_, phdr_num_, load_bias_,
- &note_gnu_property_) == 0);
+ should_pad_segments_, &note_gnu_property_) == 0);
}
#endif
}
@@ -756,6 +756,41 @@
return true;
}
+static inline void _extend_load_segment_vma(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ size_t phdr_idx, ElfW(Addr)* p_memsz,
+ ElfW(Addr)* p_filesz, bool should_pad_segments) {
+ const ElfW(Phdr)* phdr = &phdr_table[phdr_idx];
+ const ElfW(Phdr)* next = nullptr;
+ size_t next_idx = phdr_idx + 1;
+
+ if (phdr->p_align == kPageSize || !should_pad_segments) {
+ return;
+ }
+
+ if (next_idx < phdr_count && phdr_table[next_idx].p_type == PT_LOAD) {
+ next = &phdr_table[next_idx];
+ }
+
+ // No extension is needed if this is the last LOAD segment or if it is not fully file-backed (p_memsz != p_filesz)
+ if (!next || *p_memsz != *p_filesz) {
+ return;
+ }
+
+ ElfW(Addr) next_start = page_start(next->p_vaddr);
+ ElfW(Addr) curr_end = page_end(phdr->p_vaddr + *p_memsz);
+
+ // If adjacent segment mappings are already contiguous or overlap, no extension is needed.
+ if (curr_end >= next_start) {
+ return;
+ }
+
+ // Extend the LOAD segment mapping to be contiguous with that of
+ // the next LOAD segment.
+ ElfW(Addr) extend = next_start - curr_end;
+ *p_memsz += extend;
+ *p_filesz += extend;
+}
+
bool ElfReader::LoadSegments() {
for (size_t i = 0; i < phdr_num_; ++i) {
const ElfW(Phdr)* phdr = &phdr_table_[i];
@@ -764,18 +799,22 @@
continue;
}
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+ _extend_load_segment_vma(phdr_table_, phdr_num_, i, &p_memsz, &p_filesz, should_pad_segments_);
+
// Segment addresses in memory.
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
- ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
+ ElfW(Addr) seg_end = seg_start + p_memsz;
ElfW(Addr) seg_page_start = page_start(seg_start);
ElfW(Addr) seg_page_end = page_end(seg_end);
- ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
+ ElfW(Addr) seg_file_end = seg_start + p_filesz;
// File offsets.
ElfW(Addr) file_start = phdr->p_offset;
- ElfW(Addr) file_end = file_start + phdr->p_filesz;
+ ElfW(Addr) file_end = file_start + p_filesz;
ElfW(Addr) file_page_start = page_start(file_start);
ElfW(Addr) file_length = file_end - file_page_start;
@@ -785,12 +824,12 @@
return false;
}
- if (file_end > static_cast<size_t>(file_size_)) {
+ if (file_start + phdr->p_filesz > static_cast<size_t>(file_size_)) {
DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
" p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
reinterpret_cast<void*>(phdr->p_filesz),
- reinterpret_cast<void*>(file_end), file_size_);
+ reinterpret_cast<void*>(file_start + phdr->p_filesz), file_size_);
return false;
}
@@ -830,8 +869,18 @@
// if the segment is writable, and does not end on a page boundary,
// zero-fill it until the page limit.
- if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
- memset(reinterpret_cast<void*>(seg_file_end), 0, page_size() - page_offset(seg_file_end));
+ //
+ // Do not attempt to zero the extended region past the first partial page,
+ // since doing so may:
+ // 1) Result in a SIGBUS, as the region is not backed by the underlying
+ // file.
+ // 2) Break the COW backing, faulting in new anon pages for a region
+ // that will not be used.
+
+ // _seg_file_end = unextended seg_file_end
+ uint64_t _seg_file_end = seg_start + phdr->p_filesz;
+ if ((phdr->p_flags & PF_W) != 0 && page_offset(_seg_file_end) > 0) {
+ memset(reinterpret_cast<void*>(_seg_file_end), 0, kPageSize - page_offset(_seg_file_end));
}
seg_file_end = page_end(seg_file_end);
@@ -864,17 +913,21 @@
* phdr_table_protect_segments and phdr_table_unprotect_segments.
*/
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int extra_prot_flags) {
- const ElfW(Phdr)* phdr = phdr_table;
- const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
+ ElfW(Addr) load_bias, int extra_prot_flags,
+ bool should_pad_segments) {
+ for (size_t i = 0; i < phdr_count; ++i) {
+ const ElfW(Phdr)* phdr = &phdr_table[i];
- for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
continue;
}
- ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
- ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+ _extend_load_segment_vma(phdr_table, phdr_count, i, &p_memsz, &p_filesz, should_pad_segments);
+
+ ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr + load_bias);
+ ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
int prot = PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags;
if ((prot & PROT_WRITE) != 0) {
@@ -909,19 +962,21 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* prop -> GnuPropertySection or nullptr
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, const GnuPropertySection* prop __unused) {
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ const GnuPropertySection* prop __unused) {
int prot = 0;
#if defined(__aarch64__)
if ((prop != nullptr) && prop->IsBTICompatible()) {
prot |= PROT_BTI;
}
#endif
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot);
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, prot, should_pad_segments);
}
/* Change the protection of all loaded segments in memory to writable.
@@ -937,19 +992,82 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias) {
- return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
+ size_t phdr_count, ElfW(Addr) load_bias,
+ bool should_pad_segments) {
+ return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE,
+ should_pad_segments);
+}
+
+static inline void _extend_gnu_relro_prot_end(const ElfW(Phdr)* relro_phdr,
+ const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, ElfW(Addr)* seg_page_end,
+ bool should_pad_segments) {
+ // Find the index and phdr of the LOAD containing the GNU_RELRO segment
+ for (size_t index = 0; index < phdr_count; ++index) {
+ const ElfW(Phdr)* phdr = &phdr_table[index];
+
+ if (phdr->p_type == PT_LOAD && phdr->p_vaddr == relro_phdr->p_vaddr) {
+ // If the PT_GNU_RELRO mem size is not at least as large as the corresponding
+ // LOAD segment mem size, we need to protect only a partial region of the
+ // LOAD segment and therefore cannot avoid a VMA split.
+ //
+ // Note: Don't check the page-aligned mem sizes since the extended protection
+ // may incorrectly write protect non-relocation data.
+ //
+ // Example:
+ //
+ //           |---- 3K ----|-- 1K --|---- 3K ----|-- 1K --|
+ //           ---------------------------------------------
+ //           |            |        |            |        |
+ //     SEG X |     RO     |   RO   |     RW     |        | SEG Y
+ //           |            |        |            |        |
+ //           ---------------------------------------------
+ //           |                     |            |
+ //           |                     |            |
+ //           |                     |            |
+ //      relro_vaddr           relro_vaddr  relro_vaddr
+ //     (load_vaddr)                +            +
+ //                            relro_memsz  load_memsz
+ //
+ //           ---------------------------------------------
+ //           |         PAGE        |         PAGE        |
+ //           ---------------------------------------------
+ //                                 |      Potential      |
+ //                                 |---- Extended RO ----|
+ //                                 |      Protection     |
+ //
+ // If the check below uses page aligned mem sizes it will cause incorrect write
+ // protection of the 3K RW part of the LOAD segment containing the GNU_RELRO.
+ if (relro_phdr->p_memsz < phdr->p_memsz) {
+ return;
+ }
+
+ ElfW(Addr) p_memsz = phdr->p_memsz;
+ ElfW(Addr) p_filesz = phdr->p_filesz;
+
+ // Attempt extending the VMA (mprotect range). Without extending the range,
+ // mprotect will only RO protect a part of the extended RW LOAD segment, which
+ // will leave an extra split RW VMA (the gap).
+ _extend_load_segment_vma(phdr_table, phdr_count, index, &p_memsz, &p_filesz,
+ should_pad_segments);
+
+ *seg_page_end = page_end(phdr->p_vaddr + p_memsz + load_bias);
+ return;
+ }
+ }
}
/* Used internally by phdr_table_protect_gnu_relro and
* phdr_table_unprotect_gnu_relro.
*/
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, int prot_flags) {
+ ElfW(Addr) load_bias, int prot_flags,
+ bool should_pad_segments) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
@@ -976,6 +1094,8 @@
// that it starts on a page boundary.
ElfW(Addr) seg_page_start = page_start(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = page_end(phdr->p_vaddr + phdr->p_memsz) + load_bias;
+ _extend_gnu_relro_prot_end(phdr, phdr_table, phdr_count, load_bias, &seg_page_end,
+ should_pad_segments);
int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
seg_page_end - seg_page_start,
@@ -1000,12 +1120,14 @@
* phdr_table -> program header table
* phdr_count -> number of entries in tables
* load_bias -> load bias
+ * should_pad_segments -> whether segments were extended to avoid gaps in the memory map
* Return:
* 0 on success, -1 on failure (error code in errno).
*/
-int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
- size_t phdr_count, ElfW(Addr) load_bias) {
- return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
+int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
+ ElfW(Addr) load_bias, bool should_pad_segments) {
+ return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ,
+ should_pad_segments);
}
/* Serialize the GNU relro segments to the given file descriptor. This can be
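A worked example of the extension arithmetic in _extend_load_segment_vma, assuming a 4KiB kPageSize and segments linked with 16KiB alignment (-Wl,-z,max-page-size=16384); the addresses are hypothetical:

  // curr LOAD: p_vaddr = 0x0,    p_filesz = p_memsz = 0x5000
  // next LOAD: p_vaddr = 0x8000  (16KiB-aligned at link time)
  //
  //   curr_end   = page_end(0x0 + 0x5000) = 0x5000
  //   next_start = page_start(0x8000)     = 0x8000
  //   extend     = 0x8000 - 0x5000        = 0x3000
  //
  // p_memsz and p_filesz both grow by 0x3000, so the mapping for curr now
  // ends exactly where next begins and no 12KiB gap VMA is left between them.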
diff --git a/linker/linker_phdr.h b/linker/linker_phdr.h
index e5b87bb..4deed33 100644
--- a/linker/linker_phdr.h
+++ b/linker/linker_phdr.h
@@ -128,13 +128,14 @@
size_t phdr_table_get_maximum_alignment(const ElfW(Phdr)* phdr_table, size_t phdr_count);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias, const GnuPropertySection* prop = nullptr);
+ ElfW(Addr) load_bias, bool should_pad_segments,
+ const GnuPropertySection* prop = nullptr);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias);
+ ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
- ElfW(Addr) load_bias);
+ ElfW(Addr) load_bias, bool should_pad_segments);
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int fd, size_t* file_offset);
diff --git a/linker/linker_relocate.cpp b/linker/linker_relocate.cpp
index 952dade..080570d 100644
--- a/linker/linker_relocate.cpp
+++ b/linker/linker_relocate.cpp
@@ -187,7 +187,8 @@
auto protect_segments = [&]() {
// Make .text executable.
if (phdr_table_protect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias) < 0) {
+ relocator.si->load_bias,
+ relocator.si->should_pad_segments()) < 0) {
DL_ERR("can't protect segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
@@ -197,7 +198,8 @@
auto unprotect_segments = [&]() {
// Make .text writable.
if (phdr_table_unprotect_segments(relocator.si->phdr, relocator.si->phnum,
- relocator.si->load_bias) < 0) {
+ relocator.si->load_bias,
+ relocator.si->should_pad_segments()) < 0) {
DL_ERR("can't unprotect loadable segments for \"%s\": %s",
relocator.si->get_realpath(), strerror(errno));
return false;
@@ -419,6 +421,7 @@
} else {
CHECK(found_in->get_tls() != nullptr); // We rejected a missing TLS segment above.
module_id = found_in->get_tls()->module_id;
+ CHECK(module_id != kTlsUninitializedModuleId);
}
trace_reloc("RELO TLS_DTPMOD %16p <- %zu %s",
rel_target, module_id, sym_name);
@@ -586,6 +589,11 @@
}
bool soinfo::relocate(const SymbolLookupList& lookup_list) {
+ // For ldd, don't apply relocations because TLS segments are not registered.
+ // We don't care whether ldd diagnoses unresolved symbols.
+ if (g_is_ldd) {
+ return true;
+ }
VersionTracker version_tracker;
diff --git a/linker/linker_soinfo.cpp b/linker/linker_soinfo.cpp
index 3e7506c..802c06a 100644
--- a/linker/linker_soinfo.cpp
+++ b/linker/linker_soinfo.cpp
@@ -503,15 +503,13 @@
}
void soinfo::call_pre_init_constructors() {
- if (g_is_ldd) return;
-
// DT_PREINIT_ARRAY functions are called before any other constructors for executables,
// but ignored in a shared library.
call_array("DT_PREINIT_ARRAY", preinit_array_, preinit_array_count_, false, get_realpath());
}
void soinfo::call_constructors() {
- if (constructors_called || g_is_ldd) {
+ if (constructors_called) {
return;
}
diff --git a/linker/linker_tls.cpp b/linker/linker_tls.cpp
index 97892f4..e90b8cb 100644
--- a/linker/linker_tls.cpp
+++ b/linker/linker_tls.cpp
@@ -31,14 +31,15 @@
#include <vector>
#include "async_safe/CHECK.h"
+#include "linker_globals.h"
+#include "linker_main.h"
+#include "linker_soinfo.h"
#include "private/ScopedRWLock.h"
#include "private/ScopedSignalBlocker.h"
#include "private/bionic_defs.h"
#include "private/bionic_elf_tls.h"
#include "private/bionic_globals.h"
#include "private/linker_native_bridge.h"
-#include "linker_main.h"
-#include "linker_soinfo.h"
static bool g_static_tls_finished;
static std::vector<TlsModule> g_tls_modules;
@@ -109,7 +110,11 @@
void linker_setup_exe_static_tls(const char* progname) {
soinfo* somain = solist_get_somain();
StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
- if (somain->get_tls() == nullptr) {
+
+ // For ldd, don't add the executable's TLS segment to the static TLS layout.
+ // It is likely to trigger the underaligned TLS segment error on arm32/arm64
+ // when the ldd argument is actually a shared object.
+ if (somain->get_tls() == nullptr || g_is_ldd) {
layout.reserve_exe_segment_and_tcb(nullptr, progname);
} else {
register_tls_module(somain, layout.reserve_exe_segment_and_tcb(&somain->get_tls()->segment, progname));
@@ -133,6 +138,11 @@
}
void register_soinfo_tls(soinfo* si) {
+ // ldd skips registration of the executable's TLS segment above to avoid the
+ // arm32/arm64 underalignment error. For consistency, also skip registration
+ // of TLS segments here, for shared objects.
+ if (g_is_ldd) return;
+
soinfo_tls* si_tls = si->get_tls();
if (si_tls == nullptr || si_tls->module_id != kTlsUninitializedModuleId) {
return;
diff --git a/tests/Android.bp b/tests/Android.bp
index 0f4a942..89d2267 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -785,6 +785,7 @@
],
static_libs: [
"libbase",
+ "libprocinfo",
],
include_dirs: [
"bionic/libc",
diff --git a/tests/dlext_test.cpp b/tests/dlext_test.cpp
index a0f037a..d55e5b4 100644
--- a/tests/dlext_test.cpp
+++ b/tests/dlext_test.cpp
@@ -31,6 +31,7 @@
#include <android-base/test_utils.h>
#include <sys/mman.h>
+#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/wait.h>
@@ -2046,6 +2047,11 @@
-1, 0));
ASSERT_TRUE(reinterpret_cast<void*>(reserved_addr) != MAP_FAILED);
+ struct stat file_stat;
+ int ret = TEMP_FAILURE_RETRY(stat(private_library_absolute_path.c_str(), &file_stat));
+ ASSERT_EQ(ret, 0) << "Failed to stat library";
+ size_t file_size = file_stat.st_size;
+
for (const auto& rec : maps_to_copy) {
uintptr_t offset = rec.addr_start - addr_start;
size_t size = rec.addr_end - rec.addr_start;
@@ -2053,7 +2059,11 @@
void* map = mmap(addr, size, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
ASSERT_TRUE(map != MAP_FAILED);
- memcpy(map, reinterpret_cast<void*>(rec.addr_start), size);
+ // Attempting the memcpy below from a portion of the map that is off the end of
+ // the backing file will cause the kernel to deliver a SIGBUS.
+ size_t _size = ::android::procinfo::MappedFileSize(rec.addr_start, rec.addr_end,
+ rec.offset, file_size);
+ memcpy(map, reinterpret_cast<void*>(rec.addr_start), _size);
mprotect(map, size, rec.perms);
}