/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_prctl.h"
#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset -> segment file offset
    p_filesz -> segment file size
    p_memsz  -> segment memory size (always >= p_filesz)
    p_vaddr  -> segment's virtual address
    p_flags  -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

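// Worked instance of the formula above (editorial example, not loader code):
// if the first segment from the example list ends up at
// phdr0_load_address = 0xa0030000, then
//
//   load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000
//
// and the second segment's p_vaddr of 0x40000 therefore maps to
// 0xa0000000 + 0x40000 = 0xa0040000, preserving the original layout.
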
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)  (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                            MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                            MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

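// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC,
// so a read-only executable segment is mapped without PROT_WRITE.
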
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_MIPS) return "EM_MIPS";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic: %02x%02x%02x%02x", name_.c_str(),
           header_.e_ident[0], header_.e_ident[1], header_.e_ident[2], header_.e_ident[3]);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d (%s)", name_.c_str(), header_.e_machine,
           EM_to_string(header_.e_machine));
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of which can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

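// For example, with file_offset_ = 0 and file_size_ = 0x10000, a request for
// offset 0x8000 with size 0x9000 is rejected (range_end 0x11000 exceeds the
// file size), as is any offset that is not a multiple of 'alignment'.
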
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size matches PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

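// Usage sketch (hypothetical caller; ReserveAddressSpace below does the same):
//
//   ElfW(Addr) min_vaddr;
//   size_t load_size = phdr_table_get_load_size(phdr_table, phdr_count, &min_vaddr);
//   // Reserve 'load_size' bytes somewhere; the chosen start address minus
//   // min_vaddr then becomes the load_bias for the whole library.
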
// Reserve a virtual address range such that if its limits were extended to
// the next 'align' boundary, it would not overlap with any existing mapping.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in ART for the image mapping, and it is pretty
  // important. Don't mess with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the
  // former does not provide a mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is
  // still inside the mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;

  // arc4random* is not available in init because /dev/urandom hasn't yet been
  // created. Don't randomize then.
  size_t n = is_init() ? 0 : arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}

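// Worked instance of the sizing above (editorial; assumes 4KiB pages):
// for size = 0x5000 and align = 0x10000, mmap_size = 0x10000 + 0x10000 - 0x1000
// = 0x1f000. Any mapping that large contains a 0x10000-aligned 'first' with
// first + size still inside it, so a random page-aligned start in
// [first, last] can be kept and the excess on both sides unmapped.
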
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(__ANDROID_API_O__,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally to set the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

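// Typical sequence during linking (sketch; the real call sites are in the
// soinfo relocation code elsewhere in the linker, not in this file):
//
//   phdr_table_unprotect_segments(phdr, count, bias);  // make segments writable
//   // ... apply all relocations ...
//   phdr_table_protect_segments(phdr, count, bias);    // restore original flags
//   phdr_table_protect_gnu_relro(phdr, count, bias);   // then seal RELRO pages
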
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

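// Sketch of the intended cross-process pairing (editorial; in practice this
// is driven by android_dlopen_ext with ANDROID_DLEXT_WRITE_RELRO in one
// process and ANDROID_DLEXT_USE_RELRO in another, at the same load address):
//
//   // Process A, after relocating:
//   phdr_table_serialize_gnu_relro(phdr, count, bias, relro_fd);
//   // Process B, library loaded at the same address:
//   phdr_table_map_gnu_relro(phdr, count, bias, relro_fd);
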
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}