/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"
#include "linker_utils.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to the portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

    0x30000...0x34000
    0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

    0xa0030000...0xa0034000
    0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

    load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around past UINT32_MAX for large p_vaddr values.)

  phdr0_load_address itself must start at a page boundary, with
  the segment's real content starting at:

    phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
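
  For example (assuming 4KiB pages), if phdr0->p_vaddr is 0x30200 and the
  loader places the segment's first page at 0xa0030000, then:

    load_bias = 0xa0030000 - PAGE_START(0x30200)
              = 0xa0030000 - 0x30000
              = 0xa0000000

  and the segment's content starts at 0xa0030000 + PAGE_OFFSET(0x30200),
  i.e. at 0xa0030200.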

  Note that ELF requires the following condition to make the mmap()-ing work:

    PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
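
// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
// PROT_READ | PROT_EXEC, the protection of a typical text segment.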

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  CHECK(!did_read_);
  CHECK(!did_load_);
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  CHECK(!did_load_);
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}
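
// A minimal sketch of the expected calling sequence (illustrative only; the
// real call sites live elsewhere in the linker, and the accessor names below
// assume the declarations in linker_phdr.h):
//
//   ElfReader reader;
//   if (reader.Read(name, fd, file_offset, file_size) && reader.Load(extinfo)) {
//     // reader.load_start(), reader.load_size() and reader.load_bias()
//     // now describe the mapped library.
//   }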

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size) {
  off64_t range_start;
  off64_t range_end;

  return safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         range_start < file_size_ &&
         range_end <= file_size_;
}
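
// For example, with file_offset_ == 0 and file_size_ == 0x1000,
// CheckFileRange(0xff8, 0x10) is false (the range would extend past the end
// of the file), and an offset large enough to overflow the addition is
// rejected by safe_add rather than silently wrapping.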

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks.
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size)) {
    DL_ERR("\"%s\" has invalid phdr offset/size", name_.c_str());
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size)) {
    DL_ERR("\"%s\" has invalid shdr offset/size", name_.c_str());
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR("\"%s\" .dynamic section has invalid sh_link: %d", name_.c_str(), dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
           name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    PRINT("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
           name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
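
// For example, for the two segments described in the technical note above
// (vaddr 0x30000/memsz 0x4000 and vaddr 0x40000/memsz 0x8000), this returns
// PAGE_END(0x48000) - PAGE_START(0x30000) == 0x18000 bytes, with
// *out_min_vaddr set to 0x30000.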

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }
    }
  }
  return true;
}
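
// For example (assuming 4KiB pages), a writable segment with page-aligned
// p_vaddr/p_offset, p_filesz == 0x1800 and p_memsz == 0x8000 gets its first
// 0x1800 bytes mapped from the file; the 0x800 bytes from seg_file_end to
// the end of that second page are memset to zero, and the remaining 0x6000
// bytes come from the anonymous zero map above.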

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // Make sure we're never simultaneously writable / executable.
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
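
// Typical usage around relocation (illustrative only):
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);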

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
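
// Illustrative flow: after relocation, one process serializes its relro
// pages with phdr_table_serialize_gnu_relro(); another process that loaded
// the same library at the same address can then call
// phdr_table_map_gnu_relro() (below) with a read-only fd for that file to
// replace its dirty relro pages with clean, shareable file-backed ones.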

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages, at the matching offset within the file.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
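    // Each .ARM.exidx entry is a pair of 32-bit words (8 bytes), hence the
    // entry count below.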
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}