/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"
#include "linker_utils.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
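
/* A worked instance of the formula above, continuing the example: the first
 * segment's page-aligned load address (phdr0_load_address) is 0xa0030000 and
 * PAGE_START(phdr0->p_vaddr) is 0x30000, so:
 *
 *     load_bias = 0xa0030000 - 0x30000 = 0xa0000000
 *
 * and any p_vaddr X from the file then maps to load_bias + X in memory
 * (e.g. 0x40000 -> 0xa0040000).
 */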

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
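
// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC,
// so a segment's ELF permission bits translate directly into mmap()/mprotect()
// protection flags.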

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  CHECK(!did_read_);
  CHECK(!did_load_);
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  CHECK(!did_load_);
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}

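// Illustrative usage sketch (hypothetical caller; the load_start()/load_bias()
// accessors are assumed to be the ones declared in linker_phdr.h):
//
//   ElfReader reader;
//   if (reader.Read("libfoo.so", fd, 0, file_size) && reader.Load(nullptr)) {
//     void* base = reader.load_start();      // base of the reserved mapping
//     ElfW(Addr) bias = reader.load_bias();  // add this to p_vaddr values
//   }
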
const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size) {
  off64_t range_start;
  off64_t range_end;

  return safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         range_start < file_size_ &&
         range_end <= file_size_;
}

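// For example, with file_offset_ == 0 and file_size_ == 0x1000, an offset of
// 0xff0 with size 0x20 is rejected (0x1010 > 0x1000), as is any offset/size
// pair whose sum overflows off64_t (safe_add reports the overflow).
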
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size)) {
    DL_ERR("\"%s\" has invalid phdr offset/size", name_.c_str());
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size)) {
    DL_ERR("\"%s\" has invalid shdr offset/size", name_.c_str());
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR("\"%s\" .dynamic section has invalid sh_link: %d", name_.c_str(), dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
           name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    PRINT("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
           name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

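// A minimal sketch of how this result is typically consumed (it mirrors
// ReserveAddressSpace() below; the nullptr hint and flags are illustrative):
//
//   ElfW(Addr) min_vaddr;
//   size_t size = phdr_table_get_load_size(phdr_table, phdr_count, &min_vaddr);
//   void* start = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   ElfW(Addr) load_bias = reinterpret_cast<ElfW(Addr)>(start) - min_vaddr;
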
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }
    }
  }
  return true;
}

/* Used internally to set the protection bits of all loaded segments,
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

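// A typical (hypothetical) relocation sequence using the pair above:
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations to the now-writable segments ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
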
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

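// Hypothetical round-trip between two processes: process A relocates, then
// calls phdr_table_serialize_gnu_relro(phdr, count, bias, blob_fd); process B,
// having loaded the same library at the same address, later calls
// phdr_table_map_gnu_relro(phdr, count, bias, blob_fd) (below) to replace its
// dirty relro pages with clean, shareable file-backed ones.
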
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        // The file position must account for relro segments serialized earlier.
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
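    // Each .ARM.exidx entry is two 32-bit words (8 bytes).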
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200969}