/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_prctl.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead, the
  loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  ELF requires the following condition to make the mmap()-ing work:

       PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

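// Worked example (editor's illustrative sketch, not part of the original
// source): computing load_bias for the hypothetical segment list in the note
// above, assuming mmap() placed the first loadable page at 0xa0030000:
//
//   ElfW(Addr) phdr0_load_address = 0xa0030000;  // page chosen by the kernel
//   ElfW(Addr) p_vaddr            = 0x30000;     // first PT_LOAD's p_vaddr
//   ElfW(Addr) load_bias = phdr0_load_address - PAGE_START(p_vaddr);
//   // load_bias == 0xa0000000; any p_vaddr from the file is then reachable
//   // in memory at load_bias + p_vaddr, e.g. 0xa0000000 + 0x40000.
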
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

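// For example (editor's note): a typical text segment with
// p_flags == (PF_R | PF_X) yields
// PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC),
// which can be passed directly to mmap() or mprotect().
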
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  CHECK(!did_read_);
  CHECK(!did_load_);
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  CHECK(!did_load_);
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    DL_ERR("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
           name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    return false;
  }

  if (header_.e_shstrndx == 0) {
    DL_ERR("\"%s\" has invalid e_shstrndx", name_.c_str());
    return false;
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called
  // to check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none
  // of them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

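// Example (editor's sketch with hypothetical values): for a 32-bit ELF whose
// phdr table sits right after the header (e_phoff == 0x34) inside a
// 0x2000-byte file, CheckFileRange(0x34, 7 * sizeof(ElfW(Phdr)),
// alignof(ElfW(Phdr))) accepts the range: both safe_add() calls succeed
// without off64_t overflow, the range lies inside [0, file_size_], and 0x34
// is suitably aligned. A crafted e_phoff near UINT64_MAX fails inside
// safe_add() instead of silently wrapping around.
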
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers).
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr.
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    DL_ERR("\"%s\" .dynamic section has invalid offset: 0x%zx, "
           "expected to match PT_DYNAMIC offset: 0x%zx",
           name_.c_str(),
           static_cast<size_t>(dynamic_shdr->sh_offset),
           pt_dynamic_offset);
    return false;
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    DL_ERR("\"%s\" .dynamic section has invalid size: 0x%zx, "
           "expected to match PT_DYNAMIC filesz: 0x%zx",
           name_.c_str(),
           static_cast<size_t>(dynamic_shdr->sh_size),
           pt_dynamic_filesz);
    return false;
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

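// Worked example (editor's sketch), reusing the two segments from the
// technical note above on a system with 4KiB pages:
//
//   PT_LOAD #0: vaddr 0x30000, memsz 0x4000
//   PT_LOAD #1: vaddr 0x40000, memsz 0x8000
//
//   min_vaddr = PAGE_START(0x30000)        = 0x30000
//   max_vaddr = PAGE_END(0x40000 + 0x8000) = 0x48000
//   phdr_table_get_load_size(...)          = 0x18000
//
// i.e. the reservation must also span the hole between the two segments.
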
// Reserve a virtual address range such that if its limits were extended to
// the next multiple of align, it would not overlap with any existing
// mappings.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // The address hint is only used in ART for the image mapping, and it is
  // pretty important. Don't mess with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the
  // former does not provide a mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is
  // still inside the mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}

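// Worked example (editor's sketch, with a hypothetical align of 0x10000):
// ReserveAligned(nullptr, 0x5000, 0x10000) on a 4KiB-page system:
//
//   mmap_size = align_up(0x5000, 0x10000) + 0x10000 - 0x1000 = 0x1f000
//   first     = align_up(mmap_ptr, 0x10000)
//   last      = align_down(mmap_ptr + 0x1f000, 0x10000) - 0x5000
//
// Every page-aligned start in [first, last] keeps [start, start + size)
// inside the reservation; arc4random_uniform() picks one uniformly, which
// randomizes the sub-alignment placement before the excess is unmapped.
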
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() > 25) {
          DL_ERR_AND_LOG("\"%s\": W + E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN("\"%s\": W + E load segments are not allowed", name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally. Used to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

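// Typical calling sequence for the pair above (editor's sketch of the
// linker's use during relocation):
//
//   phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
//   // ... apply relocations to the now-writable (non-executable) pages ...
//   phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
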
/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


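// Typical cross-process usage of the two relro functions above (editor's
// sketch): process A, after relocating, writes its relro pages out; process
// B, loaded at the same address, replaces its identical dirty pages with
// shared file-backed ones.
//
//   // Process A:
//   phdr_table_serialize_gnu_relro(phdr_table, phdr_count, load_bias, fd);
//
//   // Process B:
//   phdr_table_map_gnu_relro(phdr_table, phdr_count, load_bias, fd);
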
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}