/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

namespace android {

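// Records a single allocation in the map of live allocations.  Zero-length
// allocations are bumped to one byte so they still occupy a key in the map.
// Returns false if the range overlaps an existing allocation.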
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

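// Checks whether the word at word_ptr holds a pointer into a known allocation.
// On a hit, returns the allocation's range and a pointer to its metadata.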
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page.  If so, it will be
  // caught and handled by mmapping a zero page over the faulting page.
  uintptr_t value = *reinterpret_cast<uintptr_t*>(word_ptr);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

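// Iteratively marks every allocation reachable from root.  Uses an explicit
// work list instead of recursion so that deep reference chains cannot
// overflow the stack.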
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}

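// Expands the range of mapped memory that allocations are allowed to live in.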
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}

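// Registers a range of memory, or a list of captured pointer values, to be
// treated as roots when marking reachable allocations.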
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

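// Marks all allocations reachable from the registered roots.  Any allocation
// left unmarked afterwards is considered leaked and is reported by Leaked().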
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}

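// Copies up to limit unreferenced allocations into leaked, and reports the
// total count and byte size of all leaks, not just the ones returned.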
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

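// Maps a fresh zero-filled read-only page over the page containing addr so
// the heap walk can continue past memory that faulted.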
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

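// SIGSEGV handler installed while walking the heap.  If the fault came from
// the walker's own read of walking_ptr_, log it once and map a zero page over
// the faulting page; otherwise reset the handler so the signal is delivered
// normally.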
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

ScopedSignalHandler::SignalFn ScopedSignalHandler::handler_;

}  // namespace android