/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <string.h>
#include <unistd.h>

#include <chrono>
#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>

#include <backtrace.h>
#include <android-base/macros.h>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "memunreachable/memunreachable.h"
#include "bionic.h"
#include "log.h"

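// Out-of-line definition for the static constexpr member so that odr-used
// references (e.g. passing it by const reference to std::min) link under
// pre-C++17 rules.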
const size_t Leak::contents_length;

using namespace std::chrono_literals;

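// Heap walking logic for the target process: collects allocations and roots,
// then reports allocations that are not reachable from any root (globals,
// stacks, registers). Runs in the forked heap walker child against the
// copy-on-write snapshot of the paused process.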
class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                          const allocator::vector<Mapping>& mappings);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                            size_t* num_leaks, size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                        allocator::vector<Mapping>& heap_mappings,
                        allocator::vector<Mapping>& anon_mappings,
                        allocator::vector<Mapping>& globals_mappings,
                        allocator::vector<Mapping>& stack_mappings);

  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);

  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

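// Adapts the std::function callback to malloc_iterate's C-style callback by
// smuggling the function object through the void* cookie argument.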
static void HeapIterate(const Mapping& heap_mapping,
                        const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
      [](uintptr_t base, size_t size, void* arg) {
        auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
        (*f)(base, size);
      }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                        const allocator::vector<Mapping>& mappings) {
  ALOGI("searching process %d for allocations", pid_);
  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings,
                        globals_mappings, stack_mappings)) {
    return false;
  }

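  // Each live chunk in the malloc heap becomes a candidate allocation node.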
  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it, [&](uintptr_t base, size_t size) {
      heap_walker_.Allocation(base, base + size);
    });
  }

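  // Anonymous mappings classified as allocations (rather than globals) are
  // each treated as a single candidate allocation.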
  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

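  // Global data sections (.data/.bss and friends) are roots: any pointer
  // values stored there keep their targets reachable.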
  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

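  // For each thread, only the live part of its stack (from the captured stack
  // pointer to the end of the stack mapping) and its register contents are
  // roots; dead stack below the stack pointer is ignored.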
  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

  ALOGI("searching done");

  return true;
}

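// Runs in the forked heap walker child: sweeps the collected allocations
// against the roots and fills `leaks` with up to `limit` unreachable ranges.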
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                                          size_t* num_leaks, size_t* leak_bytes) {
  ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  allocator::vector<Range> leaked{allocator_};
  if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
    return false;
  }

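  // Convert each leaked range into a Leak record: snapshot the first bytes of
  // its contents and its allocation backtrace so they can be reported later.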
  for (auto it = leaked.begin(); it != leaked.end(); it++) {
    Leak leak{};
    leak.begin = it->begin;
    leak.size = it->end - it->begin;
    memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
           std::min(leak.size, Leak::contents_length));
    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
                                                    leak.backtrace_frames, leak.backtrace_length);
    if (num_backtrace_frames > 0) {
      leak.num_backtrace_frames = num_backtrace_frames;
    }
    leaks.emplace_back(leak);
  }

  ALOGI("sweeping done");

  return true;
}

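// Returns true if s starts with the given prefix.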
static bool has_prefix(const allocator::string& s, const char* prefix) {
  int ret = s.compare(0, strlen(prefix), prefix);
  return ret == 0;
}

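// Sorts the mappings from /proc/pid/maps into heap, anonymous-allocation,
// global, and stack buckets. Mappings are visited in address order; an
// executable mapping marks the current library, so the data mappings that
// follow it with the same name are its .rodata/.data sections.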
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                      allocator::vector<Mapping>& heap_mappings,
                                      allocator::vector<Mapping>& anon_mappings,
                                      allocator::vector<Mapping>& globals_mappings,
                                      allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

  allocator::string current_lib{allocator_};

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]") {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") &&
               mapping_name != "[anon:leak_detector_malloc]") {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

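// Detecting unreachable memory in a live process without killing it requires:
//  1. a separate "ptracer" thread to pause every other thread and capture
//     their registers and stacks,
//  2. a fork to take a copy-on-write snapshot of the paused address space, and
//  3. walking the heap in the forked child, sending results back over a pipe
//     so the original process never has to touch its own heap while paused.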
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

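  // PtracerThread runs the collection on a clone()d task that does not share
  // the parent's thread group, since a process cannot ptrace its own threads.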
  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      return 1;
    }

    // malloc must be enabled to call fork; the at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All other threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it can
    // drop the malloc locks; it will block until the collection thread exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread; return immediately,
      // releasing all the captured threads.
      ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(100s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  ALOGI("unreachable memory detection done");
  ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
        info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
        info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");

  return true;
}

std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << " " << std::dec << size;
  oss << " bytes at ";
  oss << std::hex << begin;
  oss << std::endl;

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }

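    // Dump the contents 16 bytes per row: the address, the bytes in hex
    // (zero-padded, space-separated), then the same bytes as ASCII with
    // non-printable characters shown as '.'.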
    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        // Promote through unsigned char so bytes >= 0x80 print as "80".."ff"
        // rather than sign-extended values where char is signed.
        oss << std::setw(2) << static_cast<int>(static_cast<unsigned char>(contents[j])) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        // Pad short final rows with three spaces per missing byte so the
        // ASCII column stays aligned.
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (num_backtrace_frames > 0) {
    oss << backtrace_string(backtrace_frames, num_backtrace_frames);
  }

  return oss.str();
}

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << " " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
  }

  return oss.str();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory";
  }

  return info.ToString(log_contents);
}

bool LogUnreachableMemory(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

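// Convenience pass/fail check: only the leak count matters here, so a limit
// of 0 is passed and no leak records need to be returned.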
bool NoLeaks() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}