/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include <inttypes.h>

#include <algorithm>
#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "log.h"
25
26bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
27 if (end == begin) {
28 end = begin + 1;
29 }
30 auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
31 if (inserted.second) {
32 valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
33 valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
34 allocation_bytes_ += end - begin;
35 return true;
36 } else {
37 Range overlap = inserted.first->first;
38 ALOGE("range %p-%p overlaps with existing range %p-%p",
39 reinterpret_cast<void*>(begin),
40 reinterpret_cast<void*>(end),
41 reinterpret_cast<void*>(overlap.begin),
42 reinterpret_cast<void*>(overlap.end));
43 return false;
44 }
45}
46
47void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
48 allocator::vector<Range> to_do(1, range, allocator_);
49 while (!to_do.empty()) {
50 Range range = to_do.back();
51 to_do.pop_back();
52 uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
53 // TODO(ccross): we might need to consider a pointer to the end of a buffer
54 // to be inside the buffer, which means the common case of a pointer to the
55 // beginning of a buffer may keep two ranges live.
56 for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
57 uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
58 if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
59 RangeMap::iterator it = allocations_.find(Range{val, val + 1});
60 if (it != allocations_.end()) {
61 if (!(it->second.*flag)) {
62 to_do.push_back(it->first);
63 it->second.*flag = true;
64 }
65 }
66 }
67 }
68 }
69}
70
71void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
72 roots_.push_back(Range{begin, end});
73}
74
75void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
76 root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
77}
78
79size_t HeapWalker::Allocations() {
80 return allocations_.size();
81}
82
83size_t HeapWalker::AllocationBytes() {
84 return allocation_bytes_;
85}
86
87bool HeapWalker::DetectLeaks() {
88 for (auto it = roots_.begin(); it != roots_.end(); it++) {
89 Walk(*it, &RangeInfo::referenced_from_root);
90 }
91
92 Range vals;
93 vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
94 vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
95 Walk(vals, &RangeInfo::referenced_from_root);
96
97 for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
98 if (!it->second.referenced_from_root) {
99 Walk(it->first, &RangeInfo::referenced_from_leak);
100 }
101 }
102
103 return true;
104}
105
106bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
107 size_t* num_leaks_out, size_t* leak_bytes_out) {
108 DetectLeaks();
109 leaked.clear();
110
111 size_t num_leaks = 0;
112 size_t leak_bytes = 0;
113 for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
114 if (!it->second.referenced_from_root) {
115 num_leaks++;
116 leak_bytes += it->first.end - it->first.begin;
117 }
118 }
119
120 size_t n = 0;
121 for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
122 if (!it->second.referenced_from_root) {
123 if (n++ <= limit) {
124 leaked.push_back(it->first);
125 }
126 }
127 }
128
129 if (num_leaks_out) {
130 *num_leaks_out = num_leaks;
131 }
132 if (leak_bytes_out) {
133 *leak_bytes_out = leak_bytes;
134 }
135
136 return true;
137}