/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdint.h>
#include <unistd.h>

#include <vector>

#include "libdebuggerd/scudo.h"
#include "libdebuggerd/tombstone.h"

#include "unwindstack/AndroidUnwinder.h"
#include "unwindstack/Memory.h"

#include <android-base/macros.h>
#include <bionic/macros.h>

#include "tombstone.pb.h"
// Snapshots the crashed process's Scudo allocator state (stack depot, region
// info, ring buffer) and the memory/MTE tags around the fault address, then
// asks Scudo to diagnose the fault.
//
// process_memory: reader for the crashed process's address space.
// process_info:   fault address and Scudo metadata pointers gathered by the
//                 crash handler.
// Returns true if error_info_ was populated (the reports inside may still be
// UNKNOWN if Scudo found nothing); false if there was no fault address or any
// required remote read failed.
bool ScudoCrashData::SetErrorInfo(unwindstack::Memory* process_memory,
                                  const ProcessInfo& process_info) {
  if (!process_info.has_fault_address) {
    return false;
  }

  // Copy the three Scudo metadata blobs out of the crashed process. Sizes come
  // from the local scudo build; the layouts must match the remote process.
  std::vector<char> stack_depot(__scudo_get_stack_depot_size());
  if (!process_memory->ReadFully(process_info.scudo_stack_depot, stack_depot.data(),
                                 stack_depot.size())) {
    return false;
  }
  std::vector<char> region_info(__scudo_get_region_info_size());
  if (!process_memory->ReadFully(process_info.scudo_region_info, region_info.data(),
                                 region_info.size())) {
    return false;
  }
  std::vector<char> ring_buffer(__scudo_get_ring_buffer_size());
  if (!process_memory->ReadFully(process_info.scudo_ring_buffer, ring_buffer.data(),
                                 ring_buffer.size())) {
    return false;
  }

  uintptr_t page_size = getpagesize();

  untagged_fault_addr_ = process_info.untagged_fault_address;
  // Round the fault address down to its containing page.
  uintptr_t fault_page = untagged_fault_addr_ & ~(page_size - 1);

  // Attempt to get 16 pages before the fault page and 16 pages after.
  constexpr size_t kExtraPages = 16;
  std::vector<char> memory(page_size * (kExtraPages * 2 + 1));

  // Read faulting page first. The fault page sits in the middle slot of
  // `memory`; pages after it fill the slots above, pages before it the slots
  // below.
  size_t memory_index = kExtraPages;
  if (!process_memory->ReadFully(fault_page, &memory[memory_index * page_size], page_size)) {
    return false;
  }

  // Attempt to read the pages after the fault page, stop as soon as we
  // fail to read. The overflow checks guard against wrapping past the top of
  // the address space.
  uintptr_t read_addr = fault_page;
  if (!__builtin_add_overflow(fault_page, page_size, &read_addr)) {
    memory_index++;
    for (size_t i = 0; i < kExtraPages; i++, memory_index++) {
      if (!process_memory->ReadFully(read_addr, &memory[memory_index * page_size], page_size)) {
        break;
      }
      if (__builtin_add_overflow(read_addr, page_size, &read_addr)) {
        break;
      }
    }
  }
  // One past the last byte successfully read after (and including) fault_page.
  uintptr_t memory_end = read_addr;

  // Attempt to read the pages before the fault page, stop as soon as we
  // fail to read. `memory_index` ends up at the slot of the lowest page read,
  // hence the `memory_index - 1` destination inside the loop.
  memory_index = kExtraPages;
  if (fault_page > 0) {
    read_addr = fault_page - page_size;
    for (size_t i = 0; i < kExtraPages; i++, memory_index--) {
      if (!process_memory->ReadFully(read_addr, &memory[(memory_index - 1) * page_size],
                                     page_size)) {
        break;
      }
      if (read_addr == 0) {
        // Page 0 was readable; account for it and stop — nothing lies below.
        memory_index--;
        break;
      }
      read_addr -= page_size;
    }
  }
  size_t start_memory_index = memory_index;
  uintptr_t memory_begin = fault_page - (kExtraPages - memory_index) * page_size;

  // Collect one memory tag per granule across the contiguous readable range.
  std::vector<long> memory_tags((memory_end - memory_begin) / kTagGranuleSize);
  read_addr = memory_begin;
  for (size_t i = 0; i < memory_tags.size(); i++) {
    memory_tags[i] = process_memory->ReadTag(read_addr);
    read_addr += kTagGranuleSize;
  }

  // Hand everything to Scudo, which fills error_info_ with zero or more
  // reports describing the fault.
  __scudo_get_error_info(
      &error_info_, process_info.maybe_tagged_fault_address, stack_depot.data(), region_info.data(),
      ring_buffer.data(), &memory[start_memory_index * page_size],
      reinterpret_cast<const char*>(memory_tags.data()), memory_begin, memory_end - memory_begin);

  return true;
}
120
121bool ScudoCrashData::CrashIsMine() const {
122 return error_info_.reports[0].error_type != UNKNOWN;
123}
124
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800125void ScudoCrashData::FillInCause(Cause* cause, const scudo_error_report* report,
Christopher Ferris3b7b7ba2022-03-15 16:56:09 -0700126 unwindstack::AndroidUnwinder* unwinder) const {
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800127 MemoryError* memory_error = cause->mutable_memory_error();
128 HeapObject* heap_object = memory_error->mutable_heap();
129
130 memory_error->set_tool(MemoryError_Tool_SCUDO);
131 switch (report->error_type) {
132 case USE_AFTER_FREE:
133 memory_error->set_type(MemoryError_Type_USE_AFTER_FREE);
134 break;
135 case BUFFER_OVERFLOW:
136 memory_error->set_type(MemoryError_Type_BUFFER_OVERFLOW);
137 break;
138 case BUFFER_UNDERFLOW:
139 memory_error->set_type(MemoryError_Type_BUFFER_UNDERFLOW);
140 break;
141 default:
142 memory_error->set_type(MemoryError_Type_UNKNOWN);
143 break;
144 }
145
146 heap_object->set_address(report->allocation_address);
147 heap_object->set_size(report->allocation_size);
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800148
149 heap_object->set_allocation_tid(report->allocation_tid);
150 for (size_t i = 0; i < arraysize(report->allocation_trace) && report->allocation_trace[i]; ++i) {
151 unwindstack::FrameData frame_data = unwinder->BuildFrameFromPcOnly(report->allocation_trace[i]);
152 BacktraceFrame* f = heap_object->add_allocation_backtrace();
Christopher Ferris22ad09b2021-11-10 17:25:11 -0800153 fill_in_backtrace_frame(f, frame_data);
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800154 }
155
156 heap_object->set_deallocation_tid(report->deallocation_tid);
157 for (size_t i = 0; i < arraysize(report->deallocation_trace) && report->deallocation_trace[i];
158 ++i) {
159 unwindstack::FrameData frame_data =
160 unwinder->BuildFrameFromPcOnly(report->deallocation_trace[i]);
161 BacktraceFrame* f = heap_object->add_deallocation_backtrace();
Christopher Ferris22ad09b2021-11-10 17:25:11 -0800162 fill_in_backtrace_frame(f, frame_data);
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800163 }
164
165 set_human_readable_cause(cause, untagged_fault_addr_);
166}
167
Christopher Ferris3b7b7ba2022-03-15 16:56:09 -0700168void ScudoCrashData::AddCauseProtos(Tombstone* tombstone,
169 unwindstack::AndroidUnwinder* unwinder) const {
Peter Collingbourne1a1f7d72021-03-08 16:53:54 -0800170 size_t report_num = 0;
171 while (report_num < sizeof(error_info_.reports) / sizeof(error_info_.reports[0]) &&
172 error_info_.reports[report_num].error_type != UNKNOWN) {
173 FillInCause(tombstone->add_causes(), &error_info_.reports[report_num++], unwinder);
174 }
175}