/*
 * Copyright 2016, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <array>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>

#include <event2/event.h>
#include <event2/listener.h>
#include <event2/thread.h>

#include <android-base/cmsg.h>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
#include <cutils/sockets.h>

#include "debuggerd/handler.h"
#include "dump_type.h"
#include "protocol.h"
#include "util.h"

#include "intercept_manager.h"

using android::base::GetIntProperty;
using android::base::SendFileDescriptors;
using android::base::StringPrintf;

using android::base::borrowed_fd;
using android::base::unique_fd;

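// Tracks output redirections registered on the intercept socket; consulted by
// perform_request() before a fresh tombstone file is created.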
static InterceptManager* intercept_manager;

enum CrashStatus {
  kCrashStatusRunning,
  kCrashStatusQueued,
};

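// One output file for a crash dump: either an anonymous O_TMPFILE in the
// output directory, or /dev/null when the artifact should be discarded.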
struct CrashArtifact {
  unique_fd fd;

  static CrashArtifact devnull() {
    CrashArtifact result;
    result.fd.reset(open("/dev/null", O_WRONLY | O_CLOEXEC));
    return result;
  }
};

struct CrashArtifactPaths {
  std::string text;
  std::optional<std::string> proto;
};

struct CrashOutput {
  CrashArtifact text;
  std::optional<CrashArtifact> proto;
};

// Ownership of Crash is a bit messy.
// It's either owned by an active event that must have a timeout, or owned by
// queued_requests, in the case that multiple crashes come in at the same time.
struct Crash {
  ~Crash() { event_free(crash_event); }

  CrashOutput output;
  unique_fd crash_socket_fd;
  pid_t crash_pid;
  event* crash_event = nullptr;

  DebuggerdDumpType crash_type;
};

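// A CrashQueue owns one output directory (tombstones or ANR traces), limits
// how many dumps may be in flight at once, and rotates artifact names through
// a fixed-size pool.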
class CrashQueue {
 public:
  CrashQueue(const std::string& dir_path, const std::string& file_name_prefix, size_t max_artifacts,
             size_t max_concurrent_dumps, bool supports_proto, bool world_readable)
      : file_name_prefix_(file_name_prefix),
        dir_path_(dir_path),
        dir_fd_(open(dir_path.c_str(), O_DIRECTORY | O_RDONLY | O_CLOEXEC)),
        max_artifacts_(max_artifacts),
        next_artifact_(0),
        max_concurrent_dumps_(max_concurrent_dumps),
        num_concurrent_dumps_(0),
        supports_proto_(supports_proto),
        world_readable_(world_readable) {
    if (dir_fd_ == -1) {
      PLOG(FATAL) << "failed to open directory: " << dir_path;
    }

    // NOTE: If max_artifacts_ <= max_concurrent_dumps_, then theoretically the
    // same filename could be handed out to multiple processes.
    CHECK(max_artifacts_ > max_concurrent_dumps_);

    find_oldest_artifact();
  }

  static CrashQueue* for_crash(const Crash* crash) {
    return (crash->crash_type == kDebuggerdJavaBacktrace) ? for_anrs() : for_tombstones();
  }

  static CrashQueue* for_crash(const std::unique_ptr<Crash>& crash) {
    return for_crash(crash.get());
  }

  static CrashQueue* for_tombstones() {
    static CrashQueue queue("/data/tombstones", "tombstone_" /* file_name_prefix */,
                            GetIntProperty("tombstoned.max_tombstone_count", 32),
                            1 /* max_concurrent_dumps */, true /* supports_proto */,
                            true /* world_readable */);
    return &queue;
  }

  static CrashQueue* for_anrs() {
    static CrashQueue queue("/data/anr", "trace_" /* file_name_prefix */,
                            GetIntProperty("tombstoned.max_anr_count", 64),
                            4 /* max_concurrent_dumps */, false /* supports_proto */,
                            false /* world_readable */);
    return &queue;
  }

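  // Create an anonymous file in the output directory with O_TMPFILE; it only
  // gets a visible name once the dump completes (see rename_tombstone_fd).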
  CrashArtifact create_temporary_file() const {
    CrashArtifact result;

    std::optional<std::string> path;
    result.fd.reset(openat(dir_fd_, ".", O_WRONLY | O_APPEND | O_TMPFILE | O_CLOEXEC, 0660));
    if (result.fd == -1) {
      PLOG(FATAL) << "failed to create temporary tombstone in " << dir_path_;
    }

    if (world_readable_) {
      // We need to fchmodat after creating to avoid getting the umask applied.
      std::string fd_path = StringPrintf("/proc/self/fd/%d", result.fd.get());
      if (fchmodat(dir_fd_, fd_path.c_str(), 0664, 0) != 0) {
        PLOG(ERROR) << "Failed to make tombstone world-readable";
      }
    }

    return result;
  }

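  // Allocate the output file(s) for a dump of the given type: a text artifact,
  // plus a proto artifact when the type is kDebuggerdTombstoneProto.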
  std::optional<CrashOutput> get_output(DebuggerdDumpType dump_type) {
    CrashOutput result;

    switch (dump_type) {
      case kDebuggerdNativeBacktrace:
        // Don't generate tombstones for native backtrace requests.
        return {};

      case kDebuggerdTombstoneProto:
        if (!supports_proto_) {
          LOG(ERROR) << "received kDebuggerdTombstoneProto on a queue that doesn't support proto";
          return {};
        }
        result.proto = create_temporary_file();
        result.text = create_temporary_file();
        break;

      case kDebuggerdJavaBacktrace:
      case kDebuggerdTombstone:
        result.text = create_temporary_file();
        break;

      default:
        LOG(ERROR) << "unexpected dump type: " << dump_type;
        return {};
    }

    return result;
  }

  borrowed_fd dir_fd() { return dir_fd_; }

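  // Hand out the next name(s) in the rotating pool, e.g. "tombstone_07" and
  // "tombstone_07.pb".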
  CrashArtifactPaths get_next_artifact_paths() {
    CrashArtifactPaths result;
    result.text = StringPrintf("%s%02d", file_name_prefix_.c_str(), next_artifact_);

    if (supports_proto_) {
      result.proto = StringPrintf("%s%02d.pb", file_name_prefix_.c_str(), next_artifact_);
    }

    next_artifact_ = (next_artifact_ + 1) % max_artifacts_;
    return result;
  }

  // Consumes crash if it returns true, otherwise leaves it untouched.
  bool maybe_enqueue_crash(std::unique_ptr<Crash>&& crash) {
    if (num_concurrent_dumps_ == max_concurrent_dumps_) {
      queued_requests_.emplace_back(std::move(crash));
      return true;
    }

    return false;
  }

  void maybe_dequeue_crashes(void (*handler)(std::unique_ptr<Crash> crash)) {
    while (!queued_requests_.empty() && num_concurrent_dumps_ < max_concurrent_dumps_) {
      std::unique_ptr<Crash> next_crash = std::move(queued_requests_.front());
      queued_requests_.pop_front();
      handler(std::move(next_crash));
    }
  }

  void on_crash_started() { ++num_concurrent_dumps_; }

  void on_crash_completed() { --num_concurrent_dumps_; }

 private:
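  // Start the rotation at the slot holding the oldest (or first missing)
  // artifact, so a restart of tombstoned keeps overwriting oldest-first.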
  void find_oldest_artifact() {
    size_t oldest_tombstone = 0;
    time_t oldest_time = std::numeric_limits<time_t>::max();

    for (size_t i = 0; i < max_artifacts_; ++i) {
      std::string path =
          StringPrintf("%s/%s%02zu", dir_path_.c_str(), file_name_prefix_.c_str(), i);
      struct stat st;
      if (stat(path.c_str(), &st) != 0) {
        if (errno == ENOENT) {
          oldest_tombstone = i;
          break;
        } else {
          PLOG(ERROR) << "failed to stat " << path;
          continue;
        }
      }

      if (st.st_mtime < oldest_time) {
        oldest_tombstone = i;
        oldest_time = st.st_mtime;
      }
    }

    next_artifact_ = oldest_tombstone;
  }

  const std::string file_name_prefix_;

  const std::string dir_path_;
  const int dir_fd_;

  const size_t max_artifacts_;
  int next_artifact_;

  const size_t max_concurrent_dumps_;
  size_t num_concurrent_dumps_;

  bool supports_proto_;
  bool world_readable_;

  std::deque<std::unique_ptr<Crash>> queued_requests_;

  DISALLOW_COPY_AND_ASSIGN(CrashQueue);
};

// Whether java trace dumps are produced via tombstoned.
static constexpr bool kJavaTraceDumpsEnabled = true;

// Forward declare the callbacks so they can be placed in a sensible order.
static void crash_accept_cb(evconnlistener* listener, evutil_socket_t sockfd, sockaddr*, int,
                            void*);
static void crash_request_cb(evutil_socket_t sockfd, short ev, void* arg);
static void crash_completed_cb(evutil_socket_t sockfd, short ev, void* arg);

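// Respond to a dump request: pick the output fd(s) (either an interceptor's fd
// or freshly created temporary files), send them back over the crash socket,
// and re-arm the event to wait for the kCompletedDump packet.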
static void perform_request(std::unique_ptr<Crash> crash) {
  unique_fd output_fd;
  if (intercept_manager->FindIntercept(crash->crash_pid, crash->crash_type, &output_fd)) {
    if (crash->crash_type == kDebuggerdTombstoneProto) {
      crash->output.proto = CrashArtifact::devnull();
    }
  } else {
    if (auto o = CrashQueue::for_crash(crash.get())->get_output(crash->crash_type); o) {
      crash->output = std::move(*o);
      output_fd.reset(dup(crash->output.text.fd));
    } else {
      LOG(ERROR) << "failed to get crash output for type " << crash->crash_type;
      return;
    }
  }

  TombstonedCrashPacket response = {.packet_type = CrashPacketType::kPerformDump};

  ssize_t rc = -1;
  if (crash->output.proto) {
    rc = SendFileDescriptors(crash->crash_socket_fd, &response, sizeof(response), output_fd.get(),
                             crash->output.proto->fd.get());
  } else {
    rc = SendFileDescriptors(crash->crash_socket_fd, &response, sizeof(response), output_fd.get());
  }

  output_fd.reset();

  if (rc == -1) {
    PLOG(WARNING) << "failed to send response to CrashRequest";
    return;
  } else if (rc != sizeof(response)) {
    PLOG(WARNING) << "crash socket write returned short";
    return;
  }

  // TODO: Make this configurable by the interceptor?
  struct timeval timeout = {10 * android::base::HwTimeoutMultiplier(), 0};

  event_base* base = event_get_base(crash->crash_event);

  event_assign(crash->crash_event, base, crash->crash_socket_fd, EV_TIMEOUT | EV_READ,
               crash_completed_cb, crash.get());
  event_add(crash->crash_event, &timeout);
  CrashQueue::for_crash(crash)->on_crash_started();

  // The crash is now owned by the event loop.
  crash.release();
}

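// A new connection arrived on one of the crash sockets: allocate a Crash and
// wait (with a timeout) for the dump request packet.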
static void crash_accept_cb(evconnlistener* listener, evutil_socket_t sockfd, sockaddr*, int,
                            void*) {
  event_base* base = evconnlistener_get_base(listener);
  Crash* crash = new Crash();

  // TODO: Make sure that only java crashes come in on the java socket
  // and only native crashes on the native socket.
  struct timeval timeout = {1 * android::base::HwTimeoutMultiplier(), 0};
  event* crash_event = event_new(base, sockfd, EV_TIMEOUT | EV_READ, crash_request_cb, crash);
  crash->crash_socket_fd.reset(sockfd);
  crash->crash_event = crash_event;
  event_add(crash_event, &timeout);
}

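// Parse the kDumpRequest packet, validate the dump type, determine the
// crashing pid, and either start the dump immediately or queue it if the
// queue's concurrency limit has been reached.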
static void crash_request_cb(evutil_socket_t sockfd, short ev, void* arg) {
  std::unique_ptr<Crash> crash(static_cast<Crash*>(arg));
  TombstonedCrashPacket request = {};

  if ((ev & EV_TIMEOUT) != 0) {
    LOG(WARNING) << "crash request timed out";
    return;
  } else if ((ev & EV_READ) == 0) {
    LOG(WARNING) << "tombstoned received unexpected event from crash socket";
    return;
  }

  ssize_t rc = TEMP_FAILURE_RETRY(read(sockfd, &request, sizeof(request)));
  if (rc == -1) {
    PLOG(WARNING) << "failed to read from crash socket";
    return;
  } else if (rc != sizeof(request)) {
    LOG(WARNING) << "crash socket received short read of length " << rc << " (expected "
                 << sizeof(request) << ")";
    return;
  }

  if (request.packet_type != CrashPacketType::kDumpRequest) {
    LOG(WARNING) << "unexpected crash packet type, expected kDumpRequest, received "
                 << StringPrintf("%#2hhX", request.packet_type);
    return;
  }

  crash->crash_type = request.packet.dump_request.dump_type;
  if (crash->crash_type < 0 || crash->crash_type > kDebuggerdTombstoneProto) {
    LOG(WARNING) << "unexpected crash dump type: " << crash->crash_type;
    return;
  }

  if (crash->crash_type != kDebuggerdJavaBacktrace) {
    crash->crash_pid = request.packet.dump_request.pid;
  } else {
    // Requests for java traces are sent from untrusted processes, so we
    // must not trust the PID sent down with the request. Instead, we ask the
    // kernel.
    ucred cr = {};
    socklen_t len = sizeof(cr);
    int ret = getsockopt(sockfd, SOL_SOCKET, SO_PEERCRED, &cr, &len);
    if (ret != 0) {
      PLOG(ERROR) << "Failed to getsockopt(..SO_PEERCRED)";
      return;
    }

    crash->crash_pid = cr.pid;
  }

  pid_t crash_pid = crash->crash_pid;
  LOG(INFO) << "received crash request for pid " << crash_pid;

  if (CrashQueue::for_crash(crash)->maybe_enqueue_crash(std::move(crash))) {
    LOG(INFO) << "enqueueing crash request for pid " << crash_pid;
  } else {
    perform_request(std::move(crash));
  }
}

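// Give the anonymous O_TMPFILE a real name in the output directory by linking
// /proc/self/fd/<fd> into place, replacing any existing artifact with that name.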
static bool rename_tombstone_fd(borrowed_fd fd, borrowed_fd dirfd, const std::string& path) {
  // Always try to unlink the tombstone file.
  // linkat doesn't let us replace a file, so we need to unlink before linking
  // our results onto disk, and if we fail for some reason, we should delete
  // stale tombstones to avoid confusing inconsistency.
  int rc = unlinkat(dirfd.get(), path.c_str(), 0);
  if (rc != 0 && errno != ENOENT) {
    PLOG(ERROR) << "failed to unlink tombstone at " << path;
    return false;
  }

  // This fd is created inside of dirfd in CrashQueue::create_temporary_file.
  std::string fd_path = StringPrintf("/proc/self/fd/%d", fd.get());
  rc = linkat(AT_FDCWD, fd_path.c_str(), dirfd.get(), path.c_str(), AT_SYMLINK_FOLLOW);
  if (rc != 0) {
    PLOG(ERROR) << "failed to link tombstone at " << path;
    return false;
  }
  return true;
}

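// Handle the kCompletedDump packet: link the temporary file(s) to their final
// rotating names and log where the tombstone or trace was written.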
static void crash_completed(borrowed_fd sockfd, std::unique_ptr<Crash> crash) {
  TombstonedCrashPacket request = {};
  CrashQueue* queue = CrashQueue::for_crash(crash);

  ssize_t rc = TEMP_FAILURE_RETRY(read(sockfd.get(), &request, sizeof(request)));
  if (rc == -1) {
    PLOG(WARNING) << "failed to read from crash socket";
    return;
  } else if (rc != sizeof(request)) {
    LOG(WARNING) << "crash socket received short read of length " << rc << " (expected "
                 << sizeof(request) << ")";
    return;
  }

  if (request.packet_type != CrashPacketType::kCompletedDump) {
    LOG(WARNING) << "unexpected crash packet type, expected kCompletedDump, received "
                 << uint32_t(request.packet_type);
    return;
  }

  if (crash->output.text.fd == -1) {
    LOG(WARNING) << "skipping tombstone file creation due to intercept";
    return;
  }

  CrashArtifactPaths paths = queue->get_next_artifact_paths();

  if (crash->output.proto && crash->output.proto->fd != -1) {
    if (!paths.proto) {
      LOG(ERROR) << "missing path for proto tombstone";
    } else {
      rename_tombstone_fd(crash->output.proto->fd, queue->dir_fd(), *paths.proto);
    }
  }

  if (rename_tombstone_fd(crash->output.text.fd, queue->dir_fd(), paths.text)) {
    if (crash->crash_type == kDebuggerdJavaBacktrace) {
      LOG(ERROR) << "Traces for pid " << crash->crash_pid << " written to: " << paths.text;
    } else {
      // NOTE: Several tools parse this log message to figure out where the
      // tombstone associated with a given native crash was written. Any changes
      // to this message must be carefully considered.
      LOG(ERROR) << "Tombstone written to: " << paths.text;
    }
  }
}

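// The dumping process finished (or the completion wait timed out): release the
// concurrency slot, finalize the artifacts if a completion packet arrived, and
// start any queued dumps.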
static void crash_completed_cb(evutil_socket_t sockfd, short ev, void* arg) {
  std::unique_ptr<Crash> crash(static_cast<Crash*>(arg));
  CrashQueue* queue = CrashQueue::for_crash(crash);

  queue->on_crash_completed();

  if ((ev & EV_READ) == EV_READ) {
    crash_completed(sockfd, std::move(crash));
  }

  // If there's something queued up, let them proceed.
  queue->maybe_dequeue_crashes(perform_request);
}

int main(int, char* []) {
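  // With umask 0117, everything we create is at most 0660; tombstones that
  // should be world-readable are explicitly chmod'ed to 0664 in
  // CrashQueue::create_temporary_file().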
  umask(0117);

  // Don't try to connect to ourselves if we crash.
  struct sigaction action = {};
  action.sa_handler = [](int signal) {
    LOG(ERROR) << "received fatal signal " << signal;
    _exit(1);
  };
  debuggerd_register_handlers(&action);

  int intercept_socket = android_get_control_socket(kTombstonedInterceptSocketName);
  int crash_socket = android_get_control_socket(kTombstonedCrashSocketName);

  if (intercept_socket == -1 || crash_socket == -1) {
    PLOG(FATAL) << "failed to get socket from init";
  }

  evutil_make_socket_nonblocking(intercept_socket);
  evutil_make_socket_nonblocking(crash_socket);

  event_base* base = event_base_new();
  if (!base) {
    LOG(FATAL) << "failed to create event_base";
  }

  intercept_manager = new InterceptManager(base, intercept_socket);

  evconnlistener* tombstone_listener =
      evconnlistener_new(base, crash_accept_cb, CrashQueue::for_tombstones(), LEV_OPT_CLOSE_ON_FREE,
                         -1 /* backlog */, crash_socket);
  if (!tombstone_listener) {
    LOG(FATAL) << "failed to create evconnlistener for tombstones.";
  }

  if (kJavaTraceDumpsEnabled) {
    const int java_trace_socket = android_get_control_socket(kTombstonedJavaTraceSocketName);
    if (java_trace_socket == -1) {
      PLOG(FATAL) << "failed to get socket from init";
    }

    evutil_make_socket_nonblocking(java_trace_socket);
    evconnlistener* java_trace_listener =
        evconnlistener_new(base, crash_accept_cb, CrashQueue::for_anrs(), LEV_OPT_CLOSE_ON_FREE,
                           -1 /* backlog */, java_trace_socket);
    if (!java_trace_listener) {
      LOG(FATAL) << "failed to create evconnlistener for java traces.";
    }
  }

  LOG(INFO) << "tombstoned successfully initialized";
  event_base_dispatch(base);
}