blob: f4bf37f666416d73ce02278454a703f62e672e85 [file] [log] [blame]
Tom Cherry1a796bc2020-05-13 09:28:37 -07001/*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "SerializedLogBuffer.h"
18
Tom Cherry1fdbdbe2020-06-22 08:28:45 -070019#include <sys/prctl.h>
20
Tom Cherry1a796bc2020-05-13 09:28:37 -070021#include <limits>
Tom Cherry1a796bc2020-05-13 09:28:37 -070022
23#include <android-base/logging.h>
24#include <android-base/scopeguard.h>
25
Tom Cherry39dc2212020-08-05 12:14:45 -070026#include "LogSize.h"
Tom Cherry1a796bc2020-05-13 09:28:37 -070027#include "LogStatistics.h"
28#include "SerializedFlushToState.h"
29
// Constructs the buffer. The reader list, tag database, and statistics
// collector are owned by the caller and must outlive this object. Init()
// applies the configured per-log-id buffer sizes and wakes sleeping readers.
SerializedLogBuffer::SerializedLogBuffer(LogReaderList* reader_list, LogTags* tags,
                                         LogStatistics* stats)
    : reader_list_(reader_list), tags_(tags), stats_(stats) {
    Init();
}
35
Tom Cherry1a796bc2020-05-13 09:28:37 -070036void SerializedLogBuffer::Init() {
37 log_id_for_each(i) {
Tom Cherry39dc2212020-08-05 12:14:45 -070038 if (!SetSize(i, GetBufferSizeFromProperties(i))) {
39 SetSize(i, kLogBufferMinSize);
Tom Cherry1a796bc2020-05-13 09:28:37 -070040 }
41 }
42
43 // Release any sleeping reader threads to dump their current content.
44 auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
45 for (const auto& reader_thread : reader_list_->reader_threads()) {
46 reader_thread->triggerReader_Locked();
47 }
48}
49
50bool SerializedLogBuffer::ShouldLog(log_id_t log_id, const char* msg, uint16_t len) {
51 if (log_id == LOG_ID_SECURITY) {
52 return true;
53 }
54
55 int prio = ANDROID_LOG_INFO;
56 const char* tag = nullptr;
57 size_t tag_len = 0;
58 if (IsBinary(log_id)) {
59 int32_t tag_int = MsgToTag(msg, len);
60 tag = tags_->tagToName(tag_int);
61 if (tag) {
62 tag_len = strlen(tag);
63 }
64 } else {
65 prio = *msg;
66 tag = msg + 1;
67 tag_len = strnlen(tag, len - 1);
68 }
69 return __android_log_is_loggable_len(prio, tag, tag_len, ANDROID_LOG_VERBOSE);
70}
71
72int SerializedLogBuffer::Log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid, pid_t tid,
73 const char* msg, uint16_t len) {
74 if (log_id >= LOG_ID_MAX || len == 0) {
75 return -EINVAL;
76 }
77
78 if (!ShouldLog(log_id, msg, len)) {
79 stats_->AddTotal(log_id, len);
80 return -EACCES;
81 }
82
83 auto sequence = sequence_.fetch_add(1, std::memory_order_relaxed);
84
85 auto lock = std::lock_guard{lock_};
86
87 if (logs_[log_id].empty()) {
88 logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
89 }
90
91 auto total_len = sizeof(SerializedLogEntry) + len;
92 if (!logs_[log_id].back().CanLog(total_len)) {
93 logs_[log_id].back().FinishWriting();
94 logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
95 }
96
97 auto entry = logs_[log_id].back().Log(sequence, realtime, uid, pid, tid, msg, len);
98 stats_->Add(entry->ToLogStatisticsElement(log_id));
99
100 MaybePrune(log_id);
101
102 reader_list_->NotifyNewLog(1 << log_id);
103 return len;
104}
105
106void SerializedLogBuffer::MaybePrune(log_id_t log_id) {
Tom Cherryf74503d2020-06-19 12:21:21 -0700107 size_t total_size = GetSizeUsed(log_id);
108 size_t after_size = total_size;
Tom Cherry1a796bc2020-05-13 09:28:37 -0700109 if (total_size > max_size_[log_id]) {
110 Prune(log_id, total_size - max_size_[log_id], 0);
Tom Cherryf74503d2020-06-19 12:21:21 -0700111 after_size = GetSizeUsed(log_id);
Tom Cherry1a796bc2020-05-13 09:28:37 -0700112 LOG(INFO) << "Pruned Logs from log_id: " << log_id << ", previous size: " << total_size
Tom Cherryf74503d2020-06-19 12:21:21 -0700113 << " after size: " << after_size;
Tom Cherry1a796bc2020-05-13 09:28:37 -0700114 }
Tom Cherryf74503d2020-06-19 12:21:21 -0700115
116 stats_->set_overhead(log_id, after_size);
Tom Cherry1a796bc2020-05-13 09:28:37 -0700117}
118
Tom Cherry4f8125a2020-07-09 12:12:48 -0700119void SerializedLogBuffer::RemoveChunkFromStats(log_id_t log_id, SerializedLogChunk& chunk) {
120 chunk.IncReaderRefCount();
121 int read_offset = 0;
122 while (read_offset < chunk.write_offset()) {
123 auto* entry = chunk.log_entry(read_offset);
124 stats_->Subtract(entry->ToLogStatisticsElement(log_id));
125 read_offset += entry->total_len();
Tom Cherry1fdbdbe2020-06-22 08:28:45 -0700126 }
Tom Cherry59caa7a2020-07-16 20:46:14 -0700127 chunk.DecReaderRefCount();
Tom Cherry1a796bc2020-05-13 09:28:37 -0700128}
129
130void SerializedLogBuffer::NotifyReadersOfPrune(
131 log_id_t log_id, const std::list<SerializedLogChunk>::iterator& chunk) {
132 for (const auto& reader_thread : reader_list_->reader_threads()) {
133 auto& state = reinterpret_cast<SerializedFlushToState&>(reader_thread->flush_to_state());
134 state.Prune(log_id, chunk);
135 }
136}
137
// Erases whole chunks (or, for uid != 0, individual UID entries) from the
// front of `log_id`'s buffer until `bytes_to_free` has been freed or a
// reader blocks further progress. Returns true when enough was freed (or
// the buffer was fully drained), false when a blocked reader stopped us —
// in which case that reader is kicked. Caller must hold lock_.
bool SerializedLogBuffer::Prune(log_id_t log_id, size_t bytes_to_free, uid_t uid) {
    // Don't prune logs that are newer than the point at which any reader threads are reading from.
    LogReaderThread* oldest = nullptr;
    auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        if (!reader_thread->IsWatching(log_id)) {
            continue;
        }
        if (reader_thread->deadline().time_since_epoch().count() != 0) {
            // Always wake up wrapped readers when pruning. 'Wrapped' readers are an optimization
            // that allows the reader to wait until logs starting at a specified time stamp are
            // about to be pruned. This is error-prone however, since if that timestamp is about to
            // be pruned, the reader is not likely to read the messages fast enough to not back-up
            // logd. Instead, we can achieve a nearly-as-efficient but not error-prone batching
            // effect by waking the reader whenever any chunk is about to be pruned.
            reader_thread->triggerReader_Locked();
        }
        // Track the reader with the smallest start sequence; pruning must stop before it.
        if (!oldest || oldest->start() > reader_thread->start()) {
            oldest = reader_thread.get();
        }
    }

    auto& log_buffer = logs_[log_id];
    auto it = log_buffer.begin();
    while (it != log_buffer.end()) {
        // Stop before any chunk that the slowest reader has not finished reading.
        if (oldest != nullptr && it->highest_sequence_number() >= oldest->start()) {
            break;
        }

        // Increment ahead of time since we're going to erase this iterator from the list.
        auto it_to_prune = it++;

        // The sequence number check ensures that all readers have read all logs in this chunk, but
        // they may still hold a reference to the chunk to track their last read log_position.
        // Notify them to delete the reference.
        NotifyReadersOfPrune(log_id, it_to_prune);

        if (uid != 0) {
            // Reorder the log buffer to remove logs from the given UID. If there are no logs left
            // in the buffer after the removal, delete it.
            if (it_to_prune->ClearUidLogs(uid, log_id, stats_)) {
                log_buffer.erase(it_to_prune);
            }
        } else {
            size_t buffer_size = it_to_prune->PruneSize();
            RemoveChunkFromStats(log_id, *it_to_prune);
            log_buffer.erase(it_to_prune);
            if (buffer_size >= bytes_to_free) {
                return true;
            }
            bytes_to_free -= buffer_size;
        }
    }

    // If we've deleted all buffers without bytes_to_free hitting 0, then we're called by Clear()
    // and should return true.
    if (it == log_buffer.end()) {
        return true;
    }

    // Otherwise we are stuck due to a reader, so mitigate it.
    CHECK(oldest != nullptr);
    KickReader(oldest, log_id, bytes_to_free);
    return false;
}
203
204// If the selected reader is blocking our pruning progress, decide on
205// what kind of mitigation is necessary to unblock the situation.
206void SerializedLogBuffer::KickReader(LogReaderThread* reader, log_id_t id, size_t bytes_to_free) {
207 if (bytes_to_free >= max_size_[id]) { // +100%
208 // A misbehaving or slow reader is dropped if we hit too much memory pressure.
209 LOG(WARNING) << "Kicking blocked reader, " << reader->name()
210 << ", from LogBuffer::kickMe()";
211 reader->release_Locked();
Tom Cherry1a796bc2020-05-13 09:28:37 -0700212 } else {
213 // Tell slow reader to skip entries to catch up.
214 unsigned long prune_rows = bytes_to_free / 300;
215 LOG(WARNING) << "Skipping " << prune_rows << " entries from slow reader, " << reader->name()
216 << ", from LogBuffer::kickMe()";
217 reader->triggerSkip_Locked(id, prune_rows);
218 }
219}
220
221std::unique_ptr<FlushToState> SerializedLogBuffer::CreateFlushToState(uint64_t start,
222 LogMask log_mask) {
223 return std::make_unique<SerializedFlushToState>(start, log_mask);
224}
225
// Streams unread logs to `writer`, starting from the state's `start`
// sequence, optionally filtered by `filter`. Returns false if the writer
// rejects an entry (reader disconnected), true once all currently unread
// logs are flushed. The buffer lock is dropped around each Flush() call so
// logging is not stalled by a slow reader.
bool SerializedLogBuffer::FlushTo(
        LogWriter* writer, FlushToState& abstract_state,
        const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
                                         log_time realtime)>& filter) {
    auto lock = std::unique_lock{lock_};

    // This buffer only ever hands out SerializedFlushToState (see
    // CreateFlushToState), so the downcast is safe.
    auto& state = reinterpret_cast<SerializedFlushToState&>(abstract_state);
    state.InitializeLogs(logs_);

    while (state.HasUnreadLogs()) {
        MinHeapElement top = state.PopNextUnreadLog();
        auto* entry = top.entry;
        auto log_id = top.log_id;

        // Skip entries older than the reader's current position.
        if (entry->sequence() < state.start()) {
            continue;
        }
        state.set_start(entry->sequence());

        // Unprivileged readers only see their own UID's logs.
        if (!writer->privileged() && entry->uid() != writer->uid()) {
            continue;
        }

        if (filter) {
            auto ret = filter(log_id, entry->pid(), entry->sequence(), entry->realtime());
            if (ret == FilterResult::kSkip) {
                continue;
            }
            if (ret == FilterResult::kStop) {
                break;
            }
        }

        lock.unlock();
        // We never prune logs equal to or newer than any LogReaderThreads' `start` value, so the
        // `entry` pointer is safe here without the lock
        if (!entry->Flush(writer, log_id)) {
            return false;
        }
        lock.lock();
    }

    // Advance past the last-read sequence so a subsequent flush resumes
    // with the next entry rather than re-sending the last one.
    state.set_start(state.start() + 1);
    return true;
}
271
272bool SerializedLogBuffer::Clear(log_id_t id, uid_t uid) {
273 // Try three times to clear, then disconnect the readers and try one final time.
274 for (int retry = 0; retry < 3; ++retry) {
275 {
276 auto lock = std::lock_guard{lock_};
277 bool prune_success = Prune(id, ULONG_MAX, uid);
278 if (prune_success) {
279 return true;
280 }
281 }
282 sleep(1);
283 }
284 // Check if it is still busy after the sleep, we try to prune one entry, not another clear run,
285 // so we are looking for the quick side effect of the return value to tell us if we have a
286 // _blocked_ reader.
287 bool busy = false;
288 {
289 auto lock = std::lock_guard{lock_};
290 busy = !Prune(id, 1, uid);
291 }
292 // It is still busy, disconnect all readers.
293 if (busy) {
294 auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
295 for (const auto& reader_thread : reader_list_->reader_threads()) {
296 if (reader_thread->IsWatching(id)) {
297 LOG(WARNING) << "Kicking blocked reader, " << reader_thread->name()
298 << ", from LogBuffer::clear()";
299 reader_thread->release_Locked();
300 }
301 }
302 }
303 auto lock = std::lock_guard{lock_};
304 return Prune(id, ULONG_MAX, uid);
305}
306
Tom Cherry39dc2212020-08-05 12:14:45 -0700307size_t SerializedLogBuffer::GetSizeUsed(log_id_t id) {
Tom Cherryf74503d2020-06-19 12:21:21 -0700308 size_t total_size = 0;
309 for (const auto& chunk : logs_[id]) {
310 total_size += chunk.PruneSize();
311 }
312 return total_size;
313}
314
Tom Cherry39dc2212020-08-05 12:14:45 -0700315size_t SerializedLogBuffer::GetSize(log_id_t id) {
Tom Cherry1a796bc2020-05-13 09:28:37 -0700316 auto lock = std::lock_guard{lock_};
Tom Cherryf74503d2020-06-19 12:21:21 -0700317 return max_size_[id];
Tom Cherry1a796bc2020-05-13 09:28:37 -0700318}
319
320// New SerializedLogChunk objects will be allocated according to the new size, but older one are
321// unchanged. MaybePrune() is called on the log buffer to reduce it to an appropriate size if the
322// new size is lower.
Tom Cherry39dc2212020-08-05 12:14:45 -0700323bool SerializedLogBuffer::SetSize(log_id_t id, size_t size) {
Tom Cherry1a796bc2020-05-13 09:28:37 -0700324 // Reasonable limits ...
Tom Cherry39dc2212020-08-05 12:14:45 -0700325 if (!IsValidBufferSize(size)) {
326 return false;
Tom Cherry1a796bc2020-05-13 09:28:37 -0700327 }
328
329 auto lock = std::lock_guard{lock_};
Tom Cherryf74503d2020-06-19 12:21:21 -0700330 max_size_[id] = size;
Tom Cherry1a796bc2020-05-13 09:28:37 -0700331
332 MaybePrune(id);
333
Tom Cherry39dc2212020-08-05 12:14:45 -0700334 return true;
Tom Cherry1a796bc2020-05-13 09:28:37 -0700335}