/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SerializedLogBuffer.h"

#include <limits>
#include <thread>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>

#include "LogStatistics.h"
#include "SerializedFlushToState.h"

SerializedLogBuffer::SerializedLogBuffer(LogReaderList* reader_list, LogTags* tags,
                                         LogStatistics* stats)
    : reader_list_(reader_list), tags_(tags), stats_(stats) {
    Init();
}

SerializedLogBuffer::~SerializedLogBuffer() {}

void SerializedLogBuffer::Init() {
    log_id_for_each(i) {
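        // SetSize() rejects invalid sizes; if the configured size for this buffer is not
        // accepted, fall back to the minimum buffer size.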
        if (SetSize(i, __android_logger_get_buffer_size(i))) {
            SetSize(i, LOG_BUFFER_MIN_SIZE);
        }
    }

    // Release any sleeping reader threads to dump their current content.
    auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        reader_thread->triggerReader_Locked();
    }
}

bool SerializedLogBuffer::ShouldLog(log_id_t log_id, const char* msg, uint16_t len) {
    if (log_id == LOG_ID_SECURITY) {
        return true;
    }

    int prio = ANDROID_LOG_INFO;
    const char* tag = nullptr;
    size_t tag_len = 0;
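    // Binary (event) logs begin with a numeric tag that must be resolved to a name before the
    // loggability check; text logs begin with a single priority byte followed by the tag string.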
    if (IsBinary(log_id)) {
        int32_t tag_int = MsgToTag(msg, len);
        tag = tags_->tagToName(tag_int);
        if (tag) {
            tag_len = strlen(tag);
        }
    } else {
        prio = *msg;
        tag = msg + 1;
        tag_len = strnlen(tag, len - 1);
    }
    return __android_log_is_loggable_len(prio, tag, tag_len, ANDROID_LOG_VERBOSE);
}

int SerializedLogBuffer::Log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid, pid_t tid,
                             const char* msg, uint16_t len) {
    if (log_id >= LOG_ID_MAX || len == 0) {
        return -EINVAL;
    }

    if (!ShouldLog(log_id, msg, len)) {
        stats_->AddTotal(log_id, len);
        return -EACCES;
    }

    auto sequence = sequence_.fetch_add(1, std::memory_order_relaxed);

    auto lock = std::lock_guard{lock_};

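    // Chunks are sized at one quarter of the buffer's maximum size; the first chunk is created
    // lazily on the first write.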
    if (logs_[log_id].empty()) {
        logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
    }

    auto total_len = sizeof(SerializedLogEntry) + len;
    if (!logs_[log_id].back().CanLog(total_len)) {
        logs_[log_id].back().FinishWriting();
        logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
    }

    auto entry = logs_[log_id].back().Log(sequence, realtime, uid, pid, tid, msg, len);
    stats_->Add(entry->ToLogStatisticsElement(log_id));

    MaybePrune(log_id);

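    // Wake any readers waiting for new logs; the notification mask carries one bit per log id.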
    reader_list_->NotifyNewLog(1 << log_id);
    return len;
}

void SerializedLogBuffer::MaybePrune(log_id_t log_id) {
    auto get_total_size = [](const auto& buffer) {
        size_t total_size = 0;
        for (const auto& chunk : buffer) {
            total_size += chunk.PruneSize();
        }
        return total_size;
    };
    size_t total_size = get_total_size(logs_[log_id]);
    if (total_size > max_size_[log_id]) {
        Prune(log_id, total_size - max_size_[log_id], 0);
        LOG(INFO) << "Pruned Logs from log_id: " << log_id << ", previous size: " << total_size
                  << " after size: " << get_total_size(logs_[log_id]);
    }
}

// Decompresses the chunks, calls LogStatistics::Subtract() on each entry, then deletes the chunks
// and the list. Note that the SerializedLogChunk objects have been removed from logs_ and their
// references have been deleted from any SerializedFlushToState objects, so this can be safely done
// without holding lock_. It is done in a separate thread to avoid delaying the writer thread. The
// lambda for the thread takes ownership of the 'chunks' list, so when the thread ends and the
// lambda is deleted, the objects are deleted.
void SerializedLogBuffer::DeleteLogChunks(std::list<SerializedLogChunk>&& chunks, log_id_t log_id) {
    auto delete_thread = std::thread{[chunks = std::move(chunks), log_id, this]() mutable {
        for (auto& chunk : chunks) {
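            // Taking a reader reference decompresses the chunk so that its entries can be walked
            // for statistics accounting.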
            chunk.IncReaderRefCount();
            int read_offset = 0;
            while (read_offset < chunk.write_offset()) {
                auto* entry = chunk.log_entry(read_offset);
                stats_->Subtract(entry->ToLogStatisticsElement(log_id));
                read_offset += entry->total_len();
            }
            chunk.DecReaderRefCount(false);
        }
    }};
    delete_thread.detach();
}

void SerializedLogBuffer::NotifyReadersOfPrune(
        log_id_t log_id, const std::list<SerializedLogChunk>::iterator& chunk) {
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        auto& state = reinterpret_cast<SerializedFlushToState&>(reader_thread->flush_to_state());
        state.Prune(log_id, chunk);
    }
}

bool SerializedLogBuffer::Prune(log_id_t log_id, size_t bytes_to_free, uid_t uid) {
    // Don't prune logs that are newer than the oldest point from which any reader thread is
    // reading.
    LogReaderThread* oldest = nullptr;
    auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
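    // Find the reader watching this log with the lowest start point; on a tie, prefer a reader
    // that has a wakeup deadline set.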
    for (const auto& reader_thread : reader_list_->reader_threads()) {
        if (!reader_thread->IsWatching(log_id)) {
            continue;
        }
        if (!oldest || oldest->start() > reader_thread->start() ||
            (oldest->start() == reader_thread->start() &&
             reader_thread->deadline().time_since_epoch().count() != 0)) {
            oldest = reader_thread.get();
        }
    }

    auto& log_buffer = logs_[log_id];

    std::list<SerializedLogChunk> chunks_to_prune;
    auto prune_chunks = android::base::make_scope_guard([&chunks_to_prune, log_id, this] {
        DeleteLogChunks(std::move(chunks_to_prune), log_id);
    });

    auto it = log_buffer.begin();
    while (it != log_buffer.end()) {
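        // Stop at the first chunk that the oldest reader has not fully read.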
        if (oldest != nullptr && it->highest_sequence_number() >= oldest->start()) {
            break;
        }

        // Increment ahead of time since we're going to splice this iterator from the list.
        auto it_to_prune = it++;

        // The sequence number check ensures that all readers have read all logs in this chunk, but
        // they may still hold a reference to the chunk to track their last read log_position.
        // Notify them to delete the reference.
        NotifyReadersOfPrune(log_id, it_to_prune);

        if (uid != 0) {
            // Reorder the log buffer to remove logs from the given UID. If there are no logs left
            // in the buffer after the removal, delete it.
            if (it_to_prune->ClearUidLogs(uid, log_id, stats_)) {
                log_buffer.erase(it_to_prune);
            }
        } else {
            size_t buffer_size = it_to_prune->PruneSize();
            chunks_to_prune.splice(chunks_to_prune.end(), log_buffer, it_to_prune);
            if (buffer_size >= bytes_to_free) {
                return true;
            }
            bytes_to_free -= buffer_size;
        }
    }

    // If we've deleted all chunks without freeing bytes_to_free bytes, then we were called by
    // Clear() and should return true.
    if (it == log_buffer.end()) {
        return true;
    }

    // Otherwise we are stuck due to a reader, so mitigate it.
    CHECK(oldest != nullptr);
    KickReader(oldest, log_id, bytes_to_free);
    return false;
}

// If the selected reader is blocking our pruning progress, decide on
// what kind of mitigation is necessary to unblock the situation.
void SerializedLogBuffer::KickReader(LogReaderThread* reader, log_id_t id, size_t bytes_to_free) {
    if (bytes_to_free >= max_size_[id]) {  // The reader is blocking at least 100% of the buffer.
        // A misbehaving or slow reader is dropped if we hit too much memory pressure.
        LOG(WARNING) << "Kicking blocked reader, " << reader->name()
                     << ", from LogBuffer::kickMe()";
        reader->release_Locked();
    } else if (reader->deadline().time_since_epoch().count() != 0) {
        // Allow a blocked WRAP deadline reader to trigger and start reporting the log data.
        reader->triggerReader_Locked();
    } else {
        // Tell slow reader to skip entries to catch up.
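        // Estimate the number of entries to skip, assuming roughly 300 bytes per entry.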
        unsigned long prune_rows = bytes_to_free / 300;
        LOG(WARNING) << "Skipping " << prune_rows << " entries from slow reader, " << reader->name()
                     << ", from LogBuffer::kickMe()";
        reader->triggerSkip_Locked(id, prune_rows);
    }
}

std::unique_ptr<FlushToState> SerializedLogBuffer::CreateFlushToState(uint64_t start,
                                                                      LogMask log_mask) {
    return std::make_unique<SerializedFlushToState>(start, log_mask);
}

bool SerializedLogBuffer::FlushTo(
        LogWriter* writer, FlushToState& abstract_state,
        const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
                                         log_time realtime)>& filter) {
    auto lock = std::unique_lock{lock_};

    auto& state = reinterpret_cast<SerializedFlushToState&>(abstract_state);
    state.InitializeLogs(logs_);
    state.CheckForNewLogs();

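    // Unread logs from all watched buffers are merged in sequence order via a min heap.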
    while (state.HasUnreadLogs()) {
        MinHeapElement top = state.PopNextUnreadLog();
        auto* entry = top.entry;
        auto log_id = top.log_id;

        if (entry->sequence() < state.start()) {
            continue;
        }
        state.set_start(entry->sequence());

        if (!writer->privileged() && entry->uid() != writer->uid()) {
            continue;
        }

        if (filter) {
            auto ret = filter(log_id, entry->pid(), entry->sequence(), entry->realtime());
            if (ret == FilterResult::kSkip) {
                continue;
            }
            if (ret == FilterResult::kStop) {
                break;
            }
        }

        lock.unlock();
        // We never prune logs equal to or newer than any LogReaderThread's `start` value, so the
        // `entry` pointer is safe here without the lock.
        if (!entry->Flush(writer, log_id)) {
            return false;
        }
        lock.lock();

        // Since we released the lock above, buffers that aren't in our min heap may now have had
        // logs added, so re-check them.
        state.CheckForNewLogs();
    }

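    // Advance past the last log we considered, so the next FlushTo() call resumes after it.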
    state.set_start(state.start() + 1);
    return true;
}

bool SerializedLogBuffer::Clear(log_id_t id, uid_t uid) {
    // Try three times to clear, then disconnect the readers and try one final time.
    for (int retry = 0; retry < 3; ++retry) {
        {
            auto lock = std::lock_guard{lock_};
            bool prune_success = Prune(id, ULONG_MAX, uid);
            if (prune_success) {
                return true;
            }
        }
        sleep(1);
    }
    // Check if the buffer is still busy after the sleep. We attempt to prune a single entry, not
    // another full clear run, because we only need the quick side effect of the return value to
    // tell us whether we have a _blocked_ reader.
    bool busy = false;
    {
        auto lock = std::lock_guard{lock_};
        busy = !Prune(id, 1, uid);
    }
    // If it is still busy, disconnect all readers watching this log.
    if (busy) {
        auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
        for (const auto& reader_thread : reader_list_->reader_threads()) {
            if (reader_thread->IsWatching(id)) {
                LOG(WARNING) << "Kicking blocked reader, " << reader_thread->name()
                             << ", from LogBuffer::clear()";
                reader_thread->release_Locked();
            }
        }
    }
    auto lock = std::lock_guard{lock_};
    return Prune(id, ULONG_MAX, uid);
}

unsigned long SerializedLogBuffer::GetSize(log_id_t id) {
    auto lock = std::lock_guard{lock_};
    size_t retval = 2 * max_size_[id] / 3;  // See the comment in SetSize().
    return retval;
}

// New SerializedLogChunk objects will be allocated according to the new size, but older ones are
// unchanged. MaybePrune() is called on the log buffer to reduce it to an appropriate size if the
// new size is lower.
// ChattyLogBuffer/SimpleLogBuffer don't consider the 'Overhead' of LogBufferElement or the
// std::list<> overhead as part of the log size. SerializedLogBuffer does by its very nature, so
// the 'size' metric is not compatible between them. Their actual memory usage is between 1.5x and
// 2x of what they claim to use, so we conservatively set our internal size as size + size / 2.
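// For example, SetSize(id, 1 MiB) stores max_size_[id] = 1.5 MiB, and GetSize() then reports
// 2 * 1.5 MiB / 3 = 1 MiB, the originally requested size.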
int SerializedLogBuffer::SetSize(log_id_t id, unsigned long size) {
    // Reasonable limits ...
    if (!__android_logger_valid_buffer_size(size)) {
        return -1;
    }

    auto lock = std::lock_guard{lock_};
    max_size_[id] = size + size / 2;

    MaybePrune(id);

    return 0;
}