Merge "libmodprobe: improve error reporting."
diff --git a/adb/transport.cpp b/adb/transport.cpp
index b6b6984..c33d5af 100644
--- a/adb/transport.cpp
+++ b/adb/transport.cpp
@@ -1533,8 +1533,7 @@
         keys_.pop_front();
     }
 
-    std::shared_ptr<RSA> result = keys_[0];
-    return result;
+    return Key();
 }
 
 void atransport::ResetKeys() {
diff --git a/fastboot/fastboot.cpp b/fastboot/fastboot.cpp
index 46d1d36..0e9713d 100644
--- a/fastboot/fastboot.cpp
+++ b/fastboot/fastboot.cpp
@@ -200,8 +200,10 @@
 double last_start_time;
 
 static void Status(const std::string& message) {
-    static constexpr char kStatusFormat[] = "%-50s ";
-    fprintf(stderr, kStatusFormat, message.c_str());
+    if (!message.empty()) {
+        static constexpr char kStatusFormat[] = "%-50s ";
+        fprintf(stderr, kStatusFormat, message.c_str());
+    }
     last_start_time = now();
 }
 
diff --git a/fastboot/fuzzy_fastboot/fixtures.cpp b/fastboot/fuzzy_fastboot/fixtures.cpp
index bd76ff4..9b5e5f7 100644
--- a/fastboot/fuzzy_fastboot/fixtures.cpp
+++ b/fastboot/fuzzy_fastboot/fixtures.cpp
@@ -45,6 +45,7 @@
 #include <vector>
 
 #include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 #include <gtest/gtest.h>
 
 #include "fastboot_driver.h"
@@ -76,8 +77,7 @@
 }
 
 bool FastBootTest::IsFastbootOverTcp() {
-    // serial contains ":" is treated as host ip and port number
-    return (device_serial.find(":") != std::string::npos);
+    return android::base::StartsWith(device_serial, "tcp:");
 }
 
 bool FastBootTest::UsbStillAvailible() {
@@ -182,19 +182,14 @@
 }
 
 void FastBootTest::ConnectTcpFastbootDevice() {
-    std::size_t found = device_serial.find(":");
-    if (found != std::string::npos) {
-        for (int i = 0; i < MAX_TCP_TRIES && !transport; i++) {
-            std::string error;
-            std::unique_ptr<Transport> tcp(
-                    tcp::Connect(device_serial.substr(0, found), tcp::kDefaultPort, &error)
-                            .release());
-            if (tcp)
-                transport =
-                        std::unique_ptr<TransportSniffer>(new TransportSniffer(std::move(tcp), 0));
-            if (transport != nullptr) break;
-            std::this_thread::sleep_for(std::chrono::milliseconds(10));
-        }
+    for (int i = 0; i < MAX_TCP_TRIES && !transport; i++) {
+        std::string error;
+        std::unique_ptr<Transport> tcp(
+                tcp::Connect(device_serial.substr(4), tcp::kDefaultPort, &error).release());
+        if (tcp)
+            transport = std::unique_ptr<TransportSniffer>(new TransportSniffer(std::move(tcp), 0));
+        if (transport != nullptr) break;
+        std::this_thread::sleep_for(std::chrono::milliseconds(10));
     }
 }
 
diff --git a/fs_mgr/fs_mgr.cpp b/fs_mgr/fs_mgr.cpp
index b2c7a27..0c184af 100644
--- a/fs_mgr/fs_mgr.cpp
+++ b/fs_mgr/fs_mgr.cpp
@@ -301,10 +301,13 @@
     return true;
 }
 
+static bool needs_block_encryption(const FstabEntry& entry);
+static bool should_use_metadata_encryption(const FstabEntry& entry);
+
 // Read the primary superblock from an ext4 filesystem.  On failure return
 // false.  If it's not an ext4 filesystem, also set FS_STAT_INVALID_MAGIC.
-static bool read_ext4_superblock(const std::string& blk_device, struct ext4_super_block* sb,
-                                 int* fs_stat) {
+static bool read_ext4_superblock(const std::string& blk_device, const FstabEntry& entry,
+                                 struct ext4_super_block* sb, int* fs_stat) {
     android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(blk_device.c_str(), O_RDONLY | O_CLOEXEC)));
 
     if (fd < 0) {
@@ -321,7 +324,29 @@
         LINFO << "Invalid ext4 superblock on '" << blk_device << "'";
         // not a valid fs, tune2fs, fsck, and mount  will all fail.
         *fs_stat |= FS_STAT_INVALID_MAGIC;
-        return false;
+
+        bool encrypted = should_use_metadata_encryption(entry) || needs_block_encryption(entry);
+        if (entry.mount_point == "/data" &&
+            (!encrypted || android::base::StartsWith(blk_device, "/dev/block/dm-"))) {
+            // try backup superblock, if main superblock is corrupted
+            for (unsigned int blocksize = EXT4_MIN_BLOCK_SIZE; blocksize <= EXT4_MAX_BLOCK_SIZE;
+                 blocksize *= 2) {
+                uint64_t superblock = blocksize * 8;
+                if (blocksize == EXT4_MIN_BLOCK_SIZE) superblock++;
+
+                if (TEMP_FAILURE_RETRY(pread(fd, sb, sizeof(*sb), superblock * blocksize)) !=
+                    sizeof(*sb)) {
+                    PERROR << "Can't read '" << blk_device << "' superblock";
+                    return false;
+                }
+                if (is_ext4_superblock_valid(sb) &&
+                    (1 << (10 + sb->s_log_block_size) == blocksize)) {
+                    *fs_stat &= ~FS_STAT_INVALID_MAGIC;
+                    break;
+                }
+            }
+        }
+        if (*fs_stat & FS_STAT_INVALID_MAGIC) return false;
     }
     *fs_stat |= FS_STAT_IS_EXT4;
     LINFO << "superblock s_max_mnt_count:" << sb->s_max_mnt_count << "," << blk_device;
@@ -662,7 +687,7 @@
     if (is_extfs(entry.fs_type)) {
         struct ext4_super_block sb;
 
-        if (read_ext4_superblock(blk_device, &sb, &fs_stat)) {
+        if (read_ext4_superblock(blk_device, entry, &sb, &fs_stat)) {
             if ((sb.s_feature_incompat & EXT4_FEATURE_INCOMPAT_RECOVER) != 0 ||
                 (sb.s_state & EXT4_VALID_FS) == 0) {
                 LINFO << "Filesystem on " << blk_device << " was not cleanly shutdown; "
@@ -692,7 +717,7 @@
          entry.fs_mgr_flags.fs_verity || entry.fs_mgr_flags.ext_meta_csum)) {
         struct ext4_super_block sb;
 
-        if (read_ext4_superblock(blk_device, &sb, &fs_stat)) {
+        if (read_ext4_superblock(blk_device, entry, &sb, &fs_stat)) {
             tune_reserved_size(blk_device, entry, &sb, &fs_stat);
             tune_encrypt(blk_device, entry, &sb, &fs_stat);
             tune_verity(blk_device, entry, &sb, &fs_stat);
diff --git a/fs_mgr/libsnapshot/fuzz.sh b/fs_mgr/libsnapshot/fuzz.sh
index 0e57674..5995cef 100755
--- a/fs_mgr/libsnapshot/fuzz.sh
+++ b/fs_mgr/libsnapshot/fuzz.sh
@@ -11,7 +11,7 @@
 
 build_normal() (
     pushd $(gettop)
-    NATIVE_COVERAGE="" NATIVE_LINE_COVERAGE="" COVERAGE_PATHS="" m ${FUZZ_TARGET}
+    NATIVE_COVERAGE="" NATIVE_LINE_COVERAGE="" NATIVE_COVERAGE_PATHS="" m ${FUZZ_TARGET}
     ret=$?
     popd
     return ${ret}
@@ -19,7 +19,7 @@
 
 build_cov() {
     pushd $(gettop)
-    NATIVE_COVERAGE="true" NATIVE_LINE_COVERAGE="true" COVERAGE_PATHS="${PROJECT_PATH}" m ${FUZZ_TARGET}
+    NATIVE_COVERAGE="true" NATIVE_LINE_COVERAGE="true" NATIVE_COVERAGE_PATHS="${PROJECT_PATH}" m ${FUZZ_TARGET}
     ret=$?
     popd
     return ${ret}
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index 5909cff..55214f5 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -2277,6 +2277,10 @@
         auto operations_it = install_operation_map.find(target_partition->name());
         if (operations_it != install_operation_map.end()) {
             cow_creator->operations = operations_it->second;
+        } else {
+            LOG(INFO) << target_partition->name()
+                      << " isn't included in the payload, skipping the cow creation.";
+            continue;
         }
 
         cow_creator->extra_extents.clear();
diff --git a/fs_mgr/libsnapshot/snapshot_metadata_updater.cpp b/fs_mgr/libsnapshot/snapshot_metadata_updater.cpp
index 60bf796..051584c 100644
--- a/fs_mgr/libsnapshot/snapshot_metadata_updater.cpp
+++ b/fs_mgr/libsnapshot/snapshot_metadata_updater.cpp
@@ -62,6 +62,8 @@
                                                std::string(it->second) + target_suffix_, &p});
         }
     }
+
+    partial_update_ = manifest.partial_update();
 }
 
 bool SnapshotMetadataUpdater::ShrinkPartitions() const {
@@ -82,6 +84,18 @@
 }
 
 bool SnapshotMetadataUpdater::DeletePartitions() const {
+    // For partial update, not all dynamic partitions are included in the payload.
+    // TODO(xunchang) delete the untouched partitions whose group is in the payload.
+    // e.g. Delete vendor in the following scenario
+    // On device:
+    //   Group A: system, vendor
+    // In payload:
+    //   Group A: system
+    if (partial_update_) {
+        LOG(INFO) << "Skip deleting partitions for partial update";
+        return true;
+    }
+
     std::vector<std::string> partitions_to_delete;
     // Don't delete partitions in groups where the group name doesn't have target_suffix,
     // e.g. default.
@@ -139,6 +153,11 @@
 }
 
 bool SnapshotMetadataUpdater::DeleteGroups() const {
+    if (partial_update_) {
+        LOG(INFO) << "Skip deleting groups for partial update";
+        return true;
+    }
+
     std::vector<std::string> existing_groups = builder_->ListGroups();
     for (const auto& existing_group_name : existing_groups) {
         // Don't delete groups without target suffix, e.g. default.
diff --git a/fs_mgr/libsnapshot/snapshot_metadata_updater.h b/fs_mgr/libsnapshot/snapshot_metadata_updater.h
index 83c9460..5b1cbf9 100644
--- a/fs_mgr/libsnapshot/snapshot_metadata_updater.h
+++ b/fs_mgr/libsnapshot/snapshot_metadata_updater.h
@@ -79,6 +79,7 @@
     const std::string target_suffix_;
     std::vector<Group> groups_;
     std::vector<Partition> partitions_;
+    bool partial_update_{false};
 };
 
 }  // namespace snapshot
diff --git a/fs_mgr/libsnapshot/test_helpers.cpp b/fs_mgr/libsnapshot/test_helpers.cpp
index f82a602..de3d912 100644
--- a/fs_mgr/libsnapshot/test_helpers.cpp
+++ b/fs_mgr/libsnapshot/test_helpers.cpp
@@ -52,10 +52,19 @@
 
 bool TestPartitionOpener::GetInfo(const std::string& partition_name,
                                   android::fs_mgr::BlockDeviceInfo* info) const {
-    if (partition_name == "super") {
-        return PartitionOpener::GetInfo(fake_super_path_, info);
+    if (partition_name != "super") {
+        return PartitionOpener::GetInfo(partition_name, info);
     }
-    return PartitionOpener::GetInfo(partition_name, info);
+
+    if (PartitionOpener::GetInfo(fake_super_path_, info)) {
+        // SnapshotUpdateTest uses a relatively small super partition, which requires a small
+        // alignment and 0 offset to work. For the purpose of this test, hardcode the alignment
+        // and offset. This test isn't about testing liblp or libdm.
+        info->alignment_offset = 0;
+        info->alignment = std::min<uint32_t>(info->alignment, static_cast<uint32_t>(128_KiB));
+        return true;
+    }
+    return false;
 }
 
 std::string TestPartitionOpener::GetDeviceString(const std::string& partition_name) const {
diff --git a/fs_mgr/libsnapshot/update_engine/update_metadata.proto b/fs_mgr/libsnapshot/update_engine/update_metadata.proto
index 8a11eaa..202e39b 100644
--- a/fs_mgr/libsnapshot/update_engine/update_metadata.proto
+++ b/fs_mgr/libsnapshot/update_engine/update_metadata.proto
@@ -77,4 +77,5 @@
 message DeltaArchiveManifest {
     repeated PartitionUpdate partitions = 13;
     optional DynamicPartitionMetadata dynamic_partition_metadata = 15;
+    optional bool partial_update = 16;
 }
diff --git a/init/builtins.cpp b/init/builtins.cpp
index 0ac66f2..0b456e7 100644
--- a/init/builtins.cpp
+++ b/init/builtins.cpp
@@ -1221,6 +1221,20 @@
     return {};
 }
 
+static Result<void> MountLinkerConfigForDefaultNamespace() {
+    // No need to mount linkerconfig for default mount namespace if the path does not exist (which
+    // would mean it is already mounted)
+    if (access("/linkerconfig/default", 0) != 0) {
+        return {};
+    }
+
+    if (mount("/linkerconfig/default", "/linkerconfig", nullptr, MS_BIND | MS_REC, nullptr) != 0) {
+        return ErrnoError() << "Failed to mount linker configuration for default mount namespace.";
+    }
+
+    return {};
+}
+
 static bool IsApexUpdatable() {
     static bool updatable = android::sysprop::ApexProperties::updatable().value_or(false);
     return updatable;
@@ -1319,11 +1333,14 @@
 }
 
 static Result<void> do_enter_default_mount_ns(const BuiltinArguments& args) {
-    if (SwitchToDefaultMountNamespace()) {
-        return {};
-    } else {
-        return Error() << "Failed to enter into default mount namespace";
+    if (auto result = SwitchToMountNamespaceIfNeeded(NS_DEFAULT); !result.ok()) {
+        return result.error();
     }
+    if (auto result = MountLinkerConfigForDefaultNamespace(); !result.ok()) {
+        return result.error();
+    }
+    LOG(INFO) << "Switched to default mount namespace";
+    return {};
 }
 
 // Builtin-function-map start
diff --git a/init/mount_namespace.cpp b/init/mount_namespace.cpp
index f3b584c..b9d5d67 100644
--- a/init/mount_namespace.cpp
+++ b/init/mount_namespace.cpp
@@ -176,20 +176,6 @@
     return true;
 }
 
-static Result<void> MountLinkerConfigForDefaultNamespace() {
-    // No need to mount linkerconfig for default mount namespace if the path does not exist (which
-    // would mean it is already mounted)
-    if (access("/linkerconfig/default", 0) != 0) {
-        return {};
-    }
-
-    if (mount("/linkerconfig/default", "/linkerconfig", nullptr, MS_BIND | MS_REC, nullptr) != 0) {
-        return ErrnoError() << "Failed to mount linker configuration for default mount namespace.";
-    }
-
-    return {};
-}
-
 static android::base::unique_fd bootstrap_ns_fd;
 static android::base::unique_fd default_ns_fd;
 
@@ -290,40 +276,20 @@
     return success;
 }
 
-bool SwitchToDefaultMountNamespace() {
-    if (IsRecoveryMode()) {
-        // we don't have multiple namespaces in recovery mode
-        return true;
+Result<void> SwitchToMountNamespaceIfNeeded(MountNamespace target_mount_namespace) {
+    if (IsRecoveryMode() || !IsApexUpdatable()) {
+        // we don't have multiple namespaces in recovery mode or if apex is not updatable
+        return {};
     }
-    if (default_ns_id != GetMountNamespaceId()) {
-        if (setns(default_ns_fd.get(), CLONE_NEWNS) == -1) {
-            PLOG(ERROR) << "Failed to switch back to the default mount namespace.";
-            return false;
-        }
-
-        if (auto result = MountLinkerConfigForDefaultNamespace(); !result.ok()) {
-            LOG(ERROR) << result.error();
-            return false;
+    const auto& ns_id = target_mount_namespace == NS_BOOTSTRAP ? bootstrap_ns_id : default_ns_id;
+    const auto& ns_fd = target_mount_namespace == NS_BOOTSTRAP ? bootstrap_ns_fd : default_ns_fd;
+    const auto& ns_name = target_mount_namespace == NS_BOOTSTRAP ? "bootstrap" : "default";
+    if (ns_id != GetMountNamespaceId() && ns_fd.get() != -1) {
+        if (setns(ns_fd.get(), CLONE_NEWNS) == -1) {
+            return ErrnoError() << "Failed to switch to " << ns_name << " mount namespace.";
         }
     }
-
-    LOG(INFO) << "Switched to default mount namespace";
-    return true;
-}
-
-bool SwitchToBootstrapMountNamespaceIfNeeded() {
-    if (IsRecoveryMode()) {
-        // we don't have multiple namespaces in recovery mode
-        return true;
-    }
-    if (bootstrap_ns_id != GetMountNamespaceId() && bootstrap_ns_fd.get() != -1 &&
-        IsApexUpdatable()) {
-        if (setns(bootstrap_ns_fd.get(), CLONE_NEWNS) == -1) {
-            PLOG(ERROR) << "Failed to switch to bootstrap mount namespace.";
-            return false;
-        }
-    }
-    return true;
+    return {};
 }
 
 }  // namespace init
diff --git a/init/mount_namespace.h b/init/mount_namespace.h
index c41a449..d4d6f82 100644
--- a/init/mount_namespace.h
+++ b/init/mount_namespace.h
@@ -16,12 +16,15 @@
 
 #pragma once
 
+#include <android-base/result.h>
+
 namespace android {
 namespace init {
 
+enum MountNamespace { NS_BOOTSTRAP, NS_DEFAULT };
+
 bool SetupMountNamespaces();
-bool SwitchToDefaultMountNamespace();
-bool SwitchToBootstrapMountNamespaceIfNeeded();
+base::Result<void> SwitchToMountNamespaceIfNeeded(MountNamespace target_mount_namespace);
 
 }  // namespace init
 }  // namespace android
diff --git a/init/property_service.cpp b/init/property_service.cpp
index 82f5b8c..612854d 100644
--- a/init/property_service.cpp
+++ b/init/property_service.cpp
@@ -711,8 +711,8 @@
                 if (it == properties->end()) {
                     (*properties)[key] = value;
                 } else if (it->second != value) {
-                    LOG(WARNING) << "Overriding previous 'ro.' property '" << key << "':'"
-                                 << it->second << "' with new value '" << value << "'";
+                    LOG(WARNING) << "Overriding previous property '" << key << "':'" << it->second
+                                 << "' with new value '" << value << "'";
                     it->second = value;
                 }
             } else {
diff --git a/init/reboot.cpp b/init/reboot.cpp
index ffd58a3..19e83cc 100644
--- a/init/reboot.cpp
+++ b/init/reboot.cpp
@@ -544,6 +544,18 @@
     return still_running;
 }
 
+static Result<void> UnmountAllApexes() {
+    const char* args[] = {"/system/bin/apexd", "--unmount-all"};
+    int status;
+    if (logwrap_fork_execvp(arraysize(args), args, &status, false, LOG_KLOG, true, nullptr) != 0) {
+        return ErrnoError() << "Failed to call '/system/bin/apexd --unmount-all'";
+    }
+    if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
+        return {};
+    }
+    return Error() << "'/system/bin/apexd --unmount-all' failed : " << status;
+}
+
 //* Reboot / shutdown the system.
 // cmd ANDROID_RB_* as defined in android_reboot.h
 // reason Reason string like "reboot", "shutdown,userrequested"
@@ -698,6 +710,11 @@
     // 5. drop caches and disable zram backing device, if exist
     KillZramBackingDevice();
 
+    LOG(INFO) << "Ready to unmount apexes. So far shutdown sequence took " << t;
+    // 6. unmount active apexes, otherwise they might prevent clean unmount of /data.
+    if (auto ret = UnmountAllApexes(); !ret.ok()) {
+        LOG(ERROR) << ret.error();
+    }
     UmountStat stat =
             TryUmountAndFsck(cmd, run_fsck, shutdown_timeout - t.duration(), &reboot_semaphore);
     // Follow what linux shutdown is doing: one more sync with little bit delay
@@ -736,18 +753,6 @@
     StartSendingMessages();
 }
 
-static Result<void> UnmountAllApexes() {
-    const char* args[] = {"/system/bin/apexd", "--unmount-all"};
-    int status;
-    if (logwrap_fork_execvp(arraysize(args), args, &status, false, LOG_KLOG, true, nullptr) != 0) {
-        return ErrnoError() << "Failed to call '/system/bin/apexd --unmount-all'";
-    }
-    if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
-        return {};
-    }
-    return Error() << "'/system/bin/apexd --unmount-all' failed : " << status;
-}
-
 static std::chrono::milliseconds GetMillisProperty(const std::string& name,
                                                    std::chrono::milliseconds default_value) {
     auto value = GetUintProperty(name, static_cast<uint64_t>(default_value.count()));
@@ -831,7 +836,7 @@
         sub_reason = "apex";
         return result;
     }
-    if (!SwitchToBootstrapMountNamespaceIfNeeded()) {
+    if (!SwitchToMountNamespaceIfNeeded(NS_BOOTSTRAP)) {
         sub_reason = "ns_switch";
         return Error() << "Failed to switch to bootstrap namespace";
     }
diff --git a/init/service.cpp b/init/service.cpp
index 165b848..68365b3 100644
--- a/init/service.cpp
+++ b/init/service.cpp
@@ -465,6 +465,16 @@
         pre_apexd_ = true;
     }
 
+    // For pre-apexd services, override the mount namespace to the "bootstrap" one before
+    // starting. Note: "ueventd" is supposed to run in the "default" mount namespace even if
+    // it's pre-apexd, to support loading firmware from APEXes.
+    std::optional<MountNamespace> override_mount_namespace;
+    if (name_ == "ueventd") {
+        override_mount_namespace = NS_DEFAULT;
+    } else if (pre_apexd_) {
+        override_mount_namespace = NS_BOOTSTRAP;
+    }
+
     post_data_ = ServiceList::GetInstance().IsPostData();
 
     LOG(INFO) << "starting service '" << name_ << "'...";
@@ -496,7 +506,8 @@
     if (pid == 0) {
         umask(077);
 
-        if (auto result = EnterNamespaces(namespaces_, name_, pre_apexd_); !result.ok()) {
+        if (auto result = EnterNamespaces(namespaces_, name_, override_mount_namespace);
+            !result.ok()) {
             LOG(FATAL) << "Service '" << name_
                        << "' failed to set up namespaces: " << result.error();
         }
diff --git a/init/service_utils.cpp b/init/service_utils.cpp
index 484c2c8..05e632b 100644
--- a/init/service_utils.cpp
+++ b/init/service_utils.cpp
@@ -194,7 +194,8 @@
     return Descriptor(ANDROID_FILE_ENV_PREFIX + name, std::move(fd));
 }
 
-Result<void> EnterNamespaces(const NamespaceInfo& info, const std::string& name, bool pre_apexd) {
+Result<void> EnterNamespaces(const NamespaceInfo& info, const std::string& name,
+                             std::optional<MountNamespace> override_mount_namespace) {
     for (const auto& [nstype, path] : info.namespaces_to_enter) {
         if (auto result = EnterNamespace(nstype, path.c_str()); !result.ok()) {
             return result;
@@ -202,9 +203,10 @@
     }
 
 #if defined(__ANDROID__)
-    if (pre_apexd) {
-        if (!SwitchToBootstrapMountNamespaceIfNeeded()) {
-            return Error() << "could not enter into the bootstrap mount namespace";
+    if (override_mount_namespace.has_value()) {
+        if (auto result = SwitchToMountNamespaceIfNeeded(override_mount_namespace.value());
+            !result.ok()) {
+            return result;
         }
     }
 #endif
diff --git a/init/service_utils.h b/init/service_utils.h
index 3f1071e..e74f8c1 100644
--- a/init/service_utils.h
+++ b/init/service_utils.h
@@ -19,12 +19,14 @@
 #include <sys/resource.h>
 #include <sys/types.h>
 
+#include <optional>
 #include <string>
 #include <vector>
 
 #include <android-base/unique_fd.h>
 #include <cutils/iosched_policy.h>
 
+#include "mount_namespace.h"
 #include "result.h"
 
 namespace android {
@@ -66,7 +68,8 @@
     // Pair of namespace type, path to name.
     std::vector<std::pair<int, std::string>> namespaces_to_enter;
 };
-Result<void> EnterNamespaces(const NamespaceInfo& info, const std::string& name, bool pre_apexd);
+Result<void> EnterNamespaces(const NamespaceInfo& info, const std::string& name,
+                             std::optional<MountNamespace> override_mount_namespace);
 
 struct ProcessAttributes {
     std::string console;
diff --git a/libmodprobe/include/modprobe/modprobe.h b/libmodprobe/include/modprobe/modprobe.h
index a7687af..4806b08 100644
--- a/libmodprobe/include/modprobe/modprobe.h
+++ b/libmodprobe/include/modprobe/modprobe.h
@@ -36,7 +36,7 @@
                             std::vector<std::string>* post_dependencies);
     void ResetModuleCount() { module_count_ = 0; }
     int GetModuleCount() { return module_count_; }
-    void EnableBlacklist(bool enable);
+    void EnableBlocklist(bool enable);
     void EnableVerbose(bool enable);
 
   private:
@@ -55,7 +55,7 @@
     bool ParseSoftdepCallback(const std::vector<std::string>& args);
     bool ParseLoadCallback(const std::vector<std::string>& args);
     bool ParseOptionsCallback(const std::vector<std::string>& args);
-    bool ParseBlacklistCallback(const std::vector<std::string>& args);
+    bool ParseBlocklistCallback(const std::vector<std::string>& args);
     void ParseKernelCmdlineOptions();
     void ParseCfg(const std::string& cfg, std::function<bool(const std::vector<std::string>&)> f);
 
@@ -65,8 +65,8 @@
     std::vector<std::pair<std::string, std::string>> module_post_softdep_;
     std::vector<std::string> module_load_;
     std::unordered_map<std::string, std::string> module_options_;
-    std::set<std::string> module_blacklist_;
+    std::set<std::string> module_blocklist_;
     std::unordered_set<std::string> module_loaded_;
     int module_count_ = 0;
-    bool blacklist_enabled = false;
+    bool blocklist_enabled = false;
 };
diff --git a/libmodprobe/libmodprobe.cpp b/libmodprobe/libmodprobe.cpp
index d193796..5a6ae8b 100644
--- a/libmodprobe/libmodprobe.cpp
+++ b/libmodprobe/libmodprobe.cpp
@@ -194,17 +194,17 @@
     return true;
 }
 
-bool Modprobe::ParseBlacklistCallback(const std::vector<std::string>& args) {
+bool Modprobe::ParseBlocklistCallback(const std::vector<std::string>& args) {
     auto it = args.begin();
     const std::string& type = *it++;
 
-    if (type != "blacklist") {
-        LOG(ERROR) << "non-blacklist line encountered in modules.blacklist";
+    if (type != "blocklist") {
+        LOG(ERROR) << "non-blocklist line encountered in modules.blocklist";
         return false;
     }
 
     if (args.size() != 2) {
-        LOG(ERROR) << "lines in modules.blacklist must have exactly 2 entries, not " << args.size();
+        LOG(ERROR) << "lines in modules.blocklist must have exactly 2 entries, not " << args.size();
         return false;
     }
 
@@ -214,7 +214,7 @@
     if (canonical_name.empty()) {
         return false;
     }
-    this->module_blacklist_.emplace(canonical_name);
+    this->module_blocklist_.emplace(canonical_name);
 
     return true;
 }
@@ -331,16 +331,16 @@
         auto options_callback = std::bind(&Modprobe::ParseOptionsCallback, this, _1);
         ParseCfg(base_path + "/modules.options", options_callback);
 
-        auto blacklist_callback = std::bind(&Modprobe::ParseBlacklistCallback, this, _1);
-        ParseCfg(base_path + "/modules.blacklist", blacklist_callback);
+        auto blocklist_callback = std::bind(&Modprobe::ParseBlocklistCallback, this, _1);
+        ParseCfg(base_path + "/modules.blocklist", blocklist_callback);
     }
 
     ParseKernelCmdlineOptions();
     android::base::SetMinimumLogSeverity(android::base::INFO);
 }
 
-void Modprobe::EnableBlacklist(bool enable) {
-    blacklist_enabled = enable;
+void Modprobe::EnableBlocklist(bool enable) {
+    blocklist_enabled = enable;
 }
 
 void Modprobe::EnableVerbose(bool enable) {
diff --git a/libmodprobe/libmodprobe_ext.cpp b/libmodprobe/libmodprobe_ext.cpp
index 5e4fddd..fb1f5e7 100644
--- a/libmodprobe/libmodprobe_ext.cpp
+++ b/libmodprobe/libmodprobe_ext.cpp
@@ -80,8 +80,8 @@
 
 bool Modprobe::ModuleExists(const std::string& module_name) {
     struct stat fileStat;
-    if (blacklist_enabled && module_blacklist_.count(module_name)) {
-        LOG(INFO) << "module " << module_name << " is blacklisted";
+    if (blocklist_enabled && module_blocklist_.count(module_name)) {
+        LOG(INFO) << "module " << module_name << " is blocklisted";
         return false;
     }
     auto deps = GetDependencies(module_name);
diff --git a/libmodprobe/libmodprobe_ext_test.cpp b/libmodprobe/libmodprobe_ext_test.cpp
index 9ee5ba7..e79bfaf 100644
--- a/libmodprobe/libmodprobe_ext_test.cpp
+++ b/libmodprobe/libmodprobe_ext_test.cpp
@@ -72,7 +72,7 @@
 
 bool Modprobe::ModuleExists(const std::string& module_name) {
     auto deps = GetDependencies(module_name);
-    if (blacklist_enabled && module_blacklist_.count(module_name)) {
+    if (blocklist_enabled && module_blocklist_.count(module_name)) {
         return false;
     }
     if (deps.empty()) {
diff --git a/libmodprobe/libmodprobe_test.cpp b/libmodprobe/libmodprobe_test.cpp
index eea0abd..5919c49 100644
--- a/libmodprobe/libmodprobe_test.cpp
+++ b/libmodprobe/libmodprobe_test.cpp
@@ -113,9 +113,9 @@
             "options test9.ko param_x=1 param_y=2 param_z=3\n"
             "options test100.ko param_1=1\n";
 
-    const std::string modules_blacklist =
-            "blacklist test9.ko\n"
-            "blacklist test3.ko\n";
+    const std::string modules_blocklist =
+            "blocklist test9.ko\n"
+            "blocklist test3.ko\n";
 
     const std::string modules_load =
             "test4.ko\n"
@@ -139,7 +139,7 @@
                                                  0600, getuid(), getgid()));
     ASSERT_TRUE(android::base::WriteStringToFile(modules_load, dir_path + "/modules.load", 0600,
                                                  getuid(), getgid()));
-    ASSERT_TRUE(android::base::WriteStringToFile(modules_blacklist, dir_path + "/modules.blacklist",
+    ASSERT_TRUE(android::base::WriteStringToFile(modules_blocklist, dir_path + "/modules.blocklist",
                                                  0600, getuid(), getgid()));
 
     for (auto i = test_modules.begin(); i != test_modules.end(); ++i) {
@@ -176,6 +176,6 @@
 
     EXPECT_TRUE(modules_loaded == expected_after_remove);
 
-    m.EnableBlacklist(true);
+    m.EnableBlocklist(true);
     EXPECT_FALSE(m.LoadWithAliases("test4", true));
 }
diff --git a/libprocessgroup/cgroup_map.cpp b/libprocessgroup/cgroup_map.cpp
index 2fc920b..b82b0ab 100644
--- a/libprocessgroup/cgroup_map.cpp
+++ b/libprocessgroup/cgroup_map.cpp
@@ -115,7 +115,13 @@
         return true;
     }
 
-    std::string cg_tag = StringPrintf(":%s:", name());
+    std::string cg_tag;
+
+    if (version() == 2) {
+        cg_tag = "0::";
+    } else {
+        cg_tag = StringPrintf(":%s:", name());
+    }
     size_t start_pos = content.find(cg_tag);
     if (start_pos == std::string::npos) {
         return false;
diff --git a/libprocessgroup/cgrouprc/include/android/cgrouprc.h b/libprocessgroup/cgrouprc/include/android/cgrouprc.h
index 0ce5123..7e74432 100644
--- a/libprocessgroup/cgrouprc/include/android/cgrouprc.h
+++ b/libprocessgroup/cgrouprc/include/android/cgrouprc.h
@@ -69,6 +69,7 @@
  * Flag bitmask used in ACgroupController_getFlags
  */
 #define CGROUPRC_CONTROLLER_FLAG_MOUNTED 0x1
+#define CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION 0x2
 
 #if __ANDROID_API__ >= __ANDROID_API_R__
 
diff --git a/libprocessgroup/setup/cgroup_descriptor.h b/libprocessgroup/setup/cgroup_descriptor.h
index f029c4f..699c03c 100644
--- a/libprocessgroup/setup/cgroup_descriptor.h
+++ b/libprocessgroup/setup/cgroup_descriptor.h
@@ -25,7 +25,7 @@
 class CgroupDescriptor {
   public:
     CgroupDescriptor(uint32_t version, const std::string& name, const std::string& path,
-                     mode_t mode, const std::string& uid, const std::string& gid);
+                     mode_t mode, const std::string& uid, const std::string& gid, uint32_t flags);
 
     const format::CgroupController* controller() const { return &controller_; }
     mode_t mode() const { return mode_; }
diff --git a/libprocessgroup/setup/cgroup_map_write.cpp b/libprocessgroup/setup/cgroup_map_write.cpp
index 17ea06e..25f16a6 100644
--- a/libprocessgroup/setup/cgroup_map_write.cpp
+++ b/libprocessgroup/setup/cgroup_map_write.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "libprocessgroup"
 
+#include <dirent.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <grp.h>
@@ -54,58 +55,53 @@
 static constexpr const char* CGROUPS_DESC_FILE = "/etc/cgroups.json";
 static constexpr const char* CGROUPS_DESC_VENDOR_FILE = "/vendor/etc/cgroups.json";
 
-static bool Mkdir(const std::string& path, mode_t mode, const std::string& uid,
-                  const std::string& gid) {
-    if (mode == 0) {
-        mode = 0755;
-    }
-
-    if (mkdir(path.c_str(), mode) != 0) {
-        /* chmod in case the directory already exists */
-        if (errno == EEXIST) {
-            if (fchmodat(AT_FDCWD, path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) {
-                // /acct is a special case when the directory already exists
-                // TODO: check if file mode is already what we want instead of using EROFS
-                if (errno != EROFS) {
-                    PLOG(ERROR) << "fchmodat() failed for " << path;
-                    return false;
-                }
-            }
-        } else {
-            PLOG(ERROR) << "mkdir() failed for " << path;
-            return false;
-        }
-    }
-
-    if (uid.empty()) {
-        return true;
-    }
-
-    passwd* uid_pwd = getpwnam(uid.c_str());
-    if (!uid_pwd) {
-        PLOG(ERROR) << "Unable to decode UID for '" << uid << "'";
-        return false;
-    }
-
-    uid_t pw_uid = uid_pwd->pw_uid;
+static bool ChangeDirModeAndOwner(const std::string& path, mode_t mode, const std::string& uid,
+                                  const std::string& gid, bool permissive_mode = false) {
+    uid_t pw_uid = -1;
     gid_t gr_gid = -1;
-    if (!gid.empty()) {
-        group* gid_pwd = getgrnam(gid.c_str());
-        if (!gid_pwd) {
-            PLOG(ERROR) << "Unable to decode GID for '" << gid << "'";
+
+    if (!uid.empty()) {
+        passwd* uid_pwd = getpwnam(uid.c_str());
+        if (!uid_pwd) {
+            PLOG(ERROR) << "Unable to decode UID for '" << uid << "'";
             return false;
         }
-        gr_gid = gid_pwd->gr_gid;
+
+        pw_uid = uid_pwd->pw_uid;
+        gr_gid = -1;
+
+        if (!gid.empty()) {
+            group* gid_pwd = getgrnam(gid.c_str());
+            if (!gid_pwd) {
+                PLOG(ERROR) << "Unable to decode GID for '" << gid << "'";
+                return false;
+            }
+            gr_gid = gid_pwd->gr_gid;
+        }
     }
 
-    if (lchown(path.c_str(), pw_uid, gr_gid) < 0) {
-        PLOG(ERROR) << "lchown() failed for " << path;
+    auto dir = std::unique_ptr<DIR, decltype(&closedir)>(opendir(path.c_str()), closedir);
+
+    if (dir == NULL) {
+        PLOG(ERROR) << "opendir failed for " << path;
         return false;
     }
 
-    /* chown may have cleared S_ISUID and S_ISGID, chmod again */
-    if (mode & (S_ISUID | S_ISGID)) {
-        if (fchmodat(AT_FDCWD, path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) {
+    struct dirent* dir_entry;
+    while ((dir_entry = readdir(dir.get()))) {
+        if (!strcmp("..", dir_entry->d_name)) {
+            continue;
+        }
+
+        std::string file_path = path + "/" + dir_entry->d_name;
+
+        if (pw_uid != -1 && lchown(file_path.c_str(), pw_uid, gr_gid) < 0) {
+            PLOG(ERROR) << "lchown() failed for " << file_path;
+            return false;
+        }
+
+        if (fchmodat(AT_FDCWD, file_path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0 &&
+            (errno != EROFS || !permissive_mode)) {
             PLOG(ERROR) << "fchmodat() failed for " << path;
             return false;
         }
@@ -114,6 +110,67 @@
     return true;
 }
 
+static bool Mkdir(const std::string& path, mode_t mode, const std::string& uid,
+                  const std::string& gid) {
+    bool permissive_mode = false;
+
+    if (mode == 0) {
+        /* Allow chmod to fail */
+        permissive_mode = true;
+        mode = 0755;
+    }
+
+    if (mkdir(path.c_str(), mode) != 0) {
+        // /acct is a special case when the directory already exists
+        if (errno != EEXIST) {
+            PLOG(ERROR) << "mkdir() failed for " << path;
+            return false;
+        } else {
+            permissive_mode = true;
+        }
+    }
+
+    if (uid.empty() && permissive_mode) {
+        return true;
+    }
+
+    if (!ChangeDirModeAndOwner(path, mode, uid, gid, permissive_mode)) {
+        PLOG(ERROR) << "change of ownership or mode failed for " << path;
+        return false;
+    }
+
+    return true;
+}
+
+static void MergeCgroupToDescriptors(std::map<std::string, CgroupDescriptor>* descriptors,
+                                     const Json::Value& cgroup, const std::string& name,
+                                     const std::string& root_path, int cgroups_version) {
+    std::string path;
+
+    if (!root_path.empty()) {
+        path = root_path + "/" + cgroup["Path"].asString();
+    } else {
+        path = cgroup["Path"].asString();
+    }
+
+    uint32_t controller_flags = 0;
+
+    if (cgroup["NeedsActivation"].isBool() && cgroup["NeedsActivation"].asBool()) {
+        controller_flags |= CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION;
+    }
+
+    CgroupDescriptor descriptor(
+            cgroups_version, name, path, std::strtoul(cgroup["Mode"].asString().c_str(), 0, 8),
+            cgroup["UID"].asString(), cgroup["GID"].asString(), controller_flags);
+
+    auto iter = descriptors->find(name);
+    if (iter == descriptors->end()) {
+        descriptors->emplace(name, descriptor);
+    } else {
+        iter->second = descriptor;
+    }
+}
+
 static bool ReadDescriptorsFromFile(const std::string& file_name,
                                     std::map<std::string, CgroupDescriptor>* descriptors) {
     std::vector<CgroupDescriptor> result;
@@ -135,36 +192,19 @@
         const Json::Value& cgroups = root["Cgroups"];
         for (Json::Value::ArrayIndex i = 0; i < cgroups.size(); ++i) {
             std::string name = cgroups[i]["Controller"].asString();
-            auto iter = descriptors->find(name);
-            if (iter == descriptors->end()) {
-                descriptors->emplace(
-                        name, CgroupDescriptor(
-                                      1, name, cgroups[i]["Path"].asString(),
-                                      std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8),
-                                      cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString()));
-            } else {
-                iter->second = CgroupDescriptor(
-                        1, name, cgroups[i]["Path"].asString(),
-                        std::strtoul(cgroups[i]["Mode"].asString().c_str(), 0, 8),
-                        cgroups[i]["UID"].asString(), cgroups[i]["GID"].asString());
-            }
+            MergeCgroupToDescriptors(descriptors, cgroups[i], name, "", 1);
         }
     }
 
     if (root.isMember("Cgroups2")) {
         const Json::Value& cgroups2 = root["Cgroups2"];
-        auto iter = descriptors->find(CGROUPV2_CONTROLLER_NAME);
-        if (iter == descriptors->end()) {
-            descriptors->emplace(
-                    CGROUPV2_CONTROLLER_NAME,
-                    CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(),
-                                     std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8),
-                                     cgroups2["UID"].asString(), cgroups2["GID"].asString()));
-        } else {
-            iter->second =
-                    CgroupDescriptor(2, CGROUPV2_CONTROLLER_NAME, cgroups2["Path"].asString(),
-                                     std::strtoul(cgroups2["Mode"].asString().c_str(), 0, 8),
-                                     cgroups2["UID"].asString(), cgroups2["GID"].asString());
+        std::string root_path = cgroups2["Path"].asString();
+        MergeCgroupToDescriptors(descriptors, cgroups2, CGROUPV2_CONTROLLER_NAME, "", 2);
+
+        const Json::Value& childGroups = cgroups2["Controllers"];
+        for (Json::Value::ArrayIndex i = 0; i < childGroups.size(); ++i) {
+            std::string name = childGroups[i]["Controller"].asString();
+            MergeCgroupToDescriptors(descriptors, childGroups[i], name, root_path, 2);
         }
     }
 
@@ -192,17 +232,51 @@
 static bool SetupCgroup(const CgroupDescriptor& descriptor) {
     const format::CgroupController* controller = descriptor.controller();
 
-    // mkdir <path> [mode] [owner] [group]
-    if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) {
-        LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup";
-        return false;
-    }
-
     int result;
     if (controller->version() == 2) {
-        result = mount("none", controller->path(), "cgroup2", MS_NODEV | MS_NOEXEC | MS_NOSUID,
-                       nullptr);
+        result = 0;
+        if (!strcmp(controller->name(), CGROUPV2_CONTROLLER_NAME)) {
+            // /sys/fs/cgroup is created by cgroup2 with specific selinux permissions,
+            // try to create again in case the mount point is changed
+            if (!Mkdir(controller->path(), 0, "", "")) {
+                LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup";
+                return false;
+            }
+
+            result = mount("none", controller->path(), "cgroup2", MS_NODEV | MS_NOEXEC | MS_NOSUID,
+                           nullptr);
+
+            // selinux permissions change after mounting, so it's ok to change mode and owner now
+            if (!ChangeDirModeAndOwner(controller->path(), descriptor.mode(), descriptor.uid(),
+                                       descriptor.gid())) {
+                LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup";
+                result = -1;
+            } else {
+                LOG(ERROR) << "restored ownership for " << controller->name() << " cgroup";
+            }
+        } else {
+            if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) {
+                LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup";
+                return false;
+            }
+
+            if (controller->flags() & CGROUPRC_CONTROLLER_FLAG_NEEDS_ACTIVATION) {
+                std::string str = std::string("+") + controller->name();
+                std::string path = std::string(controller->path()) + "/cgroup.subtree_control";
+
+                if (!base::WriteStringToFile(str, path)) {
+                    LOG(ERROR) << "Failed to activate controller " << controller->name();
+                    return false;
+                }
+            }
+        }
     } else {
+        // mkdir <path> [mode] [owner] [group]
+        if (!Mkdir(controller->path(), descriptor.mode(), descriptor.uid(), descriptor.gid())) {
+            LOG(ERROR) << "Failed to create directory for " << controller->name() << " cgroup";
+            return false;
+        }
+
         // Unfortunately historically cpuset controller was mounted using a mount command
         // different from all other controllers. This results in controller attributes not
         // to be prepended with controller name. For example this way instead of
@@ -267,8 +341,8 @@
 
 CgroupDescriptor::CgroupDescriptor(uint32_t version, const std::string& name,
                                    const std::string& path, mode_t mode, const std::string& uid,
-                                   const std::string& gid)
-    : controller_(version, 0, name, path), mode_(mode), uid_(uid), gid_(gid) {}
+                                   const std::string& gid, uint32_t flags = 0)
+    : controller_(version, flags, name, path), mode_(mode), uid_(uid), gid_(gid) {}
 
 void CgroupDescriptor::set_mounted(bool mounted) {
     uint32_t flags = controller_.flags();
diff --git a/llkd/README.md b/llkd/README.md
index 191f988..6f92f14 100644
--- a/llkd/README.md
+++ b/llkd/README.md
@@ -1,199 +1,237 @@
-Android Live-LocK Daemon
-========================
+<!--
+Project: /_project.yaml
+Book: /_book.yaml
 
-Introduction
-------------
+{% include "_versions.html" %}
+-->
 
-Android Live-LocK Daemon (llkd) is used to catch kernel deadlocks and mitigate.
+<!--
+  Copyright 2020 The Android Open Source Project
 
-Code is structured to allow integration into another service as either as part
-of the main loop, or spun off as a thread should that be necessary.  A default
-standalone implementation is provided by llkd component.
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
 
-The 'C' interface from libllkd component is thus:
+      http://www.apache.org/licenses/LICENSE-2.0
 
-    #include "llkd.h"
-    bool llkInit(const char* threadname) /* return true if enabled */
-    unsigned llkCheckMillseconds(void)   /* ms to sleep for next check */
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 
-If a threadname is provided, a thread will be automatically spawned, otherwise
-caller must call llkCheckMilliseconds in its main loop.  Function will return
-the period of time before the next expected call to this handler.
+# Android Live-LocK Daemon (llkd)
 
-Operations
-----------
+Android 10 <!-- {{ androidQVersionNumber }} --> includes the Android Live-LocK Daemon
+(`llkd`), which is designed to catch and mitigate kernel deadlocks. The `llkd`
+component provides a default standalone implementation, but you can
+alternatively integrate the `llkd` code into another service, either as part of
+the main loop or as a separate thread.
 
-There are two detection scenarios. Persistent D or Z state, and persistent
+## Detection scenarios <!-- {:#detection-scenarios} -->
+
+The `llkd` has two detection scenarios: Persistent D or Z state, and persistent
 stack signature.
 
-If a thread is in D or Z state with no forward progress for longer than
-ro.llk.timeout_ms, or ro.llk.[D|Z].timeout_ms, kill the process or parent
-process respectively.  If another scan shows the same process continues to
-exist, then have a confirmed live-lock condition and need to panic.  Panic
-the kernel in a manner to provide the greatest bugreporting details as to the
-condition.  Add a alarm self watchdog should llkd ever get locked up that is
-double the expected time to flow through the mainloop.  Sampling is every
-ro.llk_sample_ms.
+### Persistent D or Z state <!-- {:#persistent-d-or-z-state} -->
 
-For usedebug releases only, persistent stack signature checking is enabled.
-If a thread in any state but Z, has a persistent listed ro.llk.stack kernel
-symbol always being reported, even if there is forward scheduling progress, for
-longer than ro.llk.timeout_ms, or ro.llk.stack.timeout_ms, then issue a kill
-to the process.  If another scan shows the same process continues to exist,
-then have a confirmed live-lock condition and need to panic.  There is no
-ABA detection since forward scheduling progress is allowed, thus the condition
-for the symbols are:
+If a thread is in D (uninterruptible sleep) or Z (zombie) state with no forward
+progress for longer than `ro.llk.timeout_ms` or `ro.llk.[D|Z].timeout_ms`, the
+`llkd` kills the process (or parent process). If a subsequent scan shows the
+same process continues to exist, the `llkd` confirms a live-lock condition and
+panics the kernel in a manner that provides the most detailed bug report for the
+condition.
 
-- Check is looking for " __symbol__+0x" or " __symbol__.cfi+0x" in
-  /proc/__pid__/stack.
-- The __symbol__ should be rare and short lived enough that on a typical
-  system the function is seen at most only once in a sample over the timeout
-  period of ro.llk.stack.timeout_ms, samples occur every ro.llk.check_ms. This
-  can be the only way to prevent a false trigger as there is no ABA protection.
-- Persistent continuously when the live lock condition exists.
-- Should be just below the function that is calling the lock that could
-  contend, because if the lock is below or in the symbol function, the
-  symbol will show in all affected processes, not just the one that
-  caused the lockup.
+The `llkd` includes a self watchdog that alarms if the `llkd` itself locks up;
+the watchdog period is double the expected time to flow through the mainloop,
+and sampling is every `ro.llk_sample_ms`.
 
-Default will not monitor init, or [kthreadd] and all that [kthreadd] spawns.
-This reduces the effectiveness of llkd by limiting its coverage.  If there is
-value in covering [kthreadd] spawned threads, the requirement will be that
-the drivers not remain in a persistent 'D' state, or that they have mechanisms
-to recover the thread should it be killed externally (this is good driver
-coding hygiene, a common request to add such to publicly reviewed kernel.org
-maintained drivers).  For instance use wait_event_interruptible() instead of
-wait_event().  The blacklists can be adjusted accordingly if these
-conditions are met to cover kernel components.  For the stack symbol checking,
-there is an additional process blacklist so that we do not incide sepolicy
-violations on services that block ptrace operations.
+### Persistent stack signature <!-- {:#persistent-stack-signature} -->
 
-An accompanying gTest set have been added, and will setup a persistent D or Z
-process, with and without forward progress, but not in a live-lock state
-because that would require a buggy kernel, or a module or kernel modification
-to stimulate.  The test will check that llkd will mitigate first by killing
-the appropriate process.  D state is setup by vfork() waiting for exec() in
-child process.  Z state is setup by fork() and an un-waited for child process.
-Should be noted that both of these conditions should never happen on Android
-on purpose, and llkd effectively sweeps up processes that create these
-conditions.  If the test can, it will reconfigure llkd to expedite the test
-duration by adjusting the ro.llk.* Android properties.  Tests run the D state
-with some scheduling progress to ensure that ABA checking prevents false
-triggers. If 100% reliable ABA on platform, then ro.llk.killtest can be
-set to false; however this will result in some of the unit tests to panic
-kernel instead of deal with more graceful kill operation.
+For userdebug releases, the `llkd` can detect kernel live-locks using persistent
+stack signature checking. If a thread in any state except Z has a persistent
+listed `ro.llk.stack` kernel symbol that is reported for longer than
+`ro.llk.timeout_ms` or `ro.llk.stack.timeout_ms`, the `llkd` kills the process
+(even if there is forward scheduling progress). If a subsequent scan shows the
+same process continues to exist, the `llkd` confirms a live-lock condition and
+panics the kernel in a manner that provides the most detailed bug report for the
+condition.
 
-Android Properties
-------------------
+Note: Because forward scheduling progress is allowed, the `llkd` does not
+perform [ABA detection](https://en.wikipedia.org/wiki/ABA_problem){:.external}.
 
-The following are the Android Properties llkd respond to.
-*prop*_ms named properties are in milliseconds.
-Properties that use comma (*,*) separator for lists, use a leading separator to
-preserve default and add or subtract entries with (*optional*) plus (*+*) and
-minus (*-*) prefixes respectively.
-For these lists, the string "*false*" is synonymous with an *empty* list,
-and *blank* or *missing* resorts to the specified *default* value.
+The `llkd` check persists continuously when the live lock condition exists and
+looks for the composed strings `" symbol+0x"` or `" symbol.cfi+0x"` in the
+`/proc/pid/stack` file on Linux. The list of symbols is in `ro.llk.stack` and
+defaults to the comma-separated list of
+"`cma_alloc,__get_user_pages,bit_wait_io,wait_on_page_bit_killable`".
 
-#### ro.config.low_ram
-device is configured with limited memory.
+Symbols should be rare and short-lived enough that on a typical system the
+function is seen only once in a sample over the timeout period of
+`ro.llk.stack.timeout_ms` (samples occur every `ro.llk.check_ms`). Due to lack
+of ABA protection, this is the only way to prevent a false trigger. The symbol
+function must appear below the function calling the lock that could contend. If
+the lock is below or in the symbol function, the symbol appears in all affected
+processes, not just the one that caused the lockup.
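+
+Matching is done against the raw frame lines of `/proc/pid/stack`. For
+instance, a frame like the following (offsets are illustrative only) would
+match the default `cma_alloc` entry because it contains `" cma_alloc+0x"`:
+
+    [<0>] cma_alloc+0x17c/0x3a8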
 
-#### ro.debuggable
-device is configured for userdebug or eng build.
+## Coverage <!-- {:#coverage} -->
 
-#### ro.llk.sysrq_t
-default not ro.config.low_ram, or ro.debuggable if property is "eng".
-if true do sysrq t (dump all threads).
+The default implementation of `llkd` does not monitor `init`, `[kthreadd]`, or
+`[kthreadd]` spawns. For the `llkd` to cover `[kthreadd]`-spawned threads:
 
-#### ro.llk.enable
-default false, allow live-lock daemon to be enabled.
+* Drivers must not remain in a persistent D state,
 
-#### llk.enable
-default ro.llk.enable, and evaluated for eng.
+OR
 
-#### ro.khungtask.enable
-default false, allow [khungtask] daemon to be enabled.
+* Drivers must have mechanisms to recover the thread should it be killed
+  externally. For example, use `wait_event_interruptible()` instead of
+  `wait_event()`.
 
-#### khungtask.enable
-default ro.khungtask.enable and evaluated for eng.
+If one of the above conditions is met, the `llkd` ignorelist can be adjusted to
+cover kernel components.  Stack symbol checking involves an additional process
+ignore list to prevent sepolicy violations on services that block `ptrace`
+operations.
 
-#### ro.llk.mlockall
-default false, enable call to mlockall().
+## Android properties <!-- {:#android-properties} -->
 
-#### ro.khungtask.timeout
-default value 12 minutes, [khungtask] maximum timelimit.
+The `llkd` responds to several Android properties (listed below).
 
-#### ro.llk.timeout_ms
-default 10 minutes, D or Z maximum timelimit, double this value and it sets
-the alarm watchdog for llkd.
+* Properties whose names end in `_ms` are in milliseconds.
+* Properties that use comma (,) separator for lists use a leading separator to
+  preserve the default entry, then add or subtract entries with optional plus
+  (+) and minus (-) prefixes respectively. For these lists, the string "false"
+  is synonymous with an empty list, and blank or missing entries resort to the
+  specified default value.
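+
+For example (a hypothetical property-file line; `my_driver_wait` is a made-up
+symbol name), the following keeps the default `ro.llk.stack` list while adding
+one entry and removing another:
+
+    ro.llk.stack=,+my_driver_wait,-cma_alloc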
 
-#### ro.llk.D.timeout_ms
-default ro.llk.timeout_ms, D maximum timelimit.
+### ro.config.low_ram <!-- {:#ro-config-low-ram} -->
 
-#### ro.llk.Z.timeout_ms
-default ro.llk.timeout_ms, Z maximum timelimit.
+Device is configured with limited memory.
 
-#### ro.llk.stack.timeout_ms
-default ro.llk.timeout_ms,
-checking for persistent stack symbols maximum timelimit.
-Only active on userdebug or eng builds.
+### ro.debuggable <!-- {:#ro-debuggable} -->
 
-#### ro.llk.check_ms
-default 2 minutes samples of threads for D or Z.
+Device is configured for userdebug or eng build.
 
-#### ro.llk.stack
-default cma_alloc,__get_user_pages,bit_wait_io,wait_on_page_bit_killable
-comma separated list of kernel symbols.
-Look for kernel stack symbols that if ever persistently present can
-indicate a subsystem is locked up.
-Beware, check does not on purpose do forward scheduling ABA except by polling
-every ro.llk_check_ms over the period ro.llk.stack.timeout_ms, so stack symbol
-should be exceptionally rare and fleeting.
-One must be convinced that it is virtually *impossible* for symbol to show up
-persistently in all samples of the stack.
-Again, looks for a match for either " **symbol**+0x" or " **symbol**.cfi+0x"
-in stack expansion.
-Only available on userdebug or eng builds, limited privileges due to security
-concerns on user builds prevents this checking.
+### ro.llk.sysrq_t <!-- {:#ro-llk-sysrq-t} -->
 
-#### ro.llk.blacklist.process
-default 0,1,2 (kernel, init and [kthreadd]) plus process names
-init,[kthreadd],[khungtaskd],lmkd,llkd,watchdogd,
-[watchdogd],[watchdogd/0],...,[watchdogd/***get_nprocs**-1*].
-Do not watch these processes.  A process can be comm, cmdline or pid reference.
-NB: automated default here can be larger than the current maximum property
-size of 92.
-NB: false is a very very very unlikely process to want to blacklist.
+If true, dump all threads (`sysrq t`). Default is not `ro.config.low_ram`, or
+`ro.debuggable` if the property is "eng".
 
-#### ro.llk.blacklist.parent
-default 0,2,adbd&[setsid] (kernel, [kthreadd] and adbd *only for zombie setsid*).
-Do not watch processes that have this parent.
-An ampersand (*&*) separator is used to specify that the parent is ignored
-only in combination with the target child process.
-Ampersand was selected because it is never part of a process name,
-however a setprop in the shell requires it to be escaped or quoted;
-init rc file where this is normally specified does not have this issue.
-A parent or target processes can be specified as comm, cmdline or pid reference.
+### ro.llk.enable <!-- {:#ro-llk-enable} -->
 
-#### ro.llk.blacklist.uid
-default *empty* or false, comma separated list of uid numbers or names.
-Do not watch processes that match this uid.
+Allow live-lock daemon to be enabled. Default is false.
 
-#### ro.llk.blacklist.process.stack
-default process names init,lmkd.llkd,llkd,keystore,ueventd,apexd,logd.
-This subset of processes are not monitored for live lock stack signatures.
-Also prevents the sepolicy violation associated with processes that block
-ptrace, as these can not be checked anyways.
-Only active on userdebug and eng builds.
+### llk.enable <!-- {:#llk-enable} -->
 
-Architectural Concerns
-----------------------
+Evaluated for eng builds. Default is `ro.llk.enable`.
 
-- built-in [khungtask] daemon is too generic and trips on driver code that
-  sits around in D state too much.  To switch to S instead makes the task(s)
-  killable, so the drivers should be able to resurrect them if needed.
-- Properties are limited to 92 characters.
-- Create kernel module and associated gTest to actually test panic.
-- Create gTest to test out blacklist (ro.llk.blacklist.*properties* generally
-  not be inputs).  Could require more test-only interfaces to libllkd.
-- Speed up gTest using something else than ro.llk.*properties*, which should
-  not be inputs as they should be baked into the product.
+### ro.khungtask.enable <!-- {:#ro-khungtask-enable} -->
+
+Allows the `[khungtask]` daemon to be enabled. Default is false.
+
+### khungtask.enable <!-- {:#khungtask-enable} -->
+
+Evaluated for eng builds. Default is `ro.khungtask.enable`.
+
+### ro.llk.mlockall <!-- {:#ro-llk-mlockall} -->
+
+Enable call to `mlockall()`. Default is false.
+
+### ro.khungtask.timeout <!-- {:#ro-khungtask-timeout} -->
+
+`[khungtask]` maximum time limit. Default is 12 minutes.
+
+### ro.llk.timeout_ms <!-- {:#ro-llk-timeout-ms} -->
+
+D or Z maximum time limit. Default is 10 minutes. Double this value to set the
+alarm watchdog for `llkd`.
+
+### ro.llk.D.timeout_ms <!-- {:#ro-llk-D-timeout-ms} -->
+
+D maximum time limit. Default is `ro.llk.timeout_ms`.
+
+### ro.llk.Z.timeout_ms <!-- {:#ro-llk-Z-timeout-ms} -->
+
+Z maximum time limit. Default is `ro.llk.timeout_ms`.
+
+### ro.llk.stack.timeout_ms <!-- {:#ro-llk-stack-timeout-ms} -->
+
+Maximum time limit for the persistent stack symbol check. Default is
+`ro.llk.timeout_ms`. **Active only on userdebug or eng builds**.
+
+### ro.llk.check_ms <!-- {:#ro-llk-check-ms} -->
+
+Sampling interval for checking threads in D or Z state. Default is two minutes.
+
+### ro.llk.stack <!-- {:#ro-llk-stack} -->
+
+Checks for kernel stack symbols that, if persistently present, can indicate a
+subsystem is locked up. Default is the comma-separated list of kernel symbols
+`cma_alloc,__get_user_pages,bit_wait_io,wait_on_page_bit_killable`.
+The check doesn't do forward scheduling ABA except by polling every
+`ro.llk.check_ms` over the period `ro.llk.stack.timeout_ms`, so stack symbols
+should be exceptionally rare and fleeting (it is highly unlikely for a symbol
+to show up persistently in all samples of the stack). Checks for a match for
+`" symbol+0x"` or `" symbol.cfi+0x"` in stack expansion. **Available only on
+userdebug or eng builds**; security concerns on user builds result in limited
+privileges that prevent this check.
+
+### ro.llk.ignorelist.process <!-- {:#ro-llk-ignorelist-process} -->
+
+The `llkd` does not watch the specified processes. Default is `0,1,2` (`kernel`,
+`init`, and `[kthreadd]`) plus process names
+`init,[kthreadd],[khungtaskd],lmkd,llkd,watchdogd,[watchdogd],[watchdogd/0],...,[watchdogd/get_nprocs-1]`.
+A process can be a `comm`, `cmdline`, or `pid` reference. An automated default
+can be larger than the current maximum property size of 92.
+
+Note: `false` is an extremely unlikely process to want to ignore.
+
+### ro.llk.ignorelist.parent <!-- {:#ro-llk-ignorelist-parent} -->
+
+The `llkd` does not watch processes that have the specified parent(s). Default
+is `0,2,adbd&[setsid]` (`kernel`, `[kthreadd]`, and `adbd` only for zombie
+`setsid`). An ampersand (&) separator specifies that the parent is ignored only
+in combination with the target child process. Ampersand was selected because it
+is never part of a process name; however, a `setprop` in the shell requires the
+ampersand to be escaped or quoted, although the init `.rc` file where this is
+normally specified does not have this issue (see the example below). A parent
+or target process can be a
+`comm`, `cmdline`, or `pid` reference.
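+
+For illustration only (the property name and the built-in default value are
+taken from `llkd.h`; the exact quoting rules depend on your shell), the same
+assignment as a shell command and as an init `.rc` line would look like:
+
+```
+# Shell: the ampersand must be quoted or escaped.
+setprop ro.llk.ignorelist.parent '0,2,[kthreadd],adbd&[setsid]'
+
+# init .rc: no escaping needed.
+setprop ro.llk.ignorelist.parent 0,2,[kthreadd],adbd&[setsid]
+```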
+
+### ro.llk.ignorelist.uid <!-- {:#ro-llk-ignorelist-uid} -->
+
+The `llkd` does not watch processes that match the specified uid(s).
+Comma-separated list of uid numbers or names. Default is empty or false.
+
+### ro.llk.ignorelist.process.stack <!-- {:#ro-llk-ignorelist-process-stack} -->
+
+The `llkd` does not monitor the specified subset of processes for live lock stack
+signatures. Default is process names
+`init,lmkd.llkd,llkd,keystore,ueventd,apexd,logd`. Prevents the sepolicy
+violation associated with processes that block `ptrace` (as these can't be
+checked). **Active only on userdebug and eng builds**. For details on build
+types, refer to [Building Android](/setup/build/building#choose-a-target).
+
+## Architectural concerns <!-- {:#architectural-concerns} -->
+
+* Properties are limited to 92 characters. However, this limit does not apply
+  to the defaults defined in the `include/llkd.h` file in the sources.
+* The built-in `[khungtask]` daemon is too generic and trips on driver code
+  that sits in D state too long. Switching drivers to sleep (S state) would
+  make the tasks killable, but the drivers would then need to resurrect them
+  on an as-needed basis.
+
+## Library interface (optional) <!-- {:#library-interface-optional} -->
+
+You can optionally incorporate the `llkd` into another privileged daemon using
+the following C interface from the `libllkd` component:
+
+```
+#include "llkd.h"
+bool llkInit(const char* threadname) /* return true if enabled */
+unsigned llkCheckMilliseconds(void)  /* ms to sleep for next check */
+```
+
+If a thread name is provided, a thread spawns automatically; otherwise, the
+caller must call `llkCheckMilliseconds` in its main loop. The function returns
+the period of time to wait before the next expected call to this handler.
diff --git a/llkd/include/llkd.h b/llkd/include/llkd.h
index 3586ca1..4b20a56 100644
--- a/llkd/include/llkd.h
+++ b/llkd/include/llkd.h
@@ -30,37 +30,37 @@
 unsigned llkCheckMilliseconds(void);
 
 /* clang-format off */
-#define LLK_ENABLE_WRITEABLE_PROPERTY  "llk.enable"
-#define LLK_ENABLE_PROPERTY            "ro." LLK_ENABLE_WRITEABLE_PROPERTY
-#define LLK_ENABLE_DEFAULT             false /* "eng" and userdebug true */
-#define KHT_ENABLE_WRITEABLE_PROPERTY  "khungtask.enable"
-#define KHT_ENABLE_PROPERTY            "ro." KHT_ENABLE_WRITEABLE_PROPERTY
-#define LLK_ENABLE_SYSRQ_T_PROPERTY    "ro.llk.sysrq_t"
-#define LLK_ENABLE_SYSRQ_T_DEFAULT     true
-#define LLK_MLOCKALL_PROPERTY          "ro.llk.mlockall"
-#define LLK_MLOCKALL_DEFAULT           true
-#define LLK_KILLTEST_PROPERTY          "ro.llk.killtest"
-#define LLK_KILLTEST_DEFAULT           true
-#define LLK_TIMEOUT_MS_PROPERTY        "ro.llk.timeout_ms"
-#define KHT_TIMEOUT_PROPERTY           "ro.khungtask.timeout"
-#define LLK_D_TIMEOUT_MS_PROPERTY      "ro.llk.D.timeout_ms"
-#define LLK_Z_TIMEOUT_MS_PROPERTY      "ro.llk.Z.timeout_ms"
-#define LLK_STACK_TIMEOUT_MS_PROPERTY  "ro.llk.stack.timeout_ms"
-#define LLK_CHECK_MS_PROPERTY          "ro.llk.check_ms"
+#define LLK_ENABLE_WRITEABLE_PROPERTY   "llk.enable"
+#define LLK_ENABLE_PROPERTY             "ro." LLK_ENABLE_WRITEABLE_PROPERTY
+#define LLK_ENABLE_DEFAULT              false /* "eng" and userdebug true */
+#define KHT_ENABLE_WRITEABLE_PROPERTY   "khungtask.enable"
+#define KHT_ENABLE_PROPERTY             "ro." KHT_ENABLE_WRITEABLE_PROPERTY
+#define LLK_ENABLE_SYSRQ_T_PROPERTY     "ro.llk.sysrq_t"
+#define LLK_ENABLE_SYSRQ_T_DEFAULT      true
+#define LLK_MLOCKALL_PROPERTY           "ro.llk.mlockall"
+#define LLK_MLOCKALL_DEFAULT            true
+#define LLK_KILLTEST_PROPERTY           "ro.llk.killtest"
+#define LLK_KILLTEST_DEFAULT            true
+#define LLK_TIMEOUT_MS_PROPERTY         "ro.llk.timeout_ms"
+#define KHT_TIMEOUT_PROPERTY            "ro.khungtask.timeout"
+#define LLK_D_TIMEOUT_MS_PROPERTY       "ro.llk.D.timeout_ms"
+#define LLK_Z_TIMEOUT_MS_PROPERTY       "ro.llk.Z.timeout_ms"
+#define LLK_STACK_TIMEOUT_MS_PROPERTY   "ro.llk.stack.timeout_ms"
+#define LLK_CHECK_MS_PROPERTY           "ro.llk.check_ms"
 /* LLK_CHECK_MS_DEFAULT = actual timeout_ms / LLK_CHECKS_PER_TIMEOUT_DEFAULT */
-#define LLK_CHECKS_PER_TIMEOUT_DEFAULT 5
-#define LLK_CHECK_STACK_PROPERTY       "ro.llk.stack"
-#define LLK_CHECK_STACK_DEFAULT        \
+#define LLK_CHECKS_PER_TIMEOUT_DEFAULT  5
+#define LLK_CHECK_STACK_PROPERTY        "ro.llk.stack"
+#define LLK_CHECK_STACK_DEFAULT         \
     "cma_alloc,__get_user_pages,bit_wait_io,wait_on_page_bit_killable"
-#define LLK_BLACKLIST_PROCESS_PROPERTY "ro.llk.blacklist.process"
-#define LLK_BLACKLIST_PROCESS_DEFAULT  \
+#define LLK_IGNORELIST_PROCESS_PROPERTY "ro.llk.ignorelist.process"
+#define LLK_IGNORELIST_PROCESS_DEFAULT  \
     "0,1,2,init,[kthreadd],[khungtaskd],lmkd,llkd,watchdogd,[watchdogd],[watchdogd/0]"
-#define LLK_BLACKLIST_PARENT_PROPERTY  "ro.llk.blacklist.parent"
-#define LLK_BLACKLIST_PARENT_DEFAULT   "0,2,[kthreadd],adbd&[setsid]"
-#define LLK_BLACKLIST_UID_PROPERTY     "ro.llk.blacklist.uid"
-#define LLK_BLACKLIST_UID_DEFAULT      ""
-#define LLK_BLACKLIST_STACK_PROPERTY   "ro.llk.blacklist.process.stack"
-#define LLK_BLACKLIST_STACK_DEFAULT    "init,lmkd.llkd,llkd,keystore,ueventd,apexd"
+#define LLK_IGNORELIST_PARENT_PROPERTY  "ro.llk.ignorelist.parent"
+#define LLK_IGNORELIST_PARENT_DEFAULT   "0,2,[kthreadd],adbd&[setsid]"
+#define LLK_IGNORELIST_UID_PROPERTY     "ro.llk.ignorelist.uid"
+#define LLK_IGNORELIST_UID_DEFAULT      ""
+#define LLK_IGNORELIST_STACK_PROPERTY   "ro.llk.ignorelist.process.stack"
+#define LLK_IGNORELIST_STACK_DEFAULT    "init,lmkd.llkd,llkd,keystore,ueventd,apexd"
 /* clang-format on */
 
 __END_DECLS
diff --git a/llkd/libllkd.cpp b/llkd/libllkd.cpp
index 8ad9900..a24d900 100644
--- a/llkd/libllkd.cpp
+++ b/llkd/libllkd.cpp
@@ -98,26 +98,26 @@
 std::unordered_set<std::string> llkCheckStackSymbols;
 #endif
 
-// Blacklist variables, initialized with comma separated lists of high false
+// Ignorelist variables, initialized with comma separated lists of high false
 // positive and/or dangerous references, e.g. without self restart, for pid,
 // ppid, name and uid:
 
 // list of pids, or tids or names to skip. kernel pid (0), init pid (1),
 // [kthreadd] pid (2), ourselves, "init", "[kthreadd]", "lmkd", "llkd" or
 // combinations of watchdogd in kernel and user space.
-std::unordered_set<std::string> llkBlacklistProcess;
+std::unordered_set<std::string> llkIgnorelistProcess;
 // list of parent pids, comm or cmdline names to skip. default:
 // kernel pid (0), [kthreadd] (2), or ourselves, enforced and implied
-std::unordered_set<std::string> llkBlacklistParent;
+std::unordered_set<std::string> llkIgnorelistParent;
 // list of parent and target processes to skip. default:
 // adbd *and* [setsid]
-std::unordered_map<std::string, std::unordered_set<std::string>> llkBlacklistParentAndChild;
+std::unordered_map<std::string, std::unordered_set<std::string>> llkIgnorelistParentAndChild;
 // list of uids, and uid names, to skip, default nothing
-std::unordered_set<std::string> llkBlacklistUid;
+std::unordered_set<std::string> llkIgnorelistUid;
 #ifdef __PTRACE_ENABLED__
 // list of names to skip stack checking. "init", "lmkd", "llkd", "keystore" or
 // "logd" (if not userdebug).
-std::unordered_set<std::string> llkBlacklistStack;
+std::unordered_set<std::string> llkIgnorelistStack;
 #endif
 
 class dir {
@@ -626,9 +626,9 @@
     return flag ? "true" : "false";
 }
 
-std::string llkFormat(const std::unordered_set<std::string>& blacklist) {
+std::string llkFormat(const std::unordered_set<std::string>& ignorelist) {
     std::string ret;
-    for (const auto& entry : blacklist) {
+    for (const auto& entry : ignorelist) {
         if (!ret.empty()) ret += ",";
         ret += entry;
     }
@@ -636,10 +636,10 @@
 }
 
 std::string llkFormat(
-        const std::unordered_map<std::string, std::unordered_set<std::string>>& blacklist,
+        const std::unordered_map<std::string, std::unordered_set<std::string>>& ignorelist,
         bool leading_comma = false) {
     std::string ret;
-    for (const auto& entry : blacklist) {
+    for (const auto& entry : ignorelist) {
         for (const auto& target : entry.second) {
             if (leading_comma || !ret.empty()) ret += ",";
             ret += entry.first + "&" + target;
@@ -699,61 +699,61 @@
 }
 
 bool llkSkipName(const std::string& name,
-                 const std::unordered_set<std::string>& blacklist = llkBlacklistProcess) {
-    if (name.empty() || blacklist.empty()) return false;
+                 const std::unordered_set<std::string>& ignorelist = llkIgnorelistProcess) {
+    if (name.empty() || ignorelist.empty()) return false;
 
-    return blacklist.find(name) != blacklist.end();
+    return ignorelist.find(name) != ignorelist.end();
 }
 
 bool llkSkipProc(proc* procp,
-                 const std::unordered_set<std::string>& blacklist = llkBlacklistProcess) {
+                 const std::unordered_set<std::string>& ignorelist = llkIgnorelistProcess) {
     if (!procp) return false;
-    if (llkSkipName(std::to_string(procp->pid), blacklist)) return true;
-    if (llkSkipName(procp->getComm(), blacklist)) return true;
-    if (llkSkipName(procp->getCmdline(), blacklist)) return true;
-    if (llkSkipName(android::base::Basename(procp->getCmdline()), blacklist)) return true;
+    if (llkSkipName(std::to_string(procp->pid), ignorelist)) return true;
+    if (llkSkipName(procp->getComm(), ignorelist)) return true;
+    if (llkSkipName(procp->getCmdline(), ignorelist)) return true;
+    if (llkSkipName(android::base::Basename(procp->getCmdline()), ignorelist)) return true;
     return false;
 }
 
 const std::unordered_set<std::string>& llkSkipName(
         const std::string& name,
-        const std::unordered_map<std::string, std::unordered_set<std::string>>& blacklist) {
+        const std::unordered_map<std::string, std::unordered_set<std::string>>& ignorelist) {
     static const std::unordered_set<std::string> empty;
-    if (name.empty() || blacklist.empty()) return empty;
-    auto found = blacklist.find(name);
-    if (found == blacklist.end()) return empty;
+    if (name.empty() || ignorelist.empty()) return empty;
+    auto found = ignorelist.find(name);
+    if (found == ignorelist.end()) return empty;
     return found->second;
 }
 
 bool llkSkipPproc(proc* pprocp, proc* procp,
                   const std::unordered_map<std::string, std::unordered_set<std::string>>&
-                          blacklist = llkBlacklistParentAndChild) {
-    if (!pprocp || !procp || blacklist.empty()) return false;
-    if (llkSkipProc(procp, llkSkipName(std::to_string(pprocp->pid), blacklist))) return true;
-    if (llkSkipProc(procp, llkSkipName(pprocp->getComm(), blacklist))) return true;
-    if (llkSkipProc(procp, llkSkipName(pprocp->getCmdline(), blacklist))) return true;
+                          ignorelist = llkIgnorelistParentAndChild) {
+    if (!pprocp || !procp || ignorelist.empty()) return false;
+    if (llkSkipProc(procp, llkSkipName(std::to_string(pprocp->pid), ignorelist))) return true;
+    if (llkSkipProc(procp, llkSkipName(pprocp->getComm(), ignorelist))) return true;
+    if (llkSkipProc(procp, llkSkipName(pprocp->getCmdline(), ignorelist))) return true;
     return llkSkipProc(procp,
-                       llkSkipName(android::base::Basename(pprocp->getCmdline()), blacklist));
+                       llkSkipName(android::base::Basename(pprocp->getCmdline()), ignorelist));
 }
 
 bool llkSkipPid(pid_t pid) {
-    return llkSkipName(std::to_string(pid), llkBlacklistProcess);
+    return llkSkipName(std::to_string(pid), llkIgnorelistProcess);
 }
 
 bool llkSkipPpid(pid_t ppid) {
-    return llkSkipName(std::to_string(ppid), llkBlacklistParent);
+    return llkSkipName(std::to_string(ppid), llkIgnorelistParent);
 }
 
 bool llkSkipUid(uid_t uid) {
     // Match by number?
-    if (llkSkipName(std::to_string(uid), llkBlacklistUid)) {
+    if (llkSkipName(std::to_string(uid), llkIgnorelistUid)) {
         return true;
     }
 
     // Match by name?
     auto pwd = ::getpwuid(uid);
     return (pwd != nullptr) && __predict_true(pwd->pw_name != nullptr) &&
-           __predict_true(pwd->pw_name[0] != '\0') && llkSkipName(pwd->pw_name, llkBlacklistUid);
+           __predict_true(pwd->pw_name[0] != '\0') && llkSkipName(pwd->pw_name, llkIgnorelistUid);
 }
 
 bool getValidTidDir(dirent* dp, std::string* piddir) {
@@ -811,7 +811,7 @@
     }
 
     // Don't check process that are known to block ptrace, save sepolicy noise.
-    if (llkSkipProc(procp, llkBlacklistStack)) return false;
+    if (llkSkipProc(procp, llkIgnorelistStack)) return false;
     auto kernel_stack = ReadFile(piddir + "/stack");
     if (kernel_stack.empty()) {
         LOG(VERBOSE) << piddir << "/stack empty comm=" << procp->getComm()
@@ -917,12 +917,12 @@
               << LLK_CHECK_MS_PROPERTY "=" << llkFormat(llkCheckMs) << "\n"
 #ifdef __PTRACE_ENABLED__
               << LLK_CHECK_STACK_PROPERTY "=" << llkFormat(llkCheckStackSymbols) << "\n"
-              << LLK_BLACKLIST_STACK_PROPERTY "=" << llkFormat(llkBlacklistStack) << "\n"
+              << LLK_IGNORELIST_STACK_PROPERTY "=" << llkFormat(llkIgnorelistStack) << "\n"
 #endif
-              << LLK_BLACKLIST_PROCESS_PROPERTY "=" << llkFormat(llkBlacklistProcess) << "\n"
-              << LLK_BLACKLIST_PARENT_PROPERTY "=" << llkFormat(llkBlacklistParent)
-              << llkFormat(llkBlacklistParentAndChild, true) << "\n"
-              << LLK_BLACKLIST_UID_PROPERTY "=" << llkFormat(llkBlacklistUid);
+              << LLK_IGNORELIST_PROCESS_PROPERTY "=" << llkFormat(llkIgnorelistProcess) << "\n"
+              << LLK_IGNORELIST_PARENT_PROPERTY "=" << llkFormat(llkIgnorelistParent)
+              << llkFormat(llkIgnorelistParentAndChild, true) << "\n"
+              << LLK_IGNORELIST_UID_PROPERTY "=" << llkFormat(llkIgnorelistUid);
 }
 
 void* llkThread(void* obj) {
@@ -932,14 +932,14 @@
 
     std::string name = std::to_string(::gettid());
     if (!llkSkipName(name)) {
-        llkBlacklistProcess.emplace(name);
+        llkIgnorelistProcess.emplace(name);
     }
     name = static_cast<const char*>(obj);
     prctl(PR_SET_NAME, name.c_str());
     if (__predict_false(!llkSkipName(name))) {
-        llkBlacklistProcess.insert(name);
+        llkIgnorelistProcess.insert(name);
     }
-    // No longer modifying llkBlacklistProcess.
+    // No longer modifying llkIgnorelistProcess.
     llkRunning = true;
     llkLogConfig();
     while (llkRunning) {
@@ -1122,12 +1122,12 @@
             }
             if (pprocp) {
                 if (llkSkipPproc(pprocp, procp)) break;
-                if (llkSkipProc(pprocp, llkBlacklistParent)) break;
+                if (llkSkipProc(pprocp, llkIgnorelistParent)) break;
             } else {
-                if (llkSkipName(std::to_string(ppid), llkBlacklistParent)) break;
+                if (llkSkipName(std::to_string(ppid), llkIgnorelistParent)) break;
             }
 
-            if ((llkBlacklistUid.size() != 0) && llkSkipUid(procp->getUid())) {
+            if ((llkIgnorelistUid.size() != 0) && llkSkipUid(procp->getUid())) {
                 continue;
             }
 
@@ -1320,29 +1320,29 @@
     if (debuggable) {
         llkCheckStackSymbols = llkSplit(LLK_CHECK_STACK_PROPERTY, LLK_CHECK_STACK_DEFAULT);
     }
-    std::string defaultBlacklistStack(LLK_BLACKLIST_STACK_DEFAULT);
-    if (!debuggable) defaultBlacklistStack += ",logd,/system/bin/logd";
-    llkBlacklistStack = llkSplit(LLK_BLACKLIST_STACK_PROPERTY, defaultBlacklistStack);
+    std::string defaultIgnorelistStack(LLK_IGNORELIST_STACK_DEFAULT);
+    if (!debuggable) defaultIgnorelistStack += ",logd,/system/bin/logd";
+    llkIgnorelistStack = llkSplit(LLK_IGNORELIST_STACK_PROPERTY, defaultIgnorelistStack);
 #endif
-    std::string defaultBlacklistProcess(
-        std::to_string(kernelPid) + "," + std::to_string(initPid) + "," +
-        std::to_string(kthreaddPid) + "," + std::to_string(::getpid()) + "," +
-        std::to_string(::gettid()) + "," LLK_BLACKLIST_PROCESS_DEFAULT);
+    std::string defaultIgnorelistProcess(
+            std::to_string(kernelPid) + "," + std::to_string(initPid) + "," +
+            std::to_string(kthreaddPid) + "," + std::to_string(::getpid()) + "," +
+            std::to_string(::gettid()) + "," LLK_IGNORELIST_PROCESS_DEFAULT);
     if (threadname) {
-        defaultBlacklistProcess += ","s + threadname;
+        defaultIgnorelistProcess += ","s + threadname;
     }
     for (int cpu = 1; cpu < get_nprocs_conf(); ++cpu) {
-        defaultBlacklistProcess += ",[watchdog/" + std::to_string(cpu) + "]";
+        defaultIgnorelistProcess += ",[watchdog/" + std::to_string(cpu) + "]";
     }
-    llkBlacklistProcess = llkSplit(LLK_BLACKLIST_PROCESS_PROPERTY, defaultBlacklistProcess);
+    llkIgnorelistProcess = llkSplit(LLK_IGNORELIST_PROCESS_PROPERTY, defaultIgnorelistProcess);
     if (!llkSkipName("[khungtaskd]")) {  // ALWAYS ignore as special
-        llkBlacklistProcess.emplace("[khungtaskd]");
+        llkIgnorelistProcess.emplace("[khungtaskd]");
     }
-    llkBlacklistParent = llkSplit(LLK_BLACKLIST_PARENT_PROPERTY,
-                                  std::to_string(kernelPid) + "," + std::to_string(kthreaddPid) +
-                                          "," LLK_BLACKLIST_PARENT_DEFAULT);
-    // derive llkBlacklistParentAndChild by moving entries with '&' from above
-    for (auto it = llkBlacklistParent.begin(); it != llkBlacklistParent.end();) {
+    llkIgnorelistParent = llkSplit(LLK_IGNORELIST_PARENT_PROPERTY,
+                                   std::to_string(kernelPid) + "," + std::to_string(kthreaddPid) +
+                                           "," LLK_IGNORELIST_PARENT_DEFAULT);
+    // derive llkIgnorelistParentAndChild by moving entries with '&' from above
+    for (auto it = llkIgnorelistParent.begin(); it != llkIgnorelistParent.end();) {
         auto pos = it->find('&');
         if (pos == std::string::npos) {
             ++it;
@@ -1350,18 +1350,18 @@
         }
         auto parent = it->substr(0, pos);
         auto child = it->substr(pos + 1);
-        it = llkBlacklistParent.erase(it);
+        it = llkIgnorelistParent.erase(it);
 
-        auto found = llkBlacklistParentAndChild.find(parent);
-        if (found == llkBlacklistParentAndChild.end()) {
-            llkBlacklistParentAndChild.emplace(std::make_pair(
+        auto found = llkIgnorelistParentAndChild.find(parent);
+        if (found == llkIgnorelistParentAndChild.end()) {
+            llkIgnorelistParentAndChild.emplace(std::make_pair(
                     std::move(parent), std::unordered_set<std::string>({std::move(child)})));
         } else {
             found->second.emplace(std::move(child));
         }
     }
 
-    llkBlacklistUid = llkSplit(LLK_BLACKLIST_UID_PROPERTY, LLK_BLACKLIST_UID_DEFAULT);
+    llkIgnorelistUid = llkSplit(LLK_IGNORELIST_UID_PROPERTY, LLK_IGNORELIST_UID_DEFAULT);
 
     // internal watchdog
     ::signal(SIGALRM, llkAlarmHandler);
diff --git a/logd/Android.bp b/logd/Android.bp
index 1f6ab34..e0a1168 100644
--- a/logd/Android.bp
+++ b/logd/Android.bp
@@ -31,7 +31,11 @@
 cc_defaults {
     name: "logd_defaults",
 
-    shared_libs: ["libbase"],
+    shared_libs: [
+        "libbase",
+        "libz",
+    ],
+    static_libs: ["libzstd"],
     cflags: [
         "-Wextra",
         "-Wthread-safety",
@@ -40,6 +44,7 @@
     lto: {
         thin: true,
     },
+    cpp_std: "experimental",
 }
 
 cc_library_static {
@@ -48,12 +53,16 @@
     host_supported: true,
     srcs: [
         "ChattyLogBuffer.cpp",
+        "CompressionEngine.cpp",
         "LogReaderList.cpp",
         "LogReaderThread.cpp",
         "LogBufferElement.cpp",
         "LogStatistics.cpp",
         "LogWhiteBlackList.cpp",
         "LogTags.cpp",
+        "SerializedFlushToState.cpp",
+        "SerializedLogBuffer.cpp",
+        "SerializedLogChunk.cpp",
         "SimpleLogBuffer.cpp",
     ],
     logtags: ["event.logtags"],
@@ -132,6 +141,8 @@
         "ChattyLogBufferTest.cpp",
         "logd_test.cpp",
         "LogBufferTest.cpp",
+        "SerializedLogChunkTest.cpp",
+        "SerializedFlushToStateTest.cpp",
     ],
 
     static_libs: [
@@ -140,6 +151,8 @@
         "liblog",
         "liblogd",
         "libselinux",
+        "libz",
+        "libzstd",
     ],
 }
 
diff --git a/logd/ChattyLogBuffer.cpp b/logd/ChattyLogBuffer.cpp
index f92fe65..c213448 100644
--- a/logd/ChattyLogBuffer.cpp
+++ b/logd/ChattyLogBuffer.cpp
@@ -327,7 +327,6 @@
 //
 bool ChattyLogBuffer::Prune(log_id_t id, unsigned long pruneRows, uid_t caller_uid) {
     LogReaderThread* oldest = nullptr;
-    bool busy = false;
     bool clearAll = pruneRows == ULONG_MAX;
 
     auto reader_threads_lock = std::lock_guard{reader_list()->reader_threads_lock()};
@@ -359,17 +358,16 @@
             }
 
             if (oldest && oldest->start() <= element.sequence()) {
-                busy = true;
                 KickReader(oldest, id, pruneRows);
-                break;
+                return false;
             }
 
             it = Erase(it);
             if (--pruneRows == 0) {
-                break;
+                return true;
             }
         }
-        return busy;
+        return true;
     }
 
     // prune by worst offenders; by blacklist, UID, and by PID of system UID
@@ -440,7 +438,6 @@
             LogBufferElement& element = *it;
 
             if (oldest && oldest->start() <= element.sequence()) {
-                busy = true;
                 // Do not let chatty eliding trigger any reader mitigation
                 break;
             }
@@ -577,7 +574,6 @@
         }
 
         if (oldest && oldest->start() <= element.sequence()) {
-            busy = true;
             if (!whitelist) KickReader(oldest, id, pruneRows);
             break;
         }
@@ -605,7 +601,6 @@
             }
 
             if (oldest && oldest->start() <= element.sequence()) {
-                busy = true;
                 KickReader(oldest, id, pruneRows);
                 break;
             }
@@ -615,5 +610,5 @@
         }
     }
 
-    return (pruneRows > 0) && busy;
+    return pruneRows == 0 || it == logs().end();
 }
diff --git a/logd/CommandListener.cpp b/logd/CommandListener.cpp
index 9764c44..39c5490 100644
--- a/logd/CommandListener.cpp
+++ b/logd/CommandListener.cpp
@@ -82,7 +82,7 @@
         return 0;
     }
 
-    cli->sendMsg(buf()->Clear((log_id_t)id, uid) ? "busy" : "success");
+    cli->sendMsg(buf()->Clear((log_id_t)id, uid) ? "success" : "busy");
     return 0;
 }
 
diff --git a/logd/CompressionEngine.cpp b/logd/CompressionEngine.cpp
new file mode 100644
index 0000000..f37208b
--- /dev/null
+++ b/logd/CompressionEngine.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CompressionEngine.h"
+
+#include <limits>
+
+#include <android-base/logging.h>
+#include <zlib.h>
+#include <zstd.h>
+
+CompressionEngine& CompressionEngine::GetInstance() {
+    CompressionEngine* engine = new ZstdCompressionEngine();
+    return *engine;
+}
+
+bool ZlibCompressionEngine::Compress(std::span<uint8_t> in, std::vector<uint8_t>& out) {
+    z_stream strm;
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    int ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
+    if (ret != Z_OK) {
+        LOG(FATAL) << "deflateInit() failed";
+    }
+
+    CHECK_LE(in.size(), static_cast<int64_t>(std::numeric_limits<uint32_t>::max()));
+    uint32_t out_size = deflateBound(&strm, in.size());
+
+    out.resize(out_size);
+    strm.avail_in = in.size();
+    strm.next_in = const_cast<uint8_t*>(in.data());
+    strm.avail_out = out_size;
+    strm.next_out = out.data();
+    ret = deflate(&strm, Z_FINISH);
+    CHECK_EQ(ret, Z_STREAM_END);
+
+    uint32_t compressed_data_size = strm.total_out;
+    deflateEnd(&strm);
+    out.resize(compressed_data_size);
+
+    return true;
+}
+
+bool ZlibCompressionEngine::Decompress(const std::vector<uint8_t>& in, std::vector<uint8_t>& out,
+                                       size_t out_size) {
+    out.resize(out_size);
+
+    z_stream strm;
+    strm.zalloc = Z_NULL;
+    strm.zfree = Z_NULL;
+    strm.opaque = Z_NULL;
+    strm.avail_in = in.size();
+    strm.next_in = const_cast<uint8_t*>(in.data());
+    strm.avail_out = out.size();
+    strm.next_out = out.data();
+
+    inflateInit(&strm);
+    int ret = inflate(&strm, Z_NO_FLUSH);
+
+    CHECK_EQ(strm.avail_in, 0U);
+    CHECK_EQ(strm.avail_out, 0U);
+    CHECK_EQ(ret, Z_STREAM_END);
+    inflateEnd(&strm);
+
+    return true;
+}
+
+bool ZstdCompressionEngine::Compress(std::span<uint8_t> in, std::vector<uint8_t>& out) {
+    size_t out_size = ZSTD_compressBound(in.size());
+    out.resize(out_size);
+
+    out_size = ZSTD_compress(out.data(), out_size, in.data(), in.size(), 1);
+    if (ZSTD_isError(out_size)) {
+        LOG(FATAL) << "ZSTD_compress failed: " << ZSTD_getErrorName(out_size);
+    }
+    out.resize(out_size);
+
+    return true;
+}
+
+bool ZstdCompressionEngine::Decompress(const std::vector<uint8_t>& in, std::vector<uint8_t>& out,
+                                       size_t out_size) {
+    out.resize(out_size);
+    size_t result = ZSTD_decompress(out.data(), out.size(), in.data(), in.size());
+    if (ZSTD_isError(result)) {
+        LOG(FATAL) << "ZSTD_decompress failed: " << ZSTD_getErrorName(result);
+    }
+    CHECK_EQ(result, out.size());
+    return true;
+}
diff --git a/logd/CompressionEngine.h b/logd/CompressionEngine.h
new file mode 100644
index 0000000..d760cea
--- /dev/null
+++ b/logd/CompressionEngine.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <span>
+#include <vector>
+
+class CompressionEngine {
+  public:
+    static CompressionEngine& GetInstance();
+
+    virtual ~CompressionEngine(){};
+
+    virtual bool Compress(std::span<uint8_t> in, std::vector<uint8_t>& out) = 0;
+    // Decompress the contents of `in` into `out`.  `out_size` must be set to the decompressed size
+    // of the contents.
+    virtual bool Decompress(const std::vector<uint8_t>& in, std::vector<uint8_t>& out,
+                            size_t out_size) = 0;
+};
+
+class ZlibCompressionEngine : public CompressionEngine {
+  public:
+    bool Compress(std::span<uint8_t> in, std::vector<uint8_t>& out) override;
+    bool Decompress(const std::vector<uint8_t>& in, std::vector<uint8_t>& out,
+                    size_t out_size) override;
+};
+
+class ZstdCompressionEngine : public CompressionEngine {
+  public:
+    bool Compress(std::span<uint8_t> in, std::vector<uint8_t>& out) override;
+    bool Decompress(const std::vector<uint8_t>& in, std::vector<uint8_t>& out,
+                    size_t out_size) override;
+};
\ No newline at end of file
diff --git a/logd/LogBufferTest.cpp b/logd/LogBufferTest.cpp
index e651b4f..47d2a2f 100644
--- a/logd/LogBufferTest.cpp
+++ b/logd/LogBufferTest.cpp
@@ -26,6 +26,7 @@
 #include <android-base/stringprintf.h>
 #include <android-base/strings.h>
 
+#include "LogBuffer.h"
 #include "LogReaderThread.h"
 #include "LogWriter.h"
 
@@ -240,7 +241,7 @@
         std::unique_ptr<LogWriter> test_writer(new TestWriter(&read_log_messages, &released));
         std::unique_ptr<LogReaderThread> log_reader(
                 new LogReaderThread(log_buffer_.get(), &reader_list_, std::move(test_writer), true,
-                                    0, ~0, 0, {}, 1, {}));
+                                    0, kLogMaskAll, 0, {}, 1, {}));
         reader_list_.reader_threads().emplace_back(std::move(log_reader));
     }
 
@@ -314,7 +315,7 @@
         std::unique_ptr<LogWriter> test_writer(new TestWriter(&read_log_messages, &released));
         std::unique_ptr<LogReaderThread> log_reader(
                 new LogReaderThread(log_buffer_.get(), &reader_list_, std::move(test_writer), true,
-                                    0, ~0, 0, {}, 1, {}));
+                                    0, kLogMaskAll, 0, {}, 1, {}));
         reader_list_.reader_threads().emplace_back(std::move(log_reader));
     }
 
@@ -348,7 +349,7 @@
         std::unique_ptr<LogWriter> test_writer(new TestWriter(&read_log_messages, &released));
         std::unique_ptr<LogReaderThread> log_reader(
                 new LogReaderThread(log_buffer_.get(), &reader_list_, std::move(test_writer), true,
-                                    0, ~0, 0, {}, 3, {}));
+                                    0, kLogMaskAll, 0, {}, 3, {}));
         reader_list_.reader_threads().emplace_back(std::move(log_reader));
     }
 
@@ -363,4 +364,96 @@
     CompareLogMessages(expected_log_messages, read_log_messages);
 }
 
-INSTANTIATE_TEST_CASE_P(LogBufferTests, LogBufferTest, testing::Values("chatty", "simple"));
+TEST_P(LogBufferTest, clear_logs) {
+    // Log 3 initial logs.
+    std::vector<LogMessage> log_messages = {
+            {{.pid = 1, .tid = 2, .sec = 10000, .nsec = 20001, .lid = LOG_ID_MAIN, .uid = 0},
+             "first"},
+            {{.pid = 10, .tid = 2, .sec = 10000, .nsec = 20002, .lid = LOG_ID_MAIN, .uid = 0},
+             "second"},
+            {{.pid = 100, .tid = 2, .sec = 10000, .nsec = 20003, .lid = LOG_ID_MAIN, .uid = 0},
+             "third"},
+    };
+    FixupMessages(&log_messages);
+    LogMessages(log_messages);
+
+    std::vector<LogMessage> read_log_messages;
+    bool released = false;
+
+    // Connect a blocking reader.
+    {
+        auto lock = std::unique_lock{reader_list_.reader_threads_lock()};
+        std::unique_ptr<LogWriter> test_writer(new TestWriter(&read_log_messages, &released));
+        std::unique_ptr<LogReaderThread> log_reader(
+                new LogReaderThread(log_buffer_.get(), &reader_list_, std::move(test_writer), false,
+                                    0, kLogMaskAll, 0, {}, 1, {}));
+        reader_list_.reader_threads().emplace_back(std::move(log_reader));
+    }
+
+    // Wait up to 250ms for the reader to read the first 3 logs.
+    constexpr int kMaxRetryCount = 50;
+    int count = 0;
+    for (; count < kMaxRetryCount; ++count) {
+        usleep(5000);
+        auto lock = std::unique_lock{reader_list_.reader_threads_lock()};
+        if (reader_list_.reader_threads().back()->start() == 4) {
+            break;
+        }
+    }
+    ASSERT_LT(count, kMaxRetryCount);
+
+    // Clear the log buffer.
+    log_buffer_->Clear(LOG_ID_MAIN, 0);
+
+    // Log 3 more logs.
+    std::vector<LogMessage> after_clear_messages = {
+            {{.pid = 1, .tid = 2, .sec = 10000, .nsec = 20001, .lid = LOG_ID_MAIN, .uid = 0},
+             "4th"},
+            {{.pid = 10, .tid = 2, .sec = 10000, .nsec = 20002, .lid = LOG_ID_MAIN, .uid = 0},
+             "5th"},
+            {{.pid = 100, .tid = 2, .sec = 10000, .nsec = 20003, .lid = LOG_ID_MAIN, .uid = 0},
+             "6th"},
+    };
+    FixupMessages(&after_clear_messages);
+    LogMessages(after_clear_messages);
+
+    // Wait up to 250ms for the reader to read the 3 additional logs.
+    for (count = 0; count < kMaxRetryCount; ++count) {
+        usleep(5000);
+        auto lock = std::unique_lock{reader_list_.reader_threads_lock()};
+        if (reader_list_.reader_threads().back()->start() == 7) {
+            break;
+        }
+    }
+    ASSERT_LT(count, kMaxRetryCount);
+
+    // Release the reader, wait for it to get the signal then check that it has been deleted.
+    {
+        auto lock = std::unique_lock{reader_list_.reader_threads_lock()};
+        reader_list_.reader_threads().back()->release_Locked();
+    }
+    while (!released) {
+        usleep(5000);
+    }
+    {
+        auto lock = std::unique_lock{reader_list_.reader_threads_lock()};
+        EXPECT_EQ(0U, reader_list_.reader_threads().size());
+    }
+
+    // Check that we have read all 6 messages.
+    std::vector<LogMessage> expected_log_messages = log_messages;
+    expected_log_messages.insert(expected_log_messages.end(), after_clear_messages.begin(),
+                                 after_clear_messages.end());
+    CompareLogMessages(expected_log_messages, read_log_messages);
+
+    // Finally, call FlushTo and ensure that only the 3 logs after the clear remain in the buffer.
+    std::vector<LogMessage> read_log_messages_after_clear;
+    std::unique_ptr<LogWriter> test_writer(new TestWriter(&read_log_messages_after_clear, nullptr));
+    std::unique_ptr<FlushToState> flush_to_state = log_buffer_->CreateFlushToState(1, kLogMaskAll);
+    EXPECT_TRUE(log_buffer_->FlushTo(test_writer.get(), *flush_to_state, nullptr));
+    EXPECT_EQ(7ULL, flush_to_state->start());
+    CompareLogMessages(after_clear_messages, read_log_messages_after_clear);
+}
+
+INSTANTIATE_TEST_CASE_P(LogBufferTests, LogBufferTest,
+                        testing::Values("chatty", "serialized", "simple"));
diff --git a/logd/LogBufferTest.h b/logd/LogBufferTest.h
index f91a1b5..235f5ac 100644
--- a/logd/LogBufferTest.h
+++ b/logd/LogBufferTest.h
@@ -26,6 +26,7 @@
 #include "LogStatistics.h"
 #include "LogTags.h"
 #include "LogWhiteBlackList.h"
+#include "SerializedLogBuffer.h"
 #include "SimpleLogBuffer.h"
 
 struct LogMessage {
@@ -67,6 +68,8 @@
     void SetUp() override {
         if (GetParam() == "chatty") {
             log_buffer_.reset(new ChattyLogBuffer(&reader_list_, &tags_, &prune_, &stats_));
+        } else if (GetParam() == "serialized") {
+            log_buffer_.reset(new SerializedLogBuffer(&reader_list_, &tags_, &stats_));
         } else if (GetParam() == "simple") {
             log_buffer_.reset(new SimpleLogBuffer(&reader_list_, &tags_, &stats_));
         } else {
diff --git a/logd/LogReaderThread.h b/logd/LogReaderThread.h
index bf70b94..1855c0e 100644
--- a/logd/LogReaderThread.h
+++ b/logd/LogReaderThread.h
@@ -60,6 +60,7 @@
     std::string name() const { return writer_->name(); }
     uint64_t start() const { return flush_to_state_->start(); }
     std::chrono::steady_clock::time_point deadline() const { return deadline_; }
+    FlushToState& flush_to_state() { return *flush_to_state_; }
 
   private:
     void ThreadFunction();
diff --git a/logd/SerializedFlushToState.cpp b/logd/SerializedFlushToState.cpp
new file mode 100644
index 0000000..17ecb6d
--- /dev/null
+++ b/logd/SerializedFlushToState.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SerializedFlushToState.h"
+
+#include <android-base/logging.h>
+
+SerializedFlushToState::SerializedFlushToState(uint64_t start, LogMask log_mask)
+    : FlushToState(start, log_mask) {
+    log_id_for_each(i) {
+        if (((1 << i) & log_mask) == 0) {
+            continue;
+        }
+        logs_needed_from_next_position_[i] = true;
+    }
+}
+
+SerializedFlushToState::~SerializedFlushToState() {
+    log_id_for_each(i) {
+        if (log_positions_[i]) {
+            log_positions_[i]->buffer_it->DecReaderRefCount(true);
+        }
+    }
+}
+
+void SerializedFlushToState::CreateLogPosition(log_id_t log_id) {
+    CHECK(!logs_[log_id].empty());
+    LogPosition log_position;
+    auto it = logs_[log_id].begin();
+    while (it != logs_[log_id].end() && start() > it->highest_sequence_number()) {
+        ++it;
+    }
+    if (it == logs_[log_id].end()) {
+        --it;
+    }
+    it->IncReaderRefCount();
+    log_position.buffer_it = it;
+
+    // Find the offset of the first log with sequence number >= start().
+    int read_offset = 0;
+    while (read_offset < it->write_offset()) {
+        const auto* entry = it->log_entry(read_offset);
+        if (entry->sequence() >= start()) {
+            break;
+        }
+        read_offset += entry->total_len();
+    }
+    log_position.read_offset = read_offset;
+
+    log_positions_[log_id].emplace(std::move(log_position));
+}
+
+void SerializedFlushToState::AddMinHeapEntry(log_id_t log_id) {
+    auto& buffer_it = log_positions_[log_id]->buffer_it;
+    auto read_offset = log_positions_[log_id]->read_offset;
+
+    // If there is another log to read in this buffer, add it to the min heap.
+    if (read_offset < buffer_it->write_offset()) {
+        auto* entry = buffer_it->log_entry(read_offset);
+        min_heap_.emplace(log_id, entry);
+    } else if (read_offset == buffer_it->write_offset()) {
+        // If there are no more logs to read in this buffer and it's the last buffer, then
+        // set logs_needed_from_next_position_ to wait until more logs get logged.
+        if (buffer_it == std::prev(logs_[log_id].end())) {
+            logs_needed_from_next_position_[log_id] = true;
+        } else {
+            // Otherwise, if there is another buffer piece, move to that and do the same check.
+            buffer_it->DecReaderRefCount(true);
+            ++buffer_it;
+            buffer_it->IncReaderRefCount();
+            log_positions_[log_id]->read_offset = 0;
+            if (buffer_it->write_offset() == 0) {
+                logs_needed_from_next_position_[log_id] = true;
+            } else {
+                auto* entry = buffer_it->log_entry(0);
+                min_heap_.emplace(log_id, entry);
+            }
+        }
+    } else {
+        // read_offset > buffer_it->write_offset() should never happen.
+        CHECK(false);
+    }
+}
+
+void SerializedFlushToState::CheckForNewLogs() {
+    log_id_for_each(i) {
+        if (!logs_needed_from_next_position_[i]) {
+            continue;
+        }
+        if (!log_positions_[i]) {
+            if (logs_[i].empty()) {
+                continue;
+            }
+            CreateLogPosition(i);
+        }
+        logs_needed_from_next_position_[i] = false;
+        // If it wasn't possible to insert, logs_needed_from_next_position will be set back to true.
+        AddMinHeapEntry(i);
+    }
+}
+
+MinHeapElement SerializedFlushToState::PopNextUnreadLog() {
+    auto top = min_heap_.top();
+    min_heap_.pop();
+
+    auto* entry = top.entry;
+    auto log_id = top.log_id;
+
+    log_positions_[log_id]->read_offset += entry->total_len();
+
+    AddMinHeapEntry(log_id);
+
+    return top;
+}
+
+void SerializedFlushToState::Prune(log_id_t log_id,
+                                   const std::list<SerializedLogChunk>::iterator& buffer_it) {
+    // If we don't have a position for this log or if we're not referencing buffer_it, ignore.
+    if (!log_positions_[log_id].has_value() || log_positions_[log_id]->buffer_it != buffer_it) {
+        return;
+    }
+
+    // Decrease the ref count since we're deleting our reference.
+    buffer_it->DecReaderRefCount(false);
+
+    // Delete the reference.
+    log_positions_[log_id].reset();
+
+    // Remove the MinHeapElement referencing log_id, if it exists, but retain the others.
+    std::vector<MinHeapElement> old_elements;
+    while (!min_heap_.empty()) {
+        auto& element = min_heap_.top();
+        if (element.log_id != log_id) {
+            old_elements.emplace_back(element);
+        }
+        min_heap_.pop();
+    }
+    for (auto&& element : old_elements) {
+        min_heap_.emplace(std::move(element));
+    }
+
+    // Finally set logs_needed_from_next_position_, so CheckForNewLogs() will re-create the
+    // log_position_ object during the next read.
+    logs_needed_from_next_position_[log_id] = true;
+}
diff --git a/logd/SerializedFlushToState.h b/logd/SerializedFlushToState.h
new file mode 100644
index 0000000..74b3de5
--- /dev/null
+++ b/logd/SerializedFlushToState.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <bitset>
+#include <list>
+#include <queue>
+
+#include "LogBuffer.h"
+#include "SerializedLogChunk.h"
+#include "SerializedLogEntry.h"
+
+struct LogPosition {
+    std::list<SerializedLogChunk>::iterator buffer_it;
+    int read_offset;
+};
+
+struct MinHeapElement {
+    MinHeapElement(log_id_t log_id, const SerializedLogEntry* entry)
+        : log_id(log_id), entry(entry) {}
+    log_id_t log_id;
+    const SerializedLogEntry* entry;
+    // The change of comparison operators is intentional, std::priority_queue uses operator<() to
+    // compare but creates a max heap.  Since we want a min heap, we return the opposite result.
+    bool operator<(const MinHeapElement& rhs) const {
+        return entry->sequence() > rhs.entry->sequence();
+    }
+};
+
+// This class tracks the specific point where a FlushTo client has read through the logs.  It
+// directly references the std::list<> iterators from the parent SerializedLogBuffer and the offset
+// into each log chunk where it has last read.  All interactions with this class, except for its
+// construction, must be done with SerializedLogBuffer::lock_ held.  No log chunks that it
+// references may be pruned, which is handled by ensuring prune does not touch any log chunk with
+// highest sequence number greater or equal to start().
+class SerializedFlushToState : public FlushToState {
+  public:
+    // Initializes this state object.  For each log buffer set in log_mask, this sets
+    // logs_needed_from_next_position_.
+    SerializedFlushToState(uint64_t start, LogMask log_mask);
+
+    // Decrease the reference of all referenced logs.  This happens when a reader is disconnected.
+    ~SerializedFlushToState() override;
+
+    // We can't hold SerializedLogBuffer::lock_ in the constructor, so we must initialize logs here.
+    void InitializeLogs(std::list<SerializedLogChunk>* logs) {
+        if (logs_ == nullptr) logs_ = logs;
+    }
+
+    // Checks to see if any log buffers set in logs_needed_from_next_position_ have new logs and
+    // calls AddMinHeapEntry() if so.
+    void CheckForNewLogs();
+
+    bool HasUnreadLogs() { return !min_heap_.empty(); }
+
+    // Pops the next unread log from the min heap.  Add the next log for that log_id to the min heap
+    // if one is available otherwise set logs_needed_from_next_position_ to indicate that we're
+    // waiting for more logs.
+    MinHeapElement PopNextUnreadLog();
+
+    // If the parent log buffer prunes logs, the reference that this class contains may become
+    // invalid, so this must be called first to drop the reference to buffer_it, if any.
+    void Prune(log_id_t log_id, const std::list<SerializedLogChunk>::iterator& buffer_it);
+
+  private:
+    // If there is a log in the serialized log buffer for `log_id` at the read_offset, add it to the
+    // min heap for reading, otherwise set logs_needed_from_next_position_ to indicate that we're
+    // waiting for the next log.
+    void AddMinHeapEntry(log_id_t log_id);
+
+    // Create a LogPosition object for the given log_id by searching through the log chunks for the
+    // first chunk and then first log entry within that chunk that is greater or equal to start().
+    void CreateLogPosition(log_id_t log_id);
+
+    std::list<SerializedLogChunk>* logs_ = nullptr;
+    // An optional structure that contains an iterator to the serialized log buffer and offset into
+    // it that this logger should handle next.
+    std::optional<LogPosition> log_positions_[LOG_ID_MAX];
+    // A bit for each log that is set if a given log_id has no logs or if this client has read all
+    // of its logs. In other words: `logs_[i].empty() || (buffer_it == std::prev(logs_.end) &&
+    // next_log_position == logs_write_position_)`.  These will be re-checked in each
+    // loop in case new logs came in.
+    std::bitset<LOG_ID_MAX> logs_needed_from_next_position_ = {};
+    // A min heap that has up to one entry per log buffer, sorted by sequence number, of the next
+    // element that this reader should read.
+    std::priority_queue<MinHeapElement> min_heap_;
+};
diff --git a/logd/SerializedFlushToStateTest.cpp b/logd/SerializedFlushToStateTest.cpp
new file mode 100644
index 0000000..a1d21ac
--- /dev/null
+++ b/logd/SerializedFlushToStateTest.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SerializedFlushToState.h"
+
+#include <map>
+
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <gtest/gtest.h>
+
+using android::base::Join;
+using android::base::StringPrintf;
+
+constexpr size_t kChunkSize = 3 * 4096;
+
+class SerializedFlushToStateTest : public testing::Test {
+  protected:
+    void SetUp() override {
+        // This test spams many unneeded INFO logs, so we suppress them.
+        old_log_severity_ = android::base::SetMinimumLogSeverity(android::base::WARNING);
+    }
+    void TearDown() override { android::base::SetMinimumLogSeverity(old_log_severity_); }
+
+    std::string TestReport(const std::vector<uint64_t>& expected,
+                           const std::vector<uint64_t>& read) {
+        auto sequence_to_log_id = [&](uint64_t sequence) -> int {
+            for (const auto& [log_id, sequences] : sequence_numbers_per_buffer_) {
+                if (std::find(sequences.begin(), sequences.end(), sequence) != sequences.end()) {
+                    return log_id;
+                }
+            }
+            return -1;
+        };
+
+        std::map<int, std::vector<uint64_t>> missing_sequences;
+        std::vector<uint64_t> missing_expected;
+        std::set_difference(expected.begin(), expected.end(), read.begin(), read.end(),
+                            std::back_inserter(missing_expected));
+        for (uint64_t sequence : missing_expected) {
+            int log_id = sequence_to_log_id(sequence);
+            missing_sequences[log_id].emplace_back(sequence);
+        }
+
+        std::map<int, std::vector<uint64_t>> extra_sequences;
+        std::vector<uint64_t> extra_read;
+        std::set_difference(read.begin(), read.end(), expected.begin(), expected.end(),
+                            std::back_inserter(extra_read));
+        for (uint64_t sequence : extra_read) {
+            int log_id = sequence_to_log_id(sequence);
+            extra_sequences[log_id].emplace_back(sequence);
+        }
+
+        std::vector<std::string> errors;
+        for (const auto& [log_id, sequences] : missing_sequences) {
+            errors.emplace_back(
+                    StringPrintf("Log id %d missing %zu sequences", log_id, sequences.size()));
+        }
+
+        for (const auto& [log_id, sequences] : extra_sequences) {
+            errors.emplace_back(
+                    StringPrintf("Log id %d has extra %zu sequences", log_id, sequences.size()));
+        }
+
+        return Join(errors, ", ");
+    }
+
+    // Read sequence numbers in order from SerializedFlushToState for every mask combination and all
+    // sequence numbers from 0 through the highest logged sequence number + 1.
+    // This assumes that all of the logs have already been written.
+    void TestAllReading() {
+        uint64_t max_sequence = sequence_ + 1;
+        uint32_t max_mask = (1 << LOG_ID_MAX) - 1;
+        for (uint64_t sequence = 0; sequence < max_sequence; ++sequence) {
+            for (uint32_t mask = 0; mask < max_mask; ++mask) {
+                auto state = SerializedFlushToState{sequence, mask};
+                state.InitializeLogs(log_chunks_);
+                state.CheckForNewLogs();
+                TestReading(sequence, mask, state);
+            }
+        }
+    }
+
+    // Similar to TestAllReading() except that it doesn't assume any logs are in the buffer, instead
+    // it calls write_logs() in a loop for sequence/mask combination.  It clears log_chunks_ and
+    // sequence_numbers_per_buffer_ between calls, such that only the sequence numbers written in
+    // the previous call to write_logs() are expected.
+    void TestAllReadingWithFutureMessages(const std::function<bool(int)>& write_logs) {
+        uint64_t max_sequence = sequence_ + 1;
+        uint32_t max_mask = (1 << LOG_ID_MAX) - 1;
+        for (uint64_t sequence = 1; sequence < max_sequence; ++sequence) {
+            for (uint32_t mask = 1; mask < max_mask; ++mask) {
+                log_id_for_each(i) { log_chunks_[i].clear(); }
+                auto state = SerializedFlushToState{sequence, mask};
+                state.InitializeLogs(log_chunks_);
+                int loop_count = 0;
+                while (write_logs(loop_count++)) {
+                    state.CheckForNewLogs();
+                    TestReading(sequence, mask, state);
+                    sequence_numbers_per_buffer_.clear();
+                }
+            }
+        }
+    }
+
+    void TestReading(uint64_t start, LogMask log_mask, SerializedFlushToState& state) {
+        std::vector<uint64_t> expected_sequence;
+        log_id_for_each(i) {
+            if (((1 << i) & log_mask) == 0) {
+                continue;
+            }
+            for (const auto& sequence : sequence_numbers_per_buffer_[i]) {
+                if (sequence >= start) {
+                    expected_sequence.emplace_back(sequence);
+                }
+            }
+        }
+        std::sort(expected_sequence.begin(), expected_sequence.end());
+
+        std::vector<uint64_t> read_sequence;
+
+        while (state.HasUnreadLogs()) {
+            auto top = state.PopNextUnreadLog();
+            read_sequence.emplace_back(top.entry->sequence());
+        }
+
+        EXPECT_TRUE(std::is_sorted(read_sequence.begin(), read_sequence.end()));
+
+        EXPECT_EQ(expected_sequence.size(), read_sequence.size());
+
+        EXPECT_EQ(expected_sequence, read_sequence)
+                << "start: " << start << " log_mask: " << log_mask << " "
+                << TestReport(expected_sequence, read_sequence);
+    }
+
+    // Add a chunk with the given messages to a given log buffer.  Keep track of the sequence
+    // numbers for future validation.  Optionally mark the block as having finished writing.
+    void AddChunkWithMessages(bool finish_writing, int buffer,
+                              const std::vector<std::string>& messages) {
+        auto chunk = SerializedLogChunk{kChunkSize};
+        for (const auto& message : messages) {
+            auto sequence = sequence_++;
+            sequence_numbers_per_buffer_[buffer].emplace_back(sequence);
+            ASSERT_TRUE(chunk.CanLog(message.size() + 1));
+            chunk.Log(sequence, log_time(), 0, 1, 1, message.c_str(), message.size() + 1);
+        }
+        if (finish_writing) {
+            chunk.FinishWriting();
+        }
+        log_chunks_[buffer].emplace_back(std::move(chunk));
+    }
+
+    android::base::LogSeverity old_log_severity_;
+    std::map<int, std::vector<uint64_t>> sequence_numbers_per_buffer_;
+    std::list<SerializedLogChunk> log_chunks_[LOG_ID_MAX];
+    uint64_t sequence_ = 1;
+};
+
+// 0: multiple chunks, with variable number of entries, with/without finishing writing
+// 1: 1 chunk with 1 log and finished writing
+// 2: 1 chunk with 1 log and not finished writing
+// 3: 1 chunk with 0 logs and not finished writing
+// 4: 1 chunk with 0 logs and finished writing (impossible, but SerializedFlushToState handles it)
+// 5-7: 0 chunks
+TEST_F(SerializedFlushToStateTest, smoke) {
+    AddChunkWithMessages(true, 0, {"1st", "2nd"});
+    AddChunkWithMessages(true, 1, {"3rd"});
+    AddChunkWithMessages(false, 0, {"4th"});
+    AddChunkWithMessages(true, 0, {"4th", "5th", "more", "even", "more", "go", "here"});
+    AddChunkWithMessages(false, 2, {"6th"});
+    AddChunkWithMessages(true, 0, {"7th"});
+    AddChunkWithMessages(false, 3, {});
+    AddChunkWithMessages(true, 4, {});
+
+    TestAllReading();
+}
+
+TEST_F(SerializedFlushToStateTest, random) {
+    srand(1);
+    for (int count = 0; count < 20; ++count) {
+        unsigned int num_messages = 1 + rand() % 15;
+        auto messages = std::vector<std::string>{num_messages, "same message"};
+
+        bool compress = rand() % 2;
+        int buf = rand() % LOG_ID_MAX;
+
+        AddChunkWithMessages(compress, buf, messages);
+    }
+
+    TestAllReading();
+}
+
+// Same start as smoke, but we selectively write logs to the buffers and ensure they're read.
+TEST_F(SerializedFlushToStateTest, future_writes) {
+    auto write_logs = [&](int loop_count) {
+        switch (loop_count) {
+            case 0:
+                // Initial writes.
+                AddChunkWithMessages(true, 0, {"1st", "2nd"});
+                AddChunkWithMessages(true, 1, {"3rd"});
+                AddChunkWithMessages(false, 0, {"4th"});
+                AddChunkWithMessages(true, 0, {"4th", "5th", "more", "even", "more", "go", "here"});
+                AddChunkWithMessages(false, 2, {"6th"});
+                AddChunkWithMessages(true, 0, {"7th"});
+                AddChunkWithMessages(false, 3, {});
+                AddChunkWithMessages(true, 4, {});
+                break;
+            case 1:
+                // Smoke test, add a simple chunk.
+                AddChunkWithMessages(true, 0, {"1st", "2nd"});
+                break;
+            case 2:
+                // Add chunks to all but one of the logs.
+                AddChunkWithMessages(true, 0, {"1st", "2nd"});
+                AddChunkWithMessages(true, 1, {"1st", "2nd"});
+                AddChunkWithMessages(true, 2, {"1st", "2nd"});
+                AddChunkWithMessages(true, 3, {"1st", "2nd"});
+                AddChunkWithMessages(true, 4, {"1st", "2nd"});
+                AddChunkWithMessages(true, 5, {"1st", "2nd"});
+                AddChunkWithMessages(true, 6, {"1st", "2nd"});
+                break;
+            case 3:
+                // Finally add chunks to all logs.
+                AddChunkWithMessages(true, 0, {"1st", "2nd"});
+                AddChunkWithMessages(true, 1, {"1st", "2nd"});
+                AddChunkWithMessages(true, 2, {"1st", "2nd"});
+                AddChunkWithMessages(true, 3, {"1st", "2nd"});
+                AddChunkWithMessages(true, 4, {"1st", "2nd"});
+                AddChunkWithMessages(true, 5, {"1st", "2nd"});
+                AddChunkWithMessages(true, 6, {"1st", "2nd"});
+                AddChunkWithMessages(true, 7, {"1st", "2nd"});
+                break;
+            default:
+                return false;
+        }
+        return true;
+    };
+
+    TestAllReadingWithFutureMessages(write_logs);
+}
diff --git a/logd/SerializedLogBuffer.cpp b/logd/SerializedLogBuffer.cpp
new file mode 100644
index 0000000..8bda7cf
--- /dev/null
+++ b/logd/SerializedLogBuffer.cpp
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SerializedLogBuffer.h"
+
+#include <limits>
+#include <thread>
+
+#include <android-base/logging.h>
+#include <android-base/scopeguard.h>
+
+#include "LogStatistics.h"
+#include "SerializedFlushToState.h"
+
+SerializedLogBuffer::SerializedLogBuffer(LogReaderList* reader_list, LogTags* tags,
+                                         LogStatistics* stats)
+    : reader_list_(reader_list), tags_(tags), stats_(stats) {
+    Init();
+}
+
+SerializedLogBuffer::~SerializedLogBuffer() {}
+
+void SerializedLogBuffer::Init() {
+    log_id_for_each(i) {
+        if (SetSize(i, __android_logger_get_buffer_size(i))) {
+            SetSize(i, LOG_BUFFER_MIN_SIZE);
+        }
+    }
+
+    // Release any sleeping reader threads to dump their current content.
+    auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
+    for (const auto& reader_thread : reader_list_->reader_threads()) {
+        reader_thread->triggerReader_Locked();
+    }
+}
+
+bool SerializedLogBuffer::ShouldLog(log_id_t log_id, const char* msg, uint16_t len) {
+    if (log_id == LOG_ID_SECURITY) {
+        return true;
+    }
+
+    int prio = ANDROID_LOG_INFO;
+    const char* tag = nullptr;
+    size_t tag_len = 0;
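+    // Note on the payload layout (as consumed below): binary (event) logs start with a 32-bit
+    // tag that is mapped to a name via LogTags, while text logs start with a priority byte
+    // followed by a NUL-terminated tag string.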
+    if (IsBinary(log_id)) {
+        int32_t tag_int = MsgToTag(msg, len);
+        tag = tags_->tagToName(tag_int);
+        if (tag) {
+            tag_len = strlen(tag);
+        }
+    } else {
+        prio = *msg;
+        tag = msg + 1;
+        tag_len = strnlen(tag, len - 1);
+    }
+    return __android_log_is_loggable_len(prio, tag, tag_len, ANDROID_LOG_VERBOSE);
+}
+
+int SerializedLogBuffer::Log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid, pid_t tid,
+                             const char* msg, uint16_t len) {
+    if (log_id >= LOG_ID_MAX || len == 0) {
+        return -EINVAL;
+    }
+
+    if (!ShouldLog(log_id, msg, len)) {
+        stats_->AddTotal(log_id, len);
+        return -EACCES;
+    }
+
+    auto sequence = sequence_.fetch_add(1, std::memory_order_relaxed);
+
+    auto lock = std::lock_guard{lock_};
+
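+    // Chunks are allocated at a quarter of the buffer's maximum size, so a full buffer holds
+    // roughly four chunks and compression/pruning happens at that granularity.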
+    if (logs_[log_id].empty()) {
+        logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
+    }
+
+    auto total_len = sizeof(SerializedLogEntry) + len;
+    if (!logs_[log_id].back().CanLog(total_len)) {
+        logs_[log_id].back().FinishWriting();
+        logs_[log_id].push_back(SerializedLogChunk(max_size_[log_id] / 4));
+    }
+
+    auto entry = logs_[log_id].back().Log(sequence, realtime, uid, pid, tid, msg, len);
+    stats_->Add(entry->ToLogStatisticsElement(log_id));
+
+    MaybePrune(log_id);
+
+    reader_list_->NotifyNewLog(1 << log_id);
+    return len;
+}
+
+void SerializedLogBuffer::MaybePrune(log_id_t log_id) {
+    auto get_total_size = [](const auto& buffer) {
+        size_t total_size = 0;
+        for (const auto& chunk : buffer) {
+            total_size += chunk.PruneSize();
+        }
+        return total_size;
+    };
+    size_t total_size = get_total_size(logs_[log_id]);
+    if (total_size > max_size_[log_id]) {
+        Prune(log_id, total_size - max_size_[log_id], 0);
+        LOG(INFO) << "Pruned Logs from log_id: " << log_id << ", previous size: " << total_size
+                  << " after size: " << get_total_size(logs_[log_id]);
+    }
+}
+
+// Decompresses the chunks, calls LogStatistics::Subtract() on each entry, then deletes the chunks
+// and the list.  Note that the SerializedLogChunk objects have been removed from logs_ and their
+// references have been deleted from any SerializedFlushToState objects, so this can be safely done
+// without holding lock_.  It is done in a separate thread to avoid delaying the writer thread.  The
+// lambda for the thread takes ownership of the 'chunks' list and thus when the thread ends and the
+// lambda is deleted, the objects are deleted.
+void SerializedLogBuffer::DeleteLogChunks(std::list<SerializedLogChunk>&& chunks, log_id_t log_id) {
+    auto delete_thread = std::thread{[chunks = std::move(chunks), log_id, this]() mutable {
+        for (auto& chunk : chunks) {
+            chunk.IncReaderRefCount();
+            int read_offset = 0;
+            while (read_offset < chunk.write_offset()) {
+                auto* entry = chunk.log_entry(read_offset);
+                stats_->Subtract(entry->ToLogStatisticsElement(log_id));
+                read_offset += entry->total_len();
+            }
+            chunk.DecReaderRefCount(false);
+        }
+    }};
+    delete_thread.detach();
+}
+
+void SerializedLogBuffer::NotifyReadersOfPrune(
+        log_id_t log_id, const std::list<SerializedLogChunk>::iterator& chunk) {
+    for (const auto& reader_thread : reader_list_->reader_threads()) {
+        auto& state = reinterpret_cast<SerializedFlushToState&>(reader_thread->flush_to_state());
+        state.Prune(log_id, chunk);
+    }
+}
+
+bool SerializedLogBuffer::Prune(log_id_t log_id, size_t bytes_to_free, uid_t uid) {
+    // Don't prune logs that are newer than the point at which any reader threads are reading from.
+    LogReaderThread* oldest = nullptr;
+    auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
+    for (const auto& reader_thread : reader_list_->reader_threads()) {
+        if (!reader_thread->IsWatching(log_id)) {
+            continue;
+        }
+        if (!oldest || oldest->start() > reader_thread->start() ||
+            (oldest->start() == reader_thread->start() &&
+             reader_thread->deadline().time_since_epoch().count() != 0)) {
+            oldest = reader_thread.get();
+        }
+    }
+
+    auto& log_buffer = logs_[log_id];
+
+    std::list<SerializedLogChunk> chunks_to_prune;
+    auto prune_chunks = android::base::make_scope_guard([&chunks_to_prune, log_id, this] {
+        DeleteLogChunks(std::move(chunks_to_prune), log_id);
+    });
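+    // The scope guard above hands whatever ends up in chunks_to_prune to the deleter thread on
+    // every return path below.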
+
+    auto it = log_buffer.begin();
+    while (it != log_buffer.end()) {
+        if (oldest != nullptr && it->highest_sequence_number() >= oldest->start()) {
+            break;
+        }
+
+        // Increment ahead of time since we're going to splice this iterator from the list.
+        auto it_to_prune = it++;
+
+        // The sequence number check ensures that all readers have read all logs in this chunk, but
+        // they may still hold a reference to the chunk to track their last read log_position.
+        // Notify them to delete the reference.
+        NotifyReadersOfPrune(log_id, it_to_prune);
+
+        if (uid != 0) {
+            // Reorder the log buffer to remove logs from the given UID.  If there are no logs left
+            // in the buffer after the removal, delete it.
+            if (it_to_prune->ClearUidLogs(uid, log_id, stats_)) {
+                log_buffer.erase(it_to_prune);
+            }
+        } else {
+            size_t buffer_size = it_to_prune->PruneSize();
+            chunks_to_prune.splice(chunks_to_prune.end(), log_buffer, it_to_prune);
+            if (buffer_size >= bytes_to_free) {
+                return true;
+            }
+            bytes_to_free -= buffer_size;
+        }
+    }
+
+    // If we've deleted all buffers without bytes_to_free hitting 0, then we're called by Clear()
+    // and should return true.
+    if (it == log_buffer.end()) {
+        return true;
+    }
+
+    // Otherwise we are stuck due to a reader, so mitigate it.
+    KickReader(oldest, log_id, bytes_to_free);
+    return false;
+}
+
+// If the selected reader is blocking our pruning progress, decide on
+// what kind of mitigation is necessary to unblock the situation.
+void SerializedLogBuffer::KickReader(LogReaderThread* reader, log_id_t id, size_t bytes_to_free) {
+    if (bytes_to_free >= max_size_[id]) {  // +100%
+        // A misbehaving or slow reader is dropped if we hit too much memory pressure.
+        LOG(WARNING) << "Kicking blocked reader, " << reader->name()
+                     << ", from LogBuffer::kickMe()";
+        reader->release_Locked();
+    } else if (reader->deadline().time_since_epoch().count() != 0) {
+        // Allow a blocked WRAP deadline reader to trigger and start reporting the log data.
+        reader->triggerReader_Locked();
+    } else {
+        // Tell slow reader to skip entries to catch up.
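+        // (The divisor of 300 is presumably a rough bytes-per-entry estimate used to translate
+        // the byte target into an entry count; treat it as a heuristic.)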
+        unsigned long prune_rows = bytes_to_free / 300;
+        LOG(WARNING) << "Skipping " << prune_rows << " entries from slow reader, " << reader->name()
+                     << ", from LogBuffer::kickMe()";
+        reader->triggerSkip_Locked(id, prune_rows);
+    }
+}
+
+std::unique_ptr<FlushToState> SerializedLogBuffer::CreateFlushToState(uint64_t start,
+                                                                      LogMask log_mask) {
+    return std::make_unique<SerializedFlushToState>(start, log_mask);
+}
+
+bool SerializedLogBuffer::FlushTo(
+        LogWriter* writer, FlushToState& abstract_state,
+        const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
+                                         log_time realtime)>& filter) {
+    auto lock = std::unique_lock{lock_};
+
+    auto& state = reinterpret_cast<SerializedFlushToState&>(abstract_state);
+    state.InitializeLogs(logs_);
+    state.CheckForNewLogs();
+
+    while (state.HasUnreadLogs()) {
+        MinHeapElement top = state.PopNextUnreadLog();
+        auto* entry = top.entry;
+        auto log_id = top.log_id;
+
+        if (entry->sequence() < state.start()) {
+            continue;
+        }
+        state.set_start(entry->sequence());
+
+        if (!writer->privileged() && entry->uid() != writer->uid()) {
+            continue;
+        }
+
+        if (filter) {
+            auto ret = filter(log_id, entry->pid(), entry->sequence(), entry->realtime());
+            if (ret == FilterResult::kSkip) {
+                continue;
+            }
+            if (ret == FilterResult::kStop) {
+                break;
+            }
+        }
+
+        lock.unlock();
+        // We never prune logs equal to or newer than any LogReaderThreads' `start` value, so the
+        // `entry` pointer is safe here without holding the lock.
+        if (!entry->Flush(writer, log_id)) {
+            return false;
+        }
+        lock.lock();
+
+        // Since we released the log above, buffers that aren't in our min heap may now have had
+        // logs added, so re-check them.
+        state.CheckForNewLogs();
+    }
+
+    state.set_start(state.start() + 1);
+    return true;
+}
+
+bool SerializedLogBuffer::Clear(log_id_t id, uid_t uid) {
+    // Try three times to clear, then disconnect the readers and try one final time.
+    for (int retry = 0; retry < 3; ++retry) {
+        {
+            auto lock = std::lock_guard{lock_};
+            bool prune_success = Prune(id, ULONG_MAX, uid);
+            if (prune_success) {
+                return true;
+            }
+        }
+        sleep(1);
+    }
+    // Check if it is still busy after the sleep, we try to prune one entry, not another clear run,
+    // so we are looking for the quick side effect of the return value to tell us if we have a
+    // _blocked_ reader.
+    bool busy = false;
+    {
+        auto lock = std::lock_guard{lock_};
+        busy = !Prune(id, 1, uid);
+    }
+    // It is still busy, disconnect all readers.
+    if (busy) {
+        auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
+        for (const auto& reader_thread : reader_list_->reader_threads()) {
+            if (reader_thread->IsWatching(id)) {
+                LOG(WARNING) << "Kicking blocked reader, " << reader_thread->name()
+                             << ", from LogBuffer::clear()";
+                reader_thread->release_Locked();
+            }
+        }
+    }
+    auto lock = std::lock_guard{lock_};
+    return Prune(id, ULONG_MAX, uid);
+}
+
+unsigned long SerializedLogBuffer::GetSize(log_id_t id) {
+    auto lock = std::lock_guard{lock_};
+    size_t retval = 2 * max_size_[id] / 3;  // See the comment in SetSize().
+    return retval;
+}
+
+// New SerializedLogChunk objects will be allocated according to the new size, but older ones are
+// unchanged.  MaybePrune() is called on the log buffer to reduce it to an appropriate size if the
+// new size is lower.
+// ChattyLogBuffer/SimpleLogBuffer don't consider the 'Overhead' of LogBufferElement or the
+// std::list<> overhead as part of the log size.  SerializedLogBuffer does by its very nature, so
+// the 'size' metric is not compatible between them.  Their actual memory usage is between 1.5x and
+// 2x of what they claim to use, so we conservatively set our internal size as size + size / 2.
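+//
+// For example (purely illustrative numbers): SetSize(id, 256KiB) stores an internal max_size_[id]
+// of 384KiB, and GetSize(id) then reports 2 * 384KiB / 3 = 256KiB back to the caller.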
+int SerializedLogBuffer::SetSize(log_id_t id, unsigned long size) {
+    // Reasonable limits ...
+    if (!__android_logger_valid_buffer_size(size)) {
+        return -1;
+    }
+
+    auto lock = std::lock_guard{lock_};
+    max_size_[id] = size + size / 2;
+
+    MaybePrune(id);
+
+    return 0;
+}
diff --git a/logd/SerializedLogBuffer.h b/logd/SerializedLogBuffer.h
new file mode 100644
index 0000000..27334ba
--- /dev/null
+++ b/logd/SerializedLogBuffer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <bitset>
+#include <list>
+#include <mutex>
+#include <queue>
+#include <vector>
+
+#include <android-base/thread_annotations.h>
+
+#include "LogBuffer.h"
+#include "LogReaderList.h"
+#include "LogStatistics.h"
+#include "LogTags.h"
+#include "SerializedLogChunk.h"
+#include "SerializedLogEntry.h"
+#include "rwlock.h"
+
+class SerializedLogBuffer : public LogBuffer {
+  public:
+    SerializedLogBuffer(LogReaderList* reader_list, LogTags* tags, LogStatistics* stats);
+    ~SerializedLogBuffer();
+    void Init() override;
+
+    int Log(log_id_t log_id, log_time realtime, uid_t uid, pid_t pid, pid_t tid, const char* msg,
+            uint16_t len) override;
+    std::unique_ptr<FlushToState> CreateFlushToState(uint64_t start, LogMask log_mask) override;
+    bool FlushTo(LogWriter* writer, FlushToState& state,
+                 const std::function<FilterResult(log_id_t log_id, pid_t pid, uint64_t sequence,
+                                                  log_time realtime)>& filter) override;
+
+    bool Clear(log_id_t id, uid_t uid) override;
+    unsigned long GetSize(log_id_t id) override;
+    int SetSize(log_id_t id, unsigned long size) override;
+
+    uint64_t sequence() const override { return sequence_.load(std::memory_order_relaxed); }
+
+  private:
+    bool ShouldLog(log_id_t log_id, const char* msg, uint16_t len);
+    void MaybePrune(log_id_t log_id) REQUIRES(lock_);
+    bool Prune(log_id_t log_id, size_t bytes_to_free, uid_t uid) REQUIRES(lock_);
+    void KickReader(LogReaderThread* reader, log_id_t id, size_t bytes_to_free)
+            REQUIRES_SHARED(lock_);
+    void NotifyReadersOfPrune(log_id_t log_id, const std::list<SerializedLogChunk>::iterator& chunk)
+            REQUIRES(reader_list_->reader_threads_lock());
+    void DeleteLogChunks(std::list<SerializedLogChunk>&& chunks, log_id_t log_id);
+
+    LogReaderList* reader_list_;
+    LogTags* tags_;
+    LogStatistics* stats_;
+
+    unsigned long max_size_[LOG_ID_MAX] GUARDED_BY(lock_) = {};
+    std::list<SerializedLogChunk> logs_[LOG_ID_MAX] GUARDED_BY(lock_);
+    RwLock lock_;
+
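+    // Sequence number to assign to the next log entry; shared across all log buffers and
+    // incremented with a relaxed fetch_add before lock_ is taken.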
+    std::atomic<uint64_t> sequence_ = 1;
+};
diff --git a/logd/SerializedLogChunk.cpp b/logd/SerializedLogChunk.cpp
new file mode 100644
index 0000000..2516003
--- /dev/null
+++ b/logd/SerializedLogChunk.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SerializedLogChunk.h"
+
+#include <android-base/logging.h>
+
+#include "CompressionEngine.h"
+
+SerializedLogChunk::~SerializedLogChunk() {
+    CHECK_EQ(reader_ref_count_, 0U);
+}
+
+void SerializedLogChunk::Compress() {
+    if (compressed_log_.empty()) {
+        CompressionEngine::GetInstance().Compress({contents_.data(), write_offset_},
+                                                  compressed_log_);
+        LOG(INFO) << "Compressed Log, buffer max size: " << contents_.size()
+                  << " size used: " << write_offset_
+                  << " compressed size: " << compressed_log_.size();
+    }
+    // resize(0) alone would keep the old capacity allocated; swap with an empty vector so the
+    // decompressed contents are actually released, as documented in SerializedLogChunk.h.
+    std::vector<uint8_t>().swap(contents_);
+}
+
+// TODO: Develop a better reference counting strategy to guard against the case where the writer is
+// much faster than the reader, and we needlessly compress / decompress the logs.
+void SerializedLogChunk::IncReaderRefCount() {
+    if (++reader_ref_count_ != 1 || writer_active_) {
+        return;
+    }
+    CompressionEngine::GetInstance().Decompress(compressed_log_, contents_, write_offset_);
+}
+
+void SerializedLogChunk::DecReaderRefCount(bool compress) {
+    CHECK_NE(reader_ref_count_, 0U);
+    if (--reader_ref_count_ != 0) {
+        return;
+    }
+    if (compress && !writer_active_) {
+        Compress();
+    }
+}
+
+bool SerializedLogChunk::ClearUidLogs(uid_t uid, log_id_t log_id, LogStatistics* stats) {
+    CHECK_EQ(reader_ref_count_, 0U);
+    if (write_offset_ == 0) {
+        return true;
+    }
+
+    IncReaderRefCount();
+
+    int read_offset = 0;
+    int new_write_offset = 0;
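+    // Compact the buffer in place: read_offset walks every entry, while new_write_offset tracks
+    // where the next surviving (non-matching UID) entry is copied to.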
+    while (read_offset < write_offset_) {
+        const auto* entry = log_entry(read_offset);
+        if (entry->uid() == uid) {
+            read_offset += entry->total_len();
+            if (stats != nullptr) {
+                stats->Subtract(entry->ToLogStatisticsElement(log_id));
+            }
+            continue;
+        }
+        size_t entry_total_len = entry->total_len();
+        if (read_offset != new_write_offset) {
+            memmove(contents_.data() + new_write_offset, contents_.data() + read_offset,
+                    entry_total_len);
+        }
+        read_offset += entry_total_len;
+        new_write_offset += entry_total_len;
+    }
+
+    if (new_write_offset == 0) {
+        DecReaderRefCount(false);
+        return true;
+    }
+
+    // Clear the old compressed logs and set write_offset_ appropriately for DecReaderRefCount()
+    // to compress the new partially cleared log.
+    if (new_write_offset != write_offset_) {
+        compressed_log_.clear();
+        write_offset_ = new_write_offset;
+    }
+
+    DecReaderRefCount(true);
+
+    return false;
+}
+
+bool SerializedLogChunk::CanLog(size_t len) {
+    return write_offset_ + len <= contents_.size();
+}
+
+SerializedLogEntry* SerializedLogChunk::Log(uint64_t sequence, log_time realtime, uid_t uid,
+                                            pid_t pid, pid_t tid, const char* msg, uint16_t len) {
+    auto new_log_address = contents_.data() + write_offset_;
+    auto* entry = new (new_log_address) SerializedLogEntry(uid, pid, tid, sequence, realtime, len);
+    memcpy(entry->msg(), msg, len);
+    write_offset_ += entry->total_len();
+    highest_sequence_number_ = sequence;
+    return entry;
+}
\ No newline at end of file
diff --git a/logd/SerializedLogChunk.h b/logd/SerializedLogChunk.h
new file mode 100644
index 0000000..75cb936
--- /dev/null
+++ b/logd/SerializedLogChunk.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include <vector>
+
+#include "LogWriter.h"
+#include "SerializedLogEntry.h"
+
+class SerializedLogChunk {
+  public:
+    SerializedLogChunk(size_t size) : contents_(size) {}
+    ~SerializedLogChunk();
+
+    void Compress();
+    void IncReaderRefCount();
+    // Decrease the reader ref count and compress the log if appropriate.  `compress` should only be
+    // set to false in the case that the log buffer will be deleted afterwards.
+    void DecReaderRefCount(bool compress);
+
+    // Must have no readers referencing this.  Return true if there are no logs left in this chunk.
+    bool ClearUidLogs(uid_t uid, log_id_t log_id, LogStatistics* stats);
+
+    bool CanLog(size_t len);
+    SerializedLogEntry* Log(uint64_t sequence, log_time realtime, uid_t uid, pid_t pid, pid_t tid,
+                            const char* msg, uint16_t len);
+
+    // If this buffer has been compressed, we only consider its compressed size when accounting for
+    // memory consumption for pruning.  This is because the uncompressed log is only used by
+    // readers, and thus not a representation of how much these logs cost to keep in memory.
+    size_t PruneSize() const { return compressed_log_.size() ?: contents_.size(); }
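+    // (With purely hypothetical numbers: a 40KiB chunk that compresses to 5KiB is charged 5KiB
+    // once compressed, even while a reader temporarily holds it decompressed.)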
+
+    void FinishWriting() {
+        writer_active_ = false;
+        if (reader_ref_count_ == 0) {
+            Compress();
+        }
+    }
+
+    const SerializedLogEntry* log_entry(int offset) const {
+        return reinterpret_cast<const SerializedLogEntry*>(data() + offset);
+    }
+    const uint8_t* data() const { return contents_.data(); }
+    int write_offset() const { return write_offset_; }
+    uint64_t highest_sequence_number() const { return highest_sequence_number_; }
+
+  private:
+    // The decompressed contents of this log buffer.  Deallocated when the ref_count reaches 0 and
+    // writer_active_ is false.
+    std::vector<uint8_t> contents_;
+    int write_offset_ = 0;
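+    // Number of readers (or internal operations such as ClearUidLogs()) currently holding a
+    // decompressed view of contents_.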
+    uint32_t reader_ref_count_ = 0;
+    bool writer_active_ = true;
+    uint64_t highest_sequence_number_ = 1;
+    std::vector<uint8_t> compressed_log_;
+};
diff --git a/logd/SerializedLogChunkTest.cpp b/logd/SerializedLogChunkTest.cpp
new file mode 100644
index 0000000..6572503
--- /dev/null
+++ b/logd/SerializedLogChunkTest.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SerializedLogChunk.h"
+
+#include <limits>
+
+#include <android-base/stringprintf.h>
+#include <android/log.h>
+#include <gtest/gtest.h>
+
+using android::base::StringPrintf;
+
+TEST(SerializedLogChunk, smoke) {
+    size_t chunk_size = 10 * 4096;
+    auto chunk = SerializedLogChunk{chunk_size};
+    EXPECT_EQ(chunk_size, chunk.PruneSize());
+
+    static const char log_message[] = "log message";
+    size_t expected_total_len = sizeof(SerializedLogEntry) + sizeof(log_message);
+    ASSERT_TRUE(chunk.CanLog(expected_total_len));
+    EXPECT_TRUE(chunk.CanLog(chunk_size));
+    EXPECT_FALSE(chunk.CanLog(chunk_size + 1));
+
+    log_time time(CLOCK_REALTIME);
+    auto* entry = chunk.Log(1234, time, 0, 1, 2, log_message, sizeof(log_message));
+    ASSERT_NE(nullptr, entry);
+
+    EXPECT_EQ(1234U, entry->sequence());
+    EXPECT_EQ(time, entry->realtime());
+    EXPECT_EQ(0U, entry->uid());
+    EXPECT_EQ(1, entry->pid());
+    EXPECT_EQ(2, entry->tid());
+    EXPECT_EQ(sizeof(log_message), entry->msg_len());
+    EXPECT_STREQ(log_message, entry->msg());
+    EXPECT_EQ(expected_total_len, entry->total_len());
+
+    EXPECT_FALSE(chunk.CanLog(chunk_size));
+    EXPECT_EQ(static_cast<int>(expected_total_len), chunk.write_offset());
+    EXPECT_EQ(1234U, chunk.highest_sequence_number());
+}
+
+TEST(SerializedLogChunk, fill_log_exactly) {
+    static const char log_message[] = "this is a log message";
+    size_t individual_message_size = sizeof(SerializedLogEntry) + sizeof(log_message);
+    size_t chunk_size = individual_message_size * 3;
+    auto chunk = SerializedLogChunk{chunk_size};
+    EXPECT_EQ(chunk_size, chunk.PruneSize());
+
+    ASSERT_TRUE(chunk.CanLog(individual_message_size));
+    EXPECT_NE(nullptr, chunk.Log(1, log_time(), 1000, 1, 1, log_message, sizeof(log_message)));
+
+    ASSERT_TRUE(chunk.CanLog(individual_message_size));
+    EXPECT_NE(nullptr, chunk.Log(2, log_time(), 1000, 2, 1, log_message, sizeof(log_message)));
+
+    ASSERT_TRUE(chunk.CanLog(individual_message_size));
+    EXPECT_NE(nullptr, chunk.Log(3, log_time(), 1000, 3, 1, log_message, sizeof(log_message)));
+
+    EXPECT_FALSE(chunk.CanLog(1));
+}
+
+TEST(SerializedLogChunk, three_logs) {
+    size_t chunk_size = 10 * 4096;
+    auto chunk = SerializedLogChunk{chunk_size};
+
+    chunk.Log(2, log_time(0x1234, 0x5678), 0x111, 0x222, 0x333, "initial message",
+              strlen("initial message"));
+    chunk.Log(3, log_time(0x2345, 0x6789), 0x444, 0x555, 0x666, "second message",
+              strlen("second message"));
+    auto uint64_t_max = std::numeric_limits<uint64_t>::max();
+    auto uint32_t_max = std::numeric_limits<uint32_t>::max();
+    chunk.Log(uint64_t_max, log_time(uint32_t_max, uint32_t_max), uint32_t_max, uint32_t_max,
+              uint32_t_max, "last message", strlen("last message"));
+
+    static const char expected_buffer_data[] =
+            "\x11\x01\x00\x00\x22\x02\x00\x00\x33\x03\x00\x00"  // UID PID TID
+            "\x02\x00\x00\x00\x00\x00\x00\x00"                  // Sequence
+            "\x34\x12\x00\x00\x78\x56\x00\x00"                  // Timestamp
+            "\x0F\x00initial message"                           // msg_len + message
+            "\x44\x04\x00\x00\x55\x05\x00\x00\x66\x06\x00\x00"  // UID PID TID
+            "\x03\x00\x00\x00\x00\x00\x00\x00"                  // Sequence
+            "\x45\x23\x00\x00\x89\x67\x00\x00"                  // Timestamp
+            "\x0E\x00second message"                            // msg_len + message
+            "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"  // UID PID TID
+            "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"                  // Sequence
+            "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"                  // Timestamp
+            "\x0C\x00last message";                             // msg_len + message
+
+    for (size_t i = 0; i < chunk_size; ++i) {
+        if (i < sizeof(expected_buffer_data)) {
+            EXPECT_EQ(static_cast<uint8_t>(expected_buffer_data[i]), chunk.data()[i])
+                    << "position: " << i;
+        } else {
+            EXPECT_EQ(0, chunk.data()[i]) << "position: " << i;
+        }
+    }
+}
+
+// Check that the CHECK() in DecReaderRefCount() fires if the ref count is decremented below zero.
+TEST(SerializedLogChunk, catch_DecCompressedRef_CHECK) {
+    size_t chunk_size = 10 * 4096;
+    auto chunk = SerializedLogChunk{chunk_size};
+    EXPECT_DEATH({ chunk.DecReaderRefCount(true); }, "");
+    EXPECT_DEATH({ chunk.DecReaderRefCount(false); }, "");
+}
+
+// Check that the CHECK() in ClearUidLogs() fires if it is called while the ref count is greater than 0.
+TEST(SerializedLogChunk, catch_ClearUidLogs_CHECK) {
+    size_t chunk_size = 10 * 4096;
+    auto chunk = SerializedLogChunk{chunk_size};
+    chunk.IncReaderRefCount();
+    EXPECT_DEATH({ chunk.ClearUidLogs(1000, LOG_ID_MAIN, nullptr); }, "");
+    chunk.DecReaderRefCount(false);
+}
+
+class UidClearTest : public testing::TestWithParam<bool> {
+  protected:
+    template <typename Write, typename Check>
+    void Test(const Write& write, const Check& check, bool expected_result) {
+        write(chunk_);
+
+        bool finish_writing = GetParam();
+        if (finish_writing) {
+            chunk_.FinishWriting();
+        }
+        EXPECT_EQ(expected_result, chunk_.ClearUidLogs(kUidToClear, LOG_ID_MAIN, nullptr));
+        if (finish_writing) {
+            chunk_.IncReaderRefCount();
+        }
+
+        check(chunk_);
+
+        if (finish_writing) {
+            chunk_.DecReaderRefCount(false);
+        }
+    }
+
+    static constexpr size_t kChunkSize = 10 * 4096;
+    static constexpr uid_t kUidToClear = 1000;
+    static constexpr uid_t kOtherUid = 1234;
+
+    SerializedLogChunk chunk_{kChunkSize};
+};
+
+// Test that ClearUidLogs() reports an empty chunk as having no logs left.
+TEST_P(UidClearTest, no_logs_in_chunk) {
+    auto write = [](SerializedLogChunk&) {};
+    auto check = [](SerializedLogChunk&) {};
+
+    Test(write, check, true);
+}
+
+// Test that ClearUidLogs() is a no-op if there are no logs of that UID in the buffer.
+TEST_P(UidClearTest, no_logs_from_uid) {
+    static const char msg[] = "this is a log message";
+    auto write = [](SerializedLogChunk& chunk) {
+        chunk.Log(1, log_time(), kOtherUid, 1, 2, msg, sizeof(msg));
+    };
+
+    auto check = [](SerializedLogChunk& chunk) {
+        auto* entry = chunk.log_entry(0);
+        EXPECT_STREQ(msg, entry->msg());
+    };
+
+    Test(write, check, false);
+}
+
+// Test that ClearUidLogs() returns true if all logs in a given buffer correspond to the given UID.
+TEST_P(UidClearTest, all_single) {
+    static const char msg[] = "this is a log message";
+    auto write = [](SerializedLogChunk& chunk) {
+        chunk.Log(1, log_time(), kUidToClear, 1, 2, msg, sizeof(msg));
+    };
+    auto check = [](SerializedLogChunk&) {};
+
+    Test(write, check, true);
+}
+
+// Test that ClearUidLogs() returns true if all logs in a given buffer correspond to the given UID.
+TEST_P(UidClearTest, all_multiple) {
+    static const char msg[] = "this is a log message";
+    auto write = [](SerializedLogChunk& chunk) {
+        chunk.Log(2, log_time(), kUidToClear, 1, 2, msg, sizeof(msg));
+        chunk.Log(3, log_time(), kUidToClear, 1, 2, msg, sizeof(msg));
+        chunk.Log(4, log_time(), kUidToClear, 1, 2, msg, sizeof(msg));
+    };
+    auto check = [](SerializedLogChunk&) {};
+
+    Test(write, check, true);
+}
+
+static std::string MakePrintable(const uint8_t* in, size_t length) {
+    std::string result;
+    for (size_t i = 0; i < length; ++i) {
+        uint8_t c = in[i];
+        if (isprint(c)) {
+            result.push_back(c);
+        } else {
+            result.append(StringPrintf("\\%02x", static_cast<int>(c) & 0xFF));
+        }
+    }
+    return result;
+}
+
+// This test clears UID logs at the beginning and end of the buffer, as well as two back to back
+// logs in the interior.
+TEST_P(UidClearTest, clear_beginning_and_end) {
+    static const char msg1[] = "this is a log message";
+    static const char msg2[] = "non-cleared message";
+    static const char msg3[] = "back to back cleared messages";
+    static const char msg4[] = "second in a row gone";
+    static const char msg5[] = "but we save this one";
+    static const char msg6[] = "and this 1!";
+    static const char msg7[] = "the last one goes too";
+    auto write = [](SerializedLogChunk& chunk) {
+        ASSERT_NE(nullptr, chunk.Log(1, log_time(), kUidToClear, 1, 2, msg1, sizeof(msg1)));
+        ASSERT_NE(nullptr, chunk.Log(2, log_time(), kOtherUid, 1, 2, msg2, sizeof(msg2)));
+        ASSERT_NE(nullptr, chunk.Log(3, log_time(), kUidToClear, 1, 2, msg3, sizeof(msg3)));
+        ASSERT_NE(nullptr, chunk.Log(4, log_time(), kUidToClear, 1, 2, msg4, sizeof(msg4)));
+        ASSERT_NE(nullptr, chunk.Log(5, log_time(), kOtherUid, 1, 2, msg5, sizeof(msg5)));
+        ASSERT_NE(nullptr, chunk.Log(6, log_time(), kOtherUid, 1, 2, msg6, sizeof(msg6)));
+        ASSERT_NE(nullptr, chunk.Log(7, log_time(), kUidToClear, 1, 2, msg7, sizeof(msg7)));
+    };
+
+    auto check = [](SerializedLogChunk& chunk) {
+        size_t read_offset = 0;
+        auto* entry = chunk.log_entry(read_offset);
+        EXPECT_STREQ(msg2, entry->msg());
+        read_offset += entry->total_len();
+
+        entry = chunk.log_entry(read_offset);
+        EXPECT_STREQ(msg5, entry->msg());
+        read_offset += entry->total_len();
+
+        entry = chunk.log_entry(read_offset);
+        EXPECT_STREQ(msg6, entry->msg()) << MakePrintable(chunk.data(), chunk.write_offset());
+        read_offset += entry->total_len();
+
+        EXPECT_EQ(static_cast<int>(read_offset), chunk.write_offset());
+    };
+    Test(write, check, false);
+}
+
+// This tests the opposite case of beginning_and_end, in which we don't clear the beginning or end
+// logs.  There is a single log pruned in the middle instead of back to back logs.
+TEST_P(UidClearTest, save_beginning_and_end) {
+    static const char msg1[] = "saved first message";
+    static const char msg2[] = "cleared interior message";
+    static const char msg3[] = "last message stays";
+    auto write = [](SerializedLogChunk& chunk) {
+        ASSERT_NE(nullptr, chunk.Log(1, log_time(), kOtherUid, 1, 2, msg1, sizeof(msg1)));
+        ASSERT_NE(nullptr, chunk.Log(2, log_time(), kUidToClear, 1, 2, msg2, sizeof(msg2)));
+        ASSERT_NE(nullptr, chunk.Log(3, log_time(), kOtherUid, 1, 2, msg3, sizeof(msg3)));
+    };
+
+    auto check = [](SerializedLogChunk& chunk) {
+        size_t read_offset = 0;
+        auto* entry = chunk.log_entry(read_offset);
+        EXPECT_STREQ(msg1, entry->msg());
+        read_offset += entry->total_len();
+
+        entry = chunk.log_entry(read_offset);
+        EXPECT_STREQ(msg3, entry->msg());
+        read_offset += entry->total_len();
+
+        EXPECT_EQ(static_cast<int>(read_offset), chunk.write_offset());
+    };
+    Test(write, check, false);
+}
+
+INSTANTIATE_TEST_CASE_P(UidClearTests, UidClearTest, testing::Values(true, false));
diff --git a/logd/SerializedLogEntry.h b/logd/SerializedLogEntry.h
new file mode 100644
index 0000000..f599abe
--- /dev/null
+++ b/logd/SerializedLogEntry.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <log/log.h>
+#include <log/log_read.h>
+
+#include "LogStatistics.h"
+#include "LogWriter.h"
+
+// These structs are packed into a single chunk of memory for each log type within a
+// SerializedLogChunk object.  Each entry's message is stored immediately after the struct.  The
+// address of the next log in the buffer is *this + sizeof(SerializedLogEntry) + msg_len_.  If that
+// value would overflow the chunk of memory associated with the SerializedLogChunk object, then a
+// new SerializedLogChunk must be allocated to contain the next SerializedLogEntry.
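+//
+// For illustration only, a rough sketch of walking the entries in a chunk's decompressed
+// contents (mirroring how SerializedLogBuffer::DeleteLogChunks() iterates):
+//
+//   int offset = 0;
+//   while (offset < chunk.write_offset()) {
+//       const SerializedLogEntry* entry = chunk.log_entry(offset);
+//       // ... inspect *entry ...
+//       offset += entry->total_len();
+//   }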
+class __attribute__((packed)) SerializedLogEntry {
+  public:
+    SerializedLogEntry(uid_t uid, pid_t pid, pid_t tid, uint64_t sequence, log_time realtime,
+                       uint16_t len)
+        : uid_(uid),
+          pid_(pid),
+          tid_(tid),
+          sequence_(sequence),
+          realtime_(realtime),
+          msg_len_(len) {}
+    SerializedLogEntry(const SerializedLogEntry& elem) = delete;
+    SerializedLogEntry& operator=(const SerializedLogEntry& elem) = delete;
+    ~SerializedLogEntry() {
+        // Never place anything in this destructor.  This class is in place constructed and never
+        // destructed.
+    }
+
+    LogStatisticsElement ToLogStatisticsElement(log_id_t log_id) const {
+        return LogStatisticsElement{
+                .uid = uid(),
+                .pid = pid(),
+                .tid = tid(),
+                .tag = IsBinary(log_id) ? MsgToTag(msg(), msg_len()) : 0,
+                .realtime = realtime(),
+                .msg = msg(),
+                .msg_len = msg_len(),
+                .dropped_count = 0,
+                .log_id = log_id,
+        };
+    }
+
+    bool Flush(LogWriter* writer, log_id_t log_id) const {
+        struct logger_entry entry = {};
+
+        entry.hdr_size = sizeof(struct logger_entry);
+        entry.lid = log_id;
+        entry.pid = pid();
+        entry.tid = tid();
+        entry.uid = uid();
+        entry.sec = realtime().tv_sec;
+        entry.nsec = realtime().tv_nsec;
+        entry.len = msg_len();
+
+        return writer->Write(entry, msg());
+    }
+
+    uid_t uid() const { return uid_; }
+    pid_t pid() const { return pid_; }
+    pid_t tid() const { return tid_; }
+    uint16_t msg_len() const { return msg_len_; }
+    uint64_t sequence() const { return sequence_; }
+    log_time realtime() const { return realtime_; }
+
+    char* msg() { return reinterpret_cast<char*>(this) + sizeof(*this); }
+    const char* msg() const { return reinterpret_cast<const char*>(this) + sizeof(*this); }
+    uint16_t total_len() const { return sizeof(*this) + msg_len_; }
+
+  private:
+    const uint32_t uid_;
+    const uint32_t pid_;
+    const uint32_t tid_;
+    const uint64_t sequence_;
+    const log_time realtime_;
+    const uint16_t msg_len_;
+};
diff --git a/logd/SimpleLogBuffer.cpp b/logd/SimpleLogBuffer.cpp
index 292a7e4..ec08d54 100644
--- a/logd/SimpleLogBuffer.cpp
+++ b/logd/SimpleLogBuffer.cpp
@@ -212,46 +212,38 @@
     return true;
 }
 
-// clear all rows of type "id" from the buffer.
 bool SimpleLogBuffer::Clear(log_id_t id, uid_t uid) {
-    bool busy = true;
-    // If it takes more than 4 tries (seconds) to clear, then kill reader(s)
-    for (int retry = 4;;) {
-        if (retry == 1) {  // last pass
-            // Check if it is still busy after the sleep, we say prune
-            // one entry, not another clear run, so we are looking for
-            // the quick side effect of the return value to tell us if
-            // we have a _blocked_ reader.
-            {
-                auto lock = std::lock_guard{lock_};
-                busy = Prune(id, 1, uid);
-            }
-            // It is still busy, blocked reader(s), lets kill them all!
-            // otherwise, lets be a good citizen and preserve the slow
-            // readers and let the clear run (below) deal with determining
-            // if we are still blocked and return an error code to caller.
-            if (busy) {
-                auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
-                for (const auto& reader_thread : reader_list_->reader_threads()) {
-                    if (reader_thread->IsWatching(id)) {
-                        LOG(WARNING) << "Kicking blocked reader, " << reader_thread->name()
-                                     << ", from LogBuffer::clear()";
-                        reader_thread->release_Locked();
-                    }
-                }
-            }
-        }
+    // Try three times to clear, then disconnect the readers and try one final time.
+    for (int retry = 0; retry < 3; ++retry) {
         {
             auto lock = std::lock_guard{lock_};
-            busy = Prune(id, ULONG_MAX, uid);
+            if (Prune(id, ULONG_MAX, uid)) {
+                return true;
+            }
         }
-
-        if (!busy || !--retry) {
-            break;
-        }
-        sleep(1);  // Let reader(s) catch up after notification
+        sleep(1);
     }
-    return busy;
+    // Check if it is still busy after the sleep, we try to prune one entry, not another clear run,
+    // so we are looking for the quick side effect of the return value to tell us if we have a
+    // _blocked_ reader.
+    bool busy = false;
+    {
+        auto lock = std::lock_guard{lock_};
+        busy = !Prune(id, 1, uid);
+    }
+    // It is still busy, disconnect all readers.
+    if (busy) {
+        auto reader_threads_lock = std::lock_guard{reader_list_->reader_threads_lock()};
+        for (const auto& reader_thread : reader_list_->reader_threads()) {
+            if (reader_thread->IsWatching(id)) {
+                LOG(WARNING) << "Kicking blocked reader, " << reader_thread->name()
+                             << ", from LogBuffer::clear()";
+                reader_thread->release_Locked();
+            }
+        }
+    }
+    auto lock = std::lock_guard{lock_};
+    return Prune(id, ULONG_MAX, uid);
 }
 
 // get the total space allocated to "id"
@@ -313,16 +305,16 @@
 
         if (oldest && oldest->start() <= element.sequence()) {
             KickReader(oldest, id, prune_rows);
-            return true;
+            return false;
         }
 
         stats_->Subtract(element.ToLogStatisticsElement());
         it = Erase(it);
         if (--prune_rows == 0) {
-            return false;
+            return true;
         }
     }
-    return false;
+    return true;
 }
 
 std::list<LogBufferElement>::iterator SimpleLogBuffer::Erase(
diff --git a/logd/fuzz/Android.bp b/logd/fuzz/Android.bp
index f65fbdf..9834ff0 100644
--- a/logd/fuzz/Android.bp
+++ b/logd/fuzz/Android.bp
@@ -26,6 +26,8 @@
         "liblogd",
         "libcutils",
         "libsysutils",
+        "libz",
+        "libzstd",
     ],
     cflags: ["-Werror"],
 }
diff --git a/logd/main.cpp b/logd/main.cpp
index 773ffb8..46b6567 100644
--- a/logd/main.cpp
+++ b/logd/main.cpp
@@ -58,6 +58,7 @@
 #include "LogStatistics.h"
 #include "LogTags.h"
 #include "LogUtils.h"
+#include "SerializedLogBuffer.h"
 #include "SimpleLogBuffer.h"
 
 #define KMSG_PRIORITY(PRI)                                 \
diff --git a/rootdir/init.rc b/rootdir/init.rc
index 427ac4b..01551e2 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -490,6 +490,9 @@
     chown root log /proc/slabinfo
     chmod 0440 /proc/slabinfo
 
+    chown root log /proc/pagetypeinfo
+    chmod 0440 /proc/pagetypeinfo
+
     #change permissions on kmsg & sysrq-trigger so bugreports can grab kthread stacks
     chown root system /proc/kmsg
     chmod 0440 /proc/kmsg
diff --git a/rootdir/init.zygote32_64.rc b/rootdir/init.zygote32_64.rc
deleted file mode 100644
index f6149c9..0000000
--- a/rootdir/init.zygote32_64.rc
+++ /dev/null
@@ -1,24 +0,0 @@
-service zygote /system/bin/app_process32 -Xzygote /system/bin --zygote --start-system-server --socket-name=zygote
-    class main
-    priority -20
-    user root
-    group root readproc reserved_disk
-    socket zygote stream 660 root system
-    socket usap_pool_primary stream 660 root system
-    onrestart write /sys/power/state on
-    onrestart restart audioserver
-    onrestart restart cameraserver
-    onrestart restart media
-    onrestart restart netd
-    onrestart restart wificond
-    writepid /dev/cpuset/foreground/tasks
-
-service zygote_secondary /system/bin/app_process64 -Xzygote /system/bin --zygote --socket-name=zygote_secondary
-    class main
-    priority -20
-    user root
-    group root readproc reserved_disk
-    socket zygote_secondary stream 660 root system
-    socket usap_pool_secondary stream 660 root system
-    onrestart restart zygote
-    writepid /dev/cpuset/foreground/tasks
diff --git a/shell_and_utilities/Android.bp b/shell_and_utilities/Android.bp
index b5a5fb6..f83c43e 100644
--- a/shell_and_utilities/Android.bp
+++ b/shell_and_utilities/Android.bp
@@ -36,7 +36,7 @@
         "sh.recovery",
         "toolbox.recovery",
         "toybox.recovery",
-        "unzip.recovery",
+        "ziptool.recovery",
     ],
 }
 
diff --git a/toolbox/modprobe.cpp b/toolbox/modprobe.cpp
index 1b5f54e..3ffa74e 100644
--- a/toolbox/modprobe.cpp
+++ b/toolbox/modprobe.cpp
@@ -36,7 +36,7 @@
     std::cerr << "  modprobe [-alrqvsDb] [-d DIR] MODULE [symbol=value][...]" << std::endl;
     std::cerr << std::endl;
     std::cerr << "Options:" << std::endl;
-    std::cerr << "  -b: Apply blacklist to module names too" << std::endl;
+    std::cerr << "  -b: Apply blocklist to module names too" << std::endl;
     std::cerr << "  -d: Load modules from DIR, option may be used multiple times" << std::endl;
     std::cerr << "  -D: Print dependencies for modules only, do not load";
     std::cerr << "  -h: Print this help" << std::endl;
@@ -59,7 +59,7 @@
     std::string module_parameters;
     std::vector<std::string> mod_dirs;
     modprobe_mode mode = AddModulesMode;
-    bool blacklist = false;
+    bool blocklist = false;
     bool verbose = false;
     int rv = EXIT_SUCCESS;
 
@@ -72,7 +72,7 @@
                 check_mode();
                 break;
             case 'b':
-                blacklist = true;
+                blocklist = true;
                 break;
             case 'd':
                 mod_dirs.emplace_back(optarg);
@@ -151,8 +151,8 @@
 
     Modprobe m(mod_dirs);
     m.EnableVerbose(verbose);
-    if (blacklist) {
-        m.EnableBlacklist(true);
+    if (blocklist) {
+        m.EnableBlocklist(true);
     }
 
     for (const auto& module : modules) {
diff --git a/trusty/storage/proxy/rpmb.c b/trusty/storage/proxy/rpmb.c
index 7dfd0d0..d1ed649 100644
--- a/trusty/storage/proxy/rpmb.c
+++ b/trusty/storage/proxy/rpmb.c
@@ -231,7 +231,7 @@
 
     if (req->read_size) {
         /* Prepare SECURITY PROTOCOL IN command. */
-        out_cdb.length = __builtin_bswap32(req->read_size);
+        in_cdb.length = __builtin_bswap32(req->read_size);
         sg_io_hdr_t io_hdr;
         set_sg_io_hdr(&io_hdr, SG_DXFER_FROM_DEV, sizeof(in_cdb), sizeof(sense_buffer),
                       req->read_size, read_buf, (unsigned char*)&in_cdb, sense_buffer);