Merge "Hang up narayan's janitor overalls." into main
diff --git a/debuggerd/libdebuggerd/scudo.cpp b/debuggerd/libdebuggerd/scudo.cpp
index cff43f8..3fa3bd0 100644
--- a/debuggerd/libdebuggerd/scudo.cpp
+++ b/debuggerd/libdebuggerd/scudo.cpp
@@ -81,7 +81,7 @@
}
__scudo_get_error_info(&error_info_, process_info.maybe_tagged_fault_address, stack_depot.get(),
- __scudo_get_stack_depot_size(), region_info.get(), ring_buffer.get(),
+ process_info.scudo_stack_depot_size, region_info.get(), ring_buffer.get(),
process_info.scudo_ring_buffer_size, memory.get(), memory_tags.get(),
memory_begin, memory_end - memory_begin);
}
diff --git a/fastboot/fastboot.cpp b/fastboot/fastboot.cpp
index 1bc7b75..235d723 100644
--- a/fastboot/fastboot.cpp
+++ b/fastboot/fastboot.cpp
@@ -1675,7 +1675,7 @@
}
for (size_t i = 0; i < tasks->size(); i++) {
if (auto flash_task = tasks->at(i)->AsFlashTask()) {
- if (FlashTask::IsDynamicParitition(fp->source.get(), flash_task)) {
+ if (FlashTask::IsDynamicPartition(fp->source.get(), flash_task)) {
if (!loc) {
loc = i;
}
diff --git a/fastboot/task.cpp b/fastboot/task.cpp
index 0947ff9..ea78a01 100644
--- a/fastboot/task.cpp
+++ b/fastboot/task.cpp
@@ -30,7 +30,7 @@
const bool apply_vbmeta, const FlashingPlan* fp)
: pname_(pname), fname_(fname), slot_(slot), apply_vbmeta_(apply_vbmeta), fp_(fp) {}
-bool FlashTask::IsDynamicParitition(const ImageSource* source, const FlashTask* task) {
+bool FlashTask::IsDynamicPartition(const ImageSource* source, const FlashTask* task) {
std::vector<char> contents;
if (!source->ReadFile("super_empty.img", &contents)) {
return false;
@@ -152,7 +152,7 @@
continue;
}
auto flash_task = tasks[i + 2]->AsFlashTask();
- if (!FlashTask::IsDynamicParitition(source, flash_task)) {
+ if (!FlashTask::IsDynamicPartition(source, flash_task)) {
continue;
}
return true;
diff --git a/fastboot/task.h b/fastboot/task.h
index a98c874..7a713cf 100644
--- a/fastboot/task.h
+++ b/fastboot/task.h
@@ -52,7 +52,7 @@
const bool apply_vbmeta, const FlashingPlan* fp);
virtual FlashTask* AsFlashTask() override { return this; }
- static bool IsDynamicParitition(const ImageSource* source, const FlashTask* task);
+ static bool IsDynamicPartition(const ImageSource* source, const FlashTask* task);
void Run() override;
std::string ToString() const override;
std::string GetPartition() const { return pname_; }
diff --git a/fastboot/task_test.cpp b/fastboot/task_test.cpp
index 81154c6..519d4ed 100644
--- a/fastboot/task_test.cpp
+++ b/fastboot/task_test.cpp
@@ -233,7 +233,7 @@
<< "size of fastboot-info task list: " << fastboot_info_tasks.size()
<< " size of hardcoded task list: " << hardcoded_tasks.size();
}
-TEST_F(ParseTest, IsDynamicParitiontest) {
+TEST_F(ParseTest, IsDynamicPartitiontest) {
if (!get_android_product_out()) {
GTEST_SKIP();
}
@@ -258,7 +258,7 @@
ParseFastbootInfoLine(fp.get(), android::base::Tokenize(test.first, " "));
auto flash_task = task->AsFlashTask();
ASSERT_FALSE(flash_task == nullptr);
- ASSERT_EQ(FlashTask::IsDynamicParitition(fp->source.get(), flash_task), test.second);
+ ASSERT_EQ(FlashTask::IsDynamicPartition(fp->source.get(), flash_task), test.second);
}
}
@@ -358,7 +358,7 @@
contains_optimized_task = true;
}
if (auto flash_task = task->AsFlashTask()) {
- if (FlashTask::IsDynamicParitition(fp->source.get(), flash_task)) {
+ if (FlashTask::IsDynamicPartition(fp->source.get(), flash_task)) {
return false;
}
}
diff --git a/fs_mgr/fs_mgr_overlayfs_control.cpp b/fs_mgr/fs_mgr_overlayfs_control.cpp
index 06214ef..08ad80c 100644
--- a/fs_mgr/fs_mgr_overlayfs_control.cpp
+++ b/fs_mgr/fs_mgr_overlayfs_control.cpp
@@ -219,6 +219,35 @@
return OverlayfsTeardownResult::Ok;
}
+bool GetOverlaysActiveFlag() {
+ auto slot_number = fs_mgr_overlayfs_slot_number();
+ const auto super_device = kPhysicalDevice + fs_mgr_get_super_partition_name();
+
+ auto metadata = ReadMetadata(super_device, slot_number);
+ if (!metadata) {
+ return false;
+ }
+ return !!(metadata->header.flags & LP_HEADER_FLAG_OVERLAYS_ACTIVE);
+}
+
+bool SetOverlaysActiveFlag(bool flag) {
+ // Mark overlays as active in the partition table, to detect re-flash.
+ auto slot_number = fs_mgr_overlayfs_slot_number();
+ const auto super_device = kPhysicalDevice + fs_mgr_get_super_partition_name();
+ auto builder = MetadataBuilder::New(super_device, slot_number);
+ if (!builder) {
+ LERROR << "open " << super_device << " metadata";
+ return false;
+ }
+ builder->SetOverlaysActiveFlag(flag);
+ auto metadata = builder->Export();
+ if (!metadata || !UpdatePartitionTable(super_device, *metadata.get(), slot_number)) {
+ LERROR << "update super metadata";
+ return false;
+ }
+ return true;
+}
+
OverlayfsTeardownResult fs_mgr_overlayfs_teardown_scratch(const std::string& overlay,
bool* change) {
// umount and delete kScratchMountPoint storage if we have logical partitions
@@ -232,6 +261,10 @@
return OverlayfsTeardownResult::Error;
}
+ // Note: we don't care if SetOverlaysActiveFlag fails, since
+ // the overlays are removed no matter what.
+ SetOverlaysActiveFlag(false);
+
bool was_mounted = fs_mgr_overlayfs_already_mounted(kScratchMountPoint, false);
if (was_mounted) {
fs_mgr_overlayfs_umount_scratch();
@@ -448,6 +481,7 @@
}
}
}
+
// land the update back on to the partition
if (changed) {
auto metadata = builder->Export();
@@ -592,6 +626,12 @@
return false;
}
+ if (!SetOverlaysActiveFlag(true)) {
+ LOG(ERROR) << "Failed to update dynamic partition data";
+ fs_mgr_overlayfs_teardown_scratch(kScratchMountPoint, nullptr);
+ return false;
+ }
+
// If the partition exists, assume first that it can be mounted.
if (partition_exists) {
if (MountScratch(scratch_device)) {
@@ -856,6 +896,9 @@
return;
}
+ if (!GetOverlaysActiveFlag()) {
+ return;
+ }
if (ScratchIsOnData()) {
if (auto images = IImageManager::Open("remount", 0ms)) {
images->MapAllImages(init);
@@ -879,6 +922,9 @@
}
if (auto images = IImageManager::Open("remount", 0ms)) {
images->RemoveDisabledImages();
+ if (!GetOverlaysActiveFlag()) {
+ fs_mgr_overlayfs_teardown_scratch(kScratchMountPoint, nullptr);
+ }
}
}
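Note: GetOverlaysActiveFlag()/SetOverlaysActiveFlag() persist overlay state in the super partition's LP metadata header (via LP_HEADER_FLAG_OVERLAYS_ACTIVE, defined later in this change), so re-flashing super implicitly clears the flag and the boot path above tears down stale scratch. A minimal sketch of the round trip, using only the liblp APIs visible in this diff:

```cpp
// Sketch: persist a flag in the LP metadata header and read it back.
// super_device and slot_number come from the surrounding fs_mgr helpers.
auto builder = MetadataBuilder::New(super_device, slot_number);
if (builder) {
    builder->SetOverlaysActiveFlag(true);  // sets LP_HEADER_FLAG_OVERLAYS_ACTIVE
    auto metadata = builder->Export();
    UpdatePartitionTable(super_device, *metadata, slot_number);
}
auto read_back = ReadMetadata(super_device, slot_number);
bool active = read_back && (read_back->header.flags & LP_HEADER_FLAG_OVERLAYS_ACTIVE);
```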
diff --git a/fs_mgr/libfs_avb/fs_avb.cpp b/fs_mgr/libfs_avb/fs_avb.cpp
index fb22423..be48de6 100644
--- a/fs_mgr/libfs_avb/fs_avb.cpp
+++ b/fs_mgr/libfs_avb/fs_avb.cpp
@@ -288,14 +288,82 @@
return false;
}
-AvbUniquePtr AvbHandle::LoadAndVerifyVbmeta(const FstabEntry& fstab_entry,
- const std::vector<std::string>& preload_avb_key_blobs) {
+bool IsPublicKeyMatching(const FstabEntry& fstab_entry, const std::string& public_key_data,
+ const std::vector<std::string>& preload_avb_key_blobs) {
// At least one of the following should be provided for public key matching.
if (preload_avb_key_blobs.empty() && fstab_entry.avb_keys.empty()) {
LERROR << "avb_keys=/path/to/key(s) is missing for " << fstab_entry.mount_point;
- return nullptr;
+ return false;
}
+ // Expected key shouldn't be empty.
+ if (public_key_data.empty()) {
+ LERROR << "public key data shouldn't be empty for " << fstab_entry.mount_point;
+ return false;
+ }
+
+ // Performs key matching for preload_avb_key_blobs first, if it is present.
+ if (!preload_avb_key_blobs.empty()) {
+ if (std::find(preload_avb_key_blobs.begin(), preload_avb_key_blobs.end(),
+ public_key_data) != preload_avb_key_blobs.end()) {
+ return true;
+ }
+ }
+
+ // Performs key matching for fstab_entry.avb_keys if necessary.
+ // Note that it is intentional to match both preload_avb_key_blobs and fstab_entry.avb_keys.
+ // Some keys might only be available before init chroots into /system, e.g., /avb/key1
+ // in the first-stage ramdisk, while other keys might only be available after the chroot,
+ // e.g., /system/etc/avb/key2.
+ // fstab_entry.avb_keys might be either a directory containing multiple keys,
+ // or a string indicating multiple keys separated by ':'.
+ std::vector<std::string> allowed_avb_keys;
+ auto list_avb_keys_in_dir = ListFiles(fstab_entry.avb_keys);
+ if (list_avb_keys_in_dir.ok()) {
+ std::sort(list_avb_keys_in_dir->begin(), list_avb_keys_in_dir->end());
+ allowed_avb_keys = *list_avb_keys_in_dir;
+ } else {
+ allowed_avb_keys = Split(fstab_entry.avb_keys, ":");
+ }
+ return ValidatePublicKeyBlob(public_key_data, allowed_avb_keys);
+}
+
+bool IsHashtreeDescriptorRootDigestMatching(const FstabEntry& fstab_entry,
+ const std::vector<VBMetaData>& vbmeta_images,
+ const std::string& ab_suffix,
+ const std::string& ab_other_suffix) {
+ // Read expected value of hashtree descriptor root digest from fstab_entry.
+ std::string root_digest_expected;
+ if (!ReadFileToString(fstab_entry.avb_hashtree_digest, &root_digest_expected)) {
+ LERROR << "Failed to load expected root digest for " << fstab_entry.mount_point;
+ return false;
+ }
+
+ // Read actual hashtree descriptor from vbmeta image.
+ std::string partition_name = DeriveAvbPartitionName(fstab_entry, ab_suffix, ab_other_suffix);
+ if (partition_name.empty()) {
+ LERROR << "Failed to find partition name for " << fstab_entry.mount_point;
+ return false;
+ }
+ std::unique_ptr<FsAvbHashtreeDescriptor> hashtree_descriptor =
+ android::fs_mgr::GetHashtreeDescriptor(partition_name, vbmeta_images);
+ if (!hashtree_descriptor) {
+ LERROR << "Not found hashtree descriptor for " << fstab_entry.mount_point;
+ return false;
+ }
+
+ // Performs hashtree descriptor root digest matching.
+ if (hashtree_descriptor->root_digest != root_digest_expected) {
+ LERROR << "root digest (" << hashtree_descriptor->root_digest
+ << ") is different from expected value (" << root_digest_expected << ")";
+ return false;
+ }
+
+ return true;
+}
+
+AvbUniquePtr AvbHandle::LoadAndVerifyVbmeta(const FstabEntry& fstab_entry,
+ const std::vector<std::string>& preload_avb_key_blobs) {
// Binds allow_verification_error and rollback_protection to device unlock state.
bool allow_verification_error = IsAvbPermissive();
bool rollback_protection = !allow_verification_error;
@@ -333,40 +401,24 @@
return nullptr;
}
- bool public_key_match = false;
- // Performs key matching for preload_avb_key_blobs first, if it is present.
- if (!public_key_data.empty() && !preload_avb_key_blobs.empty()) {
- if (std::find(preload_avb_key_blobs.begin(), preload_avb_key_blobs.end(),
- public_key_data) != preload_avb_key_blobs.end()) {
- public_key_match = true;
+ // Verify the vbmeta image by matching either the public key or the hashtree descriptor root digest.
+ if (!preload_avb_key_blobs.empty() || !fstab_entry.avb_keys.empty()) {
+ if (!IsPublicKeyMatching(fstab_entry, public_key_data, preload_avb_key_blobs)) {
+ avb_handle->status_ = AvbHandleStatus::kVerificationError;
+ LWARNING << "Found unknown public key used to sign " << fstab_entry.mount_point;
+ if (!allow_verification_error) {
+ LERROR << "Unknown public key is not allowed";
+ return nullptr;
+ }
}
- }
- // Performs key matching for fstab_entry.avb_keys if necessary.
- // Note that it is intentional to match both preload_avb_key_blobs and fstab_entry.avb_keys.
- // Some keys might only be availble before init chroots into /system, e.g., /avb/key1
- // in the first-stage ramdisk, while other keys might only be available after the chroot,
- // e.g., /system/etc/avb/key2.
- if (!public_key_data.empty() && !public_key_match) {
- // fstab_entry.avb_keys might be either a directory containing multiple keys,
- // or a string indicating multiple keys separated by ':'.
- std::vector<std::string> allowed_avb_keys;
- auto list_avb_keys_in_dir = ListFiles(fstab_entry.avb_keys);
- if (list_avb_keys_in_dir.ok()) {
- std::sort(list_avb_keys_in_dir->begin(), list_avb_keys_in_dir->end());
- allowed_avb_keys = *list_avb_keys_in_dir;
- } else {
- allowed_avb_keys = Split(fstab_entry.avb_keys, ":");
- }
- if (ValidatePublicKeyBlob(public_key_data, allowed_avb_keys)) {
- public_key_match = true;
- }
- }
-
- if (!public_key_match) {
+ } else if (!IsHashtreeDescriptorRootDigestMatching(fstab_entry, avb_handle->vbmeta_images_,
+ avb_handle->slot_suffix_,
+ avb_handle->other_slot_suffix_)) {
avb_handle->status_ = AvbHandleStatus::kVerificationError;
- LWARNING << "Found unknown public key used to sign " << fstab_entry.mount_point;
+ LWARNING << "Found unknown hashtree descriptor root digest used on "
+ << fstab_entry.mount_point;
if (!allow_verification_error) {
- LERROR << "Unknown public key is not allowed";
+ LERROR << "Verification based on root digest failed. Vbmeta image is not allowed.";
return nullptr;
}
}
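The refactor above splits LoadAndVerifyVbmeta()'s trust decision into two helpers. The dispatch is: if any AVB keys are configured (preloaded blobs or fstab avb_keys), the public key must match; otherwise the hashtree root digest from the fstab's avb_hashtree_digest file is compared. A condensed sketch of that control flow (error handling elided):

```cpp
// Sketch of the new dispatch in LoadAndVerifyVbmeta (simplified).
bool ok;
if (!preload_avb_key_blobs.empty() || !fstab_entry.avb_keys.empty()) {
    ok = IsPublicKeyMatching(fstab_entry, public_key_data, preload_avb_key_blobs);
} else {
    ok = IsHashtreeDescriptorRootDigestMatching(fstab_entry, vbmeta_images,
                                                slot_suffix, other_slot_suffix);
}
if (!ok) {
    avb_handle->status_ = AvbHandleStatus::kVerificationError;
    if (!allow_verification_error) return nullptr;  // fatal on locked devices
}
```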
diff --git a/fs_mgr/libfstab/fstab.cpp b/fs_mgr/libfstab/fstab.cpp
index 32460b1..6fa22fe 100644
--- a/fs_mgr/libfstab/fstab.cpp
+++ b/fs_mgr/libfstab/fstab.cpp
@@ -286,6 +286,10 @@
}
} else if (StartsWith(flag, "avb_keys=")) { // must before the following "avb"
entry->avb_keys = arg;
+ } else if (StartsWith(flag, "avb_hashtree_digest=")) {
+ // "avb_hashtree_digest" must before the following "avb"
+ // The path where hex-encoded hashtree descriptor root digest is located.
+ entry->avb_hashtree_digest = arg;
} else if (StartsWith(flag, "avb")) {
entry->fs_mgr_flags.avb = true;
entry->vbmeta_partition = arg;
@@ -716,7 +720,7 @@
if (!ReadFstabFromFileCommon(path, fstab)) {
return false;
}
- if (path != kProcMountsPath) {
+ if (path != kProcMountsPath && !InRecovery()) {
if (!access(android::gsi::kGsiBootedIndicatorFile, F_OK)) {
std::string dsu_slot;
if (!android::gsi::GetActiveDsu(&dsu_slot)) {
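For reference, a hypothetical fstab entry exercising the new flag parsed by the fs_mgr flag parser above (paths are illustrative, not from this change):

```cpp
// Illustrative fstab line (hypothetical paths):
//   /dev/block/by-name/system /system ext4 ro wait,avb_hashtree_digest=/avb/system.digest
// After parsing, the flag value lands in the new FstabEntry field:
//   entry.avb_hashtree_digest == "/avb/system.digest"
```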
diff --git a/fs_mgr/libfstab/include/fstab/fstab.h b/fs_mgr/libfstab/include/fstab/fstab.h
index 09471f0..5e4019c 100644
--- a/fs_mgr/libfstab/include/fstab/fstab.h
+++ b/fs_mgr/libfstab/include/fstab/fstab.h
@@ -57,6 +57,7 @@
uint64_t zram_backingdev_size = 0;
std::string avb_keys;
std::string lowerdir;
+ std::string avb_hashtree_digest;
struct FsMgrFlags {
bool wait : 1;
diff --git a/fs_mgr/liblp/builder.cpp b/fs_mgr/liblp/builder.cpp
index 6cb2c51..4e6e97b 100644
--- a/fs_mgr/liblp/builder.cpp
+++ b/fs_mgr/liblp/builder.cpp
@@ -1211,6 +1211,15 @@
header_.flags |= LP_HEADER_FLAG_VIRTUAL_AB_DEVICE;
}
+void MetadataBuilder::SetOverlaysActiveFlag(bool flag) {
+ RequireExpandedMetadataHeader();
+ if (flag) {
+ header_.flags |= LP_HEADER_FLAG_OVERLAYS_ACTIVE;
+ } else {
+ header_.flags &= ~LP_HEADER_FLAG_OVERLAYS_ACTIVE;
+ }
+}
+
bool MetadataBuilder::IsABDevice() {
return !IPropertyFetcher::GetInstance()->GetProperty("ro.boot.slot_suffix", "").empty();
}
diff --git a/fs_mgr/liblp/include/liblp/builder.h b/fs_mgr/liblp/include/liblp/builder.h
index 54f31bc..957b96b 100644
--- a/fs_mgr/liblp/include/liblp/builder.h
+++ b/fs_mgr/liblp/include/liblp/builder.h
@@ -346,6 +346,8 @@
void SetAutoSlotSuffixing();
// Set the LP_HEADER_FLAG_VIRTUAL_AB_DEVICE flag.
void SetVirtualABDeviceFlag();
+ // Set or unset the LP_HEADER_FLAG_OVERLAYS_ACTIVE flag.
+ void SetOverlaysActiveFlag(bool flag);
bool GetBlockDeviceInfo(const std::string& partition_name, BlockDeviceInfo* info) const;
bool UpdateBlockDeviceInfo(const std::string& partition_name, const BlockDeviceInfo& info);
diff --git a/fs_mgr/liblp/include/liblp/metadata_format.h b/fs_mgr/liblp/include/liblp/metadata_format.h
index 41d8b0c..8d77097 100644
--- a/fs_mgr/liblp/include/liblp/metadata_format.h
+++ b/fs_mgr/liblp/include/liblp/metadata_format.h
@@ -240,6 +240,9 @@
*/
#define LP_HEADER_FLAG_VIRTUAL_AB_DEVICE 0x1
+/* This device has overlays activated via "adb remount". */
+#define LP_HEADER_FLAG_OVERLAYS_ACTIVE 0x2
+
/* This struct defines a logical partition entry, similar to what would be
* present in a GUID Partition Table.
*/
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
index 6b34152..9401c66 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_format.h
@@ -107,7 +107,7 @@
static constexpr uint8_t kNumResumePoints = 4;
struct CowHeaderV3 : public CowHeader {
- // Number of sequence data stored (each of which is a 32 byte integer)
+ // Number of sequence data stored (each of which is a 32-bit integer)
uint64_t sequence_data_count;
// Number of currently written resume points &&
uint32_t resume_point_count;
@@ -311,6 +311,8 @@
std::ostream& operator<<(std::ostream& os, ResumePoint const& arg);
+std::ostream& operator<<(std::ostream& os, CowOperationType cow_type);
+
int64_t GetNextOpOffset(const CowOperationV2& op, uint32_t cluster_size);
int64_t GetNextDataOffset(const CowOperationV2& op, uint32_t cluster_size);
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
index b0eb723..8d1786c 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
@@ -52,6 +52,10 @@
}
}
+std::ostream& operator<<(std::ostream& os, CowOperationType cow_type) {
+ return EmitCowTypeString(os, cow_type);
+}
+
std::ostream& operator<<(std::ostream& os, CowOperationV2 const& op) {
os << "CowOperationV2(";
EmitCowTypeString(os, op.type) << ", ";
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/parser_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/parser_v3.cpp
index ce68b39..036d335 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/parser_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/parser_v3.cpp
@@ -114,6 +114,12 @@
for (auto op : *ops_) {
if (op.type() == kCowXorOp) {
xor_data_loc_->insert({op.new_block, data_pos});
+ } else if (op.type() == kCowReplaceOp) {
+ if (data_pos != op.source()) {
+ LOG(ERROR) << "Invalid data location for operation " << op
+ << ", expected: " << data_pos;
+ return false;
+ }
}
data_pos += op.data_length;
}
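The new check enforces the v3 layout invariant that op payloads are stored contiguously in write order, so each replace op's source offset must equal the running sum of all preceding payload sizes. A sketch of the invariant being validated (helper names assumed from the parser/cow_format headers):

```cpp
// Sketch: v3 data blobs are laid out back-to-back, so a replace op's
// source() must equal the running byte offset of all payloads before it.
uint64_t data_pos = GetDataOffset(header);  // start of the data section
for (const auto& op : ops) {
    if (op.type() == kCowReplaceOp && op.source() != data_pos) {
        return false;  // corrupt or mis-written COW
    }
    data_pos += op.data_length;  // every op with data advances the cursor
}
```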
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
index 44b7344..3383a58 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
@@ -1,10 +1,3 @@
-// Copyright (C) 2023 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +8,7 @@
#include <sys/stat.h>
#include <cstdio>
+#include <limits>
#include <memory>
#include <android-base/file.h>
@@ -513,19 +507,24 @@
TEST_F(CowTestV3, SequenceTest) {
CowOptions options;
- options.op_count_max = std::numeric_limits<uint32_t>::max();
+ constexpr int seq_len = std::numeric_limits<uint16_t>::max() / sizeof(uint32_t) + 1;
+ options.op_count_max = seq_len;
auto writer = CreateCowWriter(3, options, GetCowFd());
// sequence data. This is just an arbitrary set of integers that specify the merge order. The
// actual calculation is done by update_engine and passed to writer. All we care about here is
// writing that data correctly
- const int seq_len = std::numeric_limits<uint16_t>::max() / sizeof(uint32_t) + 1;
uint32_t sequence[seq_len];
for (int i = 0; i < seq_len; i++) {
sequence[i] = i + 1;
}
ASSERT_TRUE(writer->AddSequenceData(seq_len, sequence));
- ASSERT_TRUE(writer->AddZeroBlocks(1, seq_len));
+ ASSERT_TRUE(writer->AddZeroBlocks(1, seq_len - 1));
+ std::vector<uint8_t> data(writer->GetBlockSize());
+ for (size_t i = 0; i < data.size(); i++) {
+ data[i] = static_cast<uint8_t>(i & 0xFF);
+ }
+ ASSERT_TRUE(writer->AddRawBlocks(seq_len, data.data(), data.size()));
ASSERT_TRUE(writer->Finalize());
ASSERT_EQ(lseek(cow_->fd, 0, SEEK_SET), 0);
@@ -539,6 +538,12 @@
const auto& op = iter->Get();
ASSERT_EQ(op->new_block, seq_len - i);
+ if (op->new_block == seq_len) {
+ std::vector<uint8_t> read_back(writer->GetBlockSize());
+ ASSERT_EQ(reader.ReadData(op, read_back.data(), read_back.size()),
+ static_cast<ssize_t>(read_back.size()));
+ ASSERT_EQ(read_back, data);
+ }
iter->Next();
}
@@ -683,5 +688,15 @@
}
}
+TEST_F(CowTestV3, CheckOpCount) {
+ CowOptions options;
+ options.op_count_max = 20;
+ options.batch_write = true;
+ options.cluster_ops = 200;
+ auto writer = CreateCowWriter(3, options, GetCowFd());
+ ASSERT_TRUE(writer->AddZeroBlocks(0, 19));
+ ASSERT_FALSE(writer->AddZeroBlocks(0, 19));
+}
+
} // namespace snapshot
} // namespace android
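The reworked SequenceTest sizes its op budget exactly, which is what forces the new replace op through the batched write path with read-back verification. The arithmetic, as a sketch (names from the test above):

```cpp
// Op budget in the reworked SequenceTest:
constexpr int seq_len = std::numeric_limits<uint16_t>::max() / sizeof(uint32_t) + 1;
// = 65535 / 4 + 1 = 16384 ops allowed (options.op_count_max).
// Written: (seq_len - 1) zero ops + 1 replace op (AddRawBlocks) = seq_len ops,
// exactly at the cap; one more op would make CheckOpCount fail, per the new test.
```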
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
index 4df0e76..d99e6e6 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
@@ -38,6 +38,7 @@
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>
+#include <numeric>
// The info messages here are spammy, but as useful for update_engine. Disable
// them when running on the host.
@@ -55,7 +56,7 @@
using android::base::unique_fd;
CowWriterV3::CowWriterV3(const CowOptions& options, unique_fd&& fd)
- : CowWriterBase(options, std::move(fd)) {
+ : CowWriterBase(options, std::move(fd)), batch_size_(std::max<size_t>(options.cluster_ops, 1)) {
SetupHeaders();
}
@@ -78,6 +79,7 @@
// WIP: not quite sure how some of these are calculated yet, assuming buffer_size is determined
// during COW size estimation
header_.sequence_data_count = 0;
+
header_.resume_point_count = 0;
header_.resume_point_max = kNumResumePoints;
header_.op_count = 0;
@@ -119,6 +121,19 @@
LOG(ERROR) << "Failed to create compressor for " << compression_.algorithm;
return false;
}
+ if (options_.cluster_ops &&
+ (android::base::GetBoolProperty("ro.virtual_ab.batch_writes", false) ||
+ options_.batch_write)) {
+ batch_size_ = std::max<size_t>(options_.cluster_ops, 1);
+ data_vec_.reserve(batch_size_);
+ cached_data_.reserve(batch_size_);
+ cached_ops_.reserve(batch_size_);
+ }
+ }
+ if (batch_size_ > 1) {
+ LOG(INFO) << "Batch writes: enabled with batch size " << batch_size_;
+ } else {
+ LOG(INFO) << "Batch writes: disabled";
}
return true;
}
@@ -212,30 +227,61 @@
return true;
}
+bool CowWriterV3::CheckOpCount(size_t op_count) {
+ if (IsEstimating()) {
+ return true;
+ }
+ if (header_.op_count + cached_ops_.size() + op_count > header_.op_count_max) {
+ LOG(ERROR) << "Current number of ops on disk: " << header_.op_count
+ << ", number of ops cached in memory: " << cached_ops_.size()
+ << ", number of ops attempting to write: " << op_count
+ << ", this will exceed max op count " << header_.op_count_max;
+ return false;
+ }
+ return true;
+}
+
bool CowWriterV3::EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) {
- std::vector<CowOperationV3> ops(num_blocks);
+ if (!CheckOpCount(num_blocks)) {
+ return false;
+ }
for (size_t i = 0; i < num_blocks; i++) {
- CowOperationV3& op = ops[i];
+ CowOperationV3& op = cached_ops_.emplace_back();
op.set_type(kCowCopyOp);
op.new_block = new_block + i;
op.set_source(old_block + i);
}
- if (!WriteOperation({ops.data(), ops.size()}, {})) {
- return false;
- }
+ if (NeedsFlush()) {
+ if (!FlushCacheOps()) {
+ return false;
+ }
+ }
return true;
}
bool CowWriterV3::EmitRawBlocks(uint64_t new_block_start, const void* data, size_t size) {
+ if (!CheckOpCount(size / header_.block_size)) {
+ return false;
+ }
return EmitBlocks(new_block_start, data, size, 0, 0, kCowReplaceOp);
}
bool CowWriterV3::EmitXorBlocks(uint32_t new_block_start, const void* data, size_t size,
uint32_t old_block, uint16_t offset) {
+ if (!CheckOpCount(size / header_.block_size)) {
+ return false;
+ }
return EmitBlocks(new_block_start, data, size, old_block, offset, kCowXorOp);
}
+bool CowWriterV3::NeedsFlush() const {
+ // Allow bigger batch sizes for ops without data. A single CowOperationV3
+ // struct uses 14 bytes of memory, even if we cache 200 * 16 ops in memory,
+ // it's only ~44K.
+ return cached_data_.size() >= batch_size_ || cached_ops_.size() >= batch_size_ * 16;
+}
+
bool CowWriterV3::EmitBlocks(uint64_t new_block_start, const void* data, size_t size,
uint64_t old_block, uint16_t offset, CowOperationType type) {
if (compression_.algorithm != kCowCompressNone && compressor_ == nullptr) {
@@ -244,70 +290,69 @@
return false;
}
const size_t num_blocks = (size / header_.block_size);
- if (compression_.algorithm == kCowCompressNone) {
- std::vector<CowOperationV3> ops(num_blocks);
- for (size_t i = 0; i < num_blocks; i++) {
- CowOperation& op = ops[i];
- op.new_block = new_block_start + i;
+
+ for (size_t i = 0; i < num_blocks;) {
+ const auto blocks_to_write =
+ std::min<size_t>(batch_size_ - cached_data_.size(), num_blocks - i);
+ size_t compressed_bytes = 0;
+ for (size_t j = 0; j < blocks_to_write; j++) {
+ const uint8_t* const iter =
+ reinterpret_cast<const uint8_t*>(data) + (header_.block_size * (i + j));
+
+ CowOperation& op = cached_ops_.emplace_back();
+ auto& vec = data_vec_.emplace_back();
+ auto& compressed_data = cached_data_.emplace_back();
+ op.new_block = new_block_start + i + j;
op.set_type(type);
if (type == kCowXorOp) {
- op.set_source((old_block + i) * header_.block_size + offset);
+ op.set_source((old_block + i + j) * header_.block_size + offset);
} else {
- op.set_source(next_data_pos_ + header_.block_size * i);
+ op.set_source(next_data_pos_ + compressed_bytes);
}
- op.data_length = header_.block_size;
+ if (compression_.algorithm == kCowCompressNone) {
+ compressed_data.resize(header_.block_size);
+ } else {
+ compressed_data = compressor_->Compress(iter, header_.block_size);
+ if (compressed_data.empty()) {
+ LOG(ERROR) << "Compression failed during EmitBlocks(" << new_block_start << ", "
+ << num_blocks << ");";
+ return false;
+ }
+ }
+ if (compressed_data.size() >= header_.block_size) {
+ compressed_data.resize(header_.block_size);
+ std::memcpy(compressed_data.data(), iter, header_.block_size);
+ }
+ vec = {.iov_base = compressed_data.data(), .iov_len = compressed_data.size()};
+ op.data_length = vec.iov_len;
+ compressed_bytes += op.data_length;
}
- return WriteOperation({ops.data(), ops.size()},
- {reinterpret_cast<const uint8_t*>(data), size});
- }
-
- const auto saved_op_count = header_.op_count;
- const auto saved_data_pos = next_data_pos_;
- for (size_t i = 0; i < num_blocks; i++) {
- const uint8_t* const iter =
- reinterpret_cast<const uint8_t*>(data) + (header_.block_size * i);
-
- CowOperation op{};
- op.new_block = new_block_start + i;
-
- op.set_type(type);
- if (type == kCowXorOp) {
- op.set_source((old_block + i) * header_.block_size + offset);
- } else {
- op.set_source(next_data_pos_);
- }
- const void* out_data = iter;
-
- op.data_length = header_.block_size;
-
- const std::basic_string<uint8_t> compressed_data =
- compressor_->Compress(out_data, header_.block_size);
- if (compressed_data.size() < op.data_length) {
- out_data = compressed_data.data();
- op.data_length = compressed_data.size();
- }
- if (!WriteOperation(op, out_data, op.data_length)) {
- PLOG(ERROR) << "AddRawBlocks with compression: write failed. new block: "
- << new_block_start << " compression: " << compression_.algorithm;
- header_.op_count = saved_op_count;
- next_data_pos_ = saved_data_pos;
+ if (NeedsFlush() && !FlushCacheOps()) {
+ LOG(ERROR) << "EmitBlocks with compression: write failed. new block: "
+ << new_block_start << " compression: " << compression_.algorithm
+ << ", op type: " << type;
return false;
}
+ i += blocks_to_write;
}
return true;
}
-bool CowWriterV3::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
- std::vector<CowOperationV3> ops(num_blocks);
+bool CowWriterV3::EmitZeroBlocks(uint64_t new_block_start, const uint64_t num_blocks) {
+ if (!CheckOpCount(num_blocks)) {
+ return false;
+ }
for (uint64_t i = 0; i < num_blocks; i++) {
- CowOperationV3& op = ops[i];
+ auto& op = cached_ops_.emplace_back();
op.set_type(kCowZeroOp);
op.new_block = new_block_start + i;
}
- if (!WriteOperation({ops.data(), ops.size()}, {})) {
- return false;
+ if (NeedsFlush()) {
+ if (!FlushCacheOps()) {
+ return false;
+ }
}
return true;
}
@@ -316,6 +361,10 @@
// remove all labels greater than this current one. we want to avoid the situation of adding
// in
// duplicate labels with differing op values
+ if (!FlushCacheOps()) {
+ LOG(ERROR) << "Failed to flush cached ops before emitting label " << label;
+ return false;
+ }
auto remove_if_callback = [&](const auto& resume_point) -> bool {
if (resume_point.label >= label) return true;
return false;
@@ -344,7 +393,15 @@
bool CowWriterV3::EmitSequenceData(size_t num_ops, const uint32_t* data) {
// TODO: size sequence buffer based on options
+ if (header_.op_count > 0 || !cached_ops_.empty()) {
+ LOG(ERROR) << "There's " << header_.op_count << " operations written to disk and "
+ << cached_ops_.size()
+ << " ops cached in memory. Writing sequence data is only allowed before all "
+ "operation writes.";
+ return false;
+ }
header_.sequence_data_count = num_ops;
+ next_data_pos_ = GetDataOffset(header_);
if (!android::base::WriteFullyAtOffset(fd_, data, sizeof(data[0]) * num_ops,
GetSequenceOffset(header_))) {
PLOG(ERROR) << "writing sequence buffer failed";
@@ -353,8 +410,39 @@
return true;
}
+bool CowWriterV3::FlushCacheOps() {
+ if (cached_ops_.empty()) {
+ if (!data_vec_.empty()) {
+ LOG(ERROR) << "Cached ops is empty, but data iovec has size: " << data_vec_.size()
+ << " this is definitely a bug.";
+ return false;
+ }
+ return true;
+ }
+ size_t bytes_written = 0;
+
+ for (auto& op : cached_ops_) {
+ if (op.type() == kCowReplaceOp) {
+ op.set_source(next_data_pos_ + bytes_written);
+ }
+ bytes_written += op.data_length;
+ }
+ if (!WriteOperation({cached_ops_.data(), cached_ops_.size()},
+ {data_vec_.data(), data_vec_.size()})) {
+ LOG(ERROR) << "Failed to flush " << cached_ops_.size() << " ops to disk";
+ return false;
+ }
+ cached_ops_.clear();
+ cached_data_.clear();
+ data_vec_.clear();
+ return true;
+}
+
bool CowWriterV3::WriteOperation(std::basic_string_view<CowOperationV3> ops,
- std::basic_string_view<uint8_t> data) {
+ std::basic_string_view<struct iovec> data) {
+ const auto total_data_size =
+ std::transform_reduce(data.begin(), data.end(), 0, std::plus<size_t>{},
+ [](const struct iovec& a) { return a.iov_len; });
if (IsEstimating()) {
header_.op_count += ops.size();
if (header_.op_count > header_.op_count_max) {
@@ -363,7 +451,7 @@
next_data_pos_ += (header_.op_count - header_.op_count_max) * sizeof(CowOperationV3);
header_.op_count_max = header_.op_count;
}
- next_data_pos_ += data.size();
+ next_data_pos_ += total_data_size;
return true;
}
@@ -372,32 +460,31 @@
<< ops.size() << " ops will exceed the max of " << header_.op_count_max;
return false;
}
-
const off_t offset = GetOpOffset(header_.op_count, header_);
if (!android::base::WriteFullyAtOffset(fd_, ops.data(), ops.size() * sizeof(ops[0]), offset)) {
PLOG(ERROR) << "Write failed for " << ops.size() << " ops at " << offset;
return false;
}
if (!data.empty()) {
- if (!android::base::WriteFullyAtOffset(fd_, data.data(), data.size(), next_data_pos_)) {
+ const auto ret = pwritev(fd_, data.data(), data.size(), next_data_pos_);
+ if (ret != total_data_size) {
PLOG(ERROR) << "write failed for data of size: " << data.size()
- << " at offset: " << next_data_pos_;
+ << " at offset: " << next_data_pos_ << " " << ret;
return false;
}
}
header_.op_count += ops.size();
- next_data_pos_ += data.size();
+ next_data_pos_ += total_data_size;
return true;
}
-bool CowWriterV3::WriteOperation(const CowOperationV3& op, const void* data, size_t size) {
- return WriteOperation({&op, 1}, {reinterpret_cast<const uint8_t*>(data), size});
-}
-
bool CowWriterV3::Finalize() {
CHECK_GE(header_.prefix.header_size, sizeof(CowHeaderV3));
CHECK_LE(header_.prefix.header_size, sizeof(header_));
+ if (!FlushCacheOps()) {
+ return false;
+ }
if (!android::base::WriteFullyAtOffset(fd_, &header_, header_.prefix.header_size, 0)) {
return false;
}
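The writer now batches: EmitCopy/EmitBlocks/EmitZeroBlocks append to cached_ops_ (plus cached_data_/data_vec_ for ops with payloads), and FlushCacheOps() lands a whole batch with a single pwritev() followed by one op-table write. A minimal sketch of the scatter-gather flush pattern (names here are illustrative, not the writer's exact API):

```cpp
#include <sys/uio.h>

#include <cstddef>
#include <vector>

// Sketch: flush N buffered payloads with one pwritev() instead of N writes.
bool FlushPayloads(int fd, const std::vector<iovec>& iov, off_t offset) {
    size_t total = 0;
    for (const auto& v : iov) total += v.iov_len;  // expected byte count
    const ssize_t written =
            pwritev(fd, iov.data(), static_cast<int>(iov.size()), offset);
    return written == static_cast<ssize_t>(total);  // partial write => failure
}
```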
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
index 02b4e61..3a7b877 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
@@ -16,6 +16,7 @@
#include <android-base/logging.h>
#include <string_view>
+#include <vector>
#include "writer_base.h"
@@ -42,17 +43,20 @@
private:
void SetupHeaders();
+ bool NeedsFlush() const;
bool ParseOptions();
bool OpenForWrite();
bool OpenForAppend(uint64_t label);
bool WriteOperation(std::basic_string_view<CowOperationV3> op,
- std::basic_string_view<uint8_t> data);
- bool WriteOperation(const CowOperationV3& op, const void* data = nullptr, size_t size = 0);
+ std::basic_string_view<struct iovec> data);
bool EmitBlocks(uint64_t new_block_start, const void* data, size_t size, uint64_t old_block,
uint16_t offset, CowOperationType type);
bool CompressBlocks(size_t num_blocks, const void* data);
+ bool CheckOpCount(size_t op_count);
private:
+ bool ReadBackVerification();
+ bool FlushCacheOps();
CowHeaderV3 header_{};
CowCompression compression_;
// in the case that we are using one thread for compression, we can store and re-use the same
@@ -63,11 +67,14 @@
std::shared_ptr<std::vector<ResumePoint>> resume_points_;
uint64_t next_data_pos_ = 0;
- std::vector<std::basic_string<uint8_t>> compressed_buf_;
// in the case that we are using one thread for compression, we can store and re-use the same
// compressor
int num_compress_threads_ = 1;
+ size_t batch_size_ = 1;
+ std::vector<CowOperationV3> cached_ops_;
+ std::vector<std::basic_string<uint8_t>> cached_data_;
+ std::vector<struct iovec> data_vec_;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index f6a35a8..e33bdff 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -3330,7 +3330,7 @@
// Terminate stale daemon if any
std::unique_ptr<SnapuserdClient> snapuserd_client = std::move(snapuserd_client_);
if (!snapuserd_client) {
- snapuserd_client = SnapuserdClient::Connect(kSnapuserdSocket, 5s);
+ snapuserd_client = SnapuserdClient::TryConnect(kSnapuserdSocket, 5s);
}
if (snapuserd_client) {
snapuserd_client->DetachSnapuserd();
@@ -3661,7 +3661,7 @@
cow_options.compression = status.compression_algorithm();
cow_options.max_blocks = {status.device_size() / cow_options.block_size};
cow_options.batch_write = status.batched_writes();
- cow_options.num_compress_threads = status.enable_threading() ? 2 : 0;
+ cow_options.num_compress_threads = status.enable_threading() ? 2 : 1;
// TODO(b/313962438) Improve op_count estimate. For now, use number of
// blocks as an upper bound.
cow_options.op_count_max = status.device_size() / cow_options.block_size;
diff --git a/fs_mgr/libsnapshot/snapshot_test.cpp b/fs_mgr/libsnapshot/snapshot_test.cpp
index 4e6b5e1..e538d50 100644
--- a/fs_mgr/libsnapshot/snapshot_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_test.cpp
@@ -2362,8 +2362,10 @@
auto init = NewManagerForFirstStageMount("_b");
ASSERT_NE(init, nullptr);
- ASSERT_TRUE(init->EnsureSnapuserdConnected());
- init->set_use_first_stage_snapuserd(true);
+ if (snapuserd_required_) {
+ ASSERT_TRUE(init->EnsureSnapuserdConnected());
+ init->set_use_first_stage_snapuserd(true);
+ }
ASSERT_TRUE(init->NeedSnapshotsInFirstStageMount());
ASSERT_TRUE(init->CreateLogicalAndSnapshotPartitions("super", snapshot_timeout_));
@@ -2374,9 +2376,11 @@
ASSERT_TRUE(IsPartitionUnchanged(name));
}
- ASSERT_TRUE(init->PerformInitTransition(SnapshotManager::InitTransition::SECOND_STAGE));
- for (const auto& name : partitions) {
- ASSERT_TRUE(init->snapuserd_client()->WaitForDeviceDelete(name + "-user-cow-init"));
+ if (snapuserd_required_) {
+ ASSERT_TRUE(init->PerformInitTransition(SnapshotManager::InitTransition::SECOND_STAGE));
+ for (const auto& name : partitions) {
+ ASSERT_TRUE(init->snapuserd_client()->WaitForDeviceDelete(name + "-user-cow-init"));
+ }
}
// Initiate the merge and wait for it to be completed.
@@ -2860,15 +2864,23 @@
}
void KillSnapuserd() {
- auto status = android::base::GetProperty("init.svc.snapuserd", "stopped");
- if (status == "stopped") {
- return;
+ // Detach the daemon if it's alive
+ auto snapuserd_client = SnapuserdClient::TryConnect(kSnapuserdSocket, 5s);
+ if (snapuserd_client) {
+ snapuserd_client->DetachSnapuserd();
}
- auto snapuserd_client = SnapuserdClient::Connect(kSnapuserdSocket, 5s);
- if (!snapuserd_client) {
- return;
+
+ // Now stop the service - init will send a SIGKILL to the daemon. The
+ // process state then moves from "running" to "stopping"; only after the
+ // process is reaped by init does the service state become "stopped".
+ //
+ // Since the tests restart the daemon immediately, wait for the process to
+ // stop completely (i.e., wait until init reaps the terminated process).
+ android::base::SetProperty("ctl.stop", "snapuserd");
+ if (!android::base::WaitForProperty("init.svc.snapuserd", "stopped", 10s)) {
+ LOG(ERROR) << "Timed out waiting for snapuserd to stop.";
}
- snapuserd_client->DetachSnapuserd();
}
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapuserd/Android.bp b/fs_mgr/libsnapshot/snapuserd/Android.bp
index 1b0c563..6b8e084 100644
--- a/fs_mgr/libsnapshot/snapuserd/Android.bp
+++ b/fs_mgr/libsnapshot/snapuserd/Android.bp
@@ -147,12 +147,6 @@
// snapuserd, which would lead to deadlock if we had to handle page
// faults for its code pages.
static_executable: true,
-
- // Snapuserd segfaults with ThinLTO
- // http://b/208565717
- lto: {
- never: true,
- },
}
cc_binary {
diff --git a/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h b/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
index 010beb3..ede92dd 100644
--- a/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
+++ b/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
@@ -17,11 +17,7 @@
#include <unistd.h>
#include <chrono>
-#include <cstring>
-#include <iostream>
#include <string>
-#include <thread>
-#include <vector>
#include <android-base/unique_fd.h>
@@ -53,9 +49,14 @@
explicit SnapuserdClient(android::base::unique_fd&& sockfd);
SnapuserdClient(){};
+ // Attempts to connect to snapuserd, waiting for the daemon to start if the
+ // connection fails.
static std::unique_ptr<SnapuserdClient> Connect(const std::string& socket_name,
std::chrono::milliseconds timeout_ms);
-
+ // Attempts to connect to snapuserd, but does not wait for the daemon to
+ // start.
+ static std::unique_ptr<SnapuserdClient> TryConnect(const std::string& socket_name,
+ std::chrono::milliseconds timeout_ms);
bool StopSnapuserd();
// Initializing a snapuserd handler is a three-step process:
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp b/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
index 3bed3a4..789c980 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
@@ -27,7 +27,7 @@
#include <unistd.h>
#include <chrono>
-#include <sstream>
+#include <thread>
#include <android-base/file.h>
#include <android-base/logging.h>
@@ -64,6 +64,40 @@
return errno == ECONNREFUSED || errno == EINTR || errno == ENOENT;
}
+std::unique_ptr<SnapuserdClient> SnapuserdClient::TryConnect(const std::string& socket_name,
+ std::chrono::milliseconds timeout_ms) {
+ unique_fd fd;
+ const auto start = std::chrono::steady_clock::now();
+ while (true) {
+ fd.reset(TEMP_FAILURE_RETRY(socket_local_client(
+ socket_name.c_str(), ANDROID_SOCKET_NAMESPACE_RESERVED, SOCK_STREAM)));
+ if (fd >= 0) {
+ auto client = std::make_unique<SnapuserdClient>(std::move(fd));
+ if (!client->ValidateConnection()) {
+ return nullptr;
+ }
+ return client;
+ }
+ if (errno == ENOENT) {
+ LOG(INFO) << "Daemon socket " << socket_name
+ << " does not exist, return without waiting.";
+ return nullptr;
+ }
+ if (errno == ECONNREFUSED) {
+ const auto now = std::chrono::steady_clock::now();
+ const auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - start);
+ if (elapsed >= timeout_ms) {
+ LOG(ERROR) << "Timed out connecting to snapuserd socket: " << socket_name;
+ return nullptr;
+ }
+ std::this_thread::sleep_for(10ms);
+ } else {
+ PLOG(ERROR) << "connect failed: " << socket_name;
+ return nullptr;
+ }
+ }
+}
+
std::unique_ptr<SnapuserdClient> SnapuserdClient::Connect(const std::string& socket_name,
std::chrono::milliseconds timeout_ms) {
unique_fd fd;
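The distinction between the two entry points: Connect() keeps retrying until the daemon appears or the timeout expires, while TryConnect() returns nullptr immediately when the socket does not exist (ENOENT) and only waits out ECONNREFUSED races. A usage sketch based on the call sites in this change:

```cpp
// Sketch: detach the daemon only if it is already running; do not wait for
// one to start (mirrors KillSnapuserd() and the snapshot.cpp call site above).
auto client = SnapuserdClient::TryConnect(kSnapuserdSocket, 5s);
if (client) {
    client->DetachSnapuserd();  // daemon was alive and the connection validated
}
// By contrast, use Connect() when the daemon is expected to come up shortly.
```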
diff --git a/healthd/BatteryMonitor.cpp b/healthd/BatteryMonitor.cpp
index 0c97632..b8bb586 100644
--- a/healthd/BatteryMonitor.cpp
+++ b/healthd/BatteryMonitor.cpp
@@ -59,6 +59,7 @@
using aidl::android::hardware::health::BatteryChargingState;
using aidl::android::hardware::health::BatteryHealth;
using aidl::android::hardware::health::BatteryHealthData;
+using aidl::android::hardware::health::BatteryPartStatus;
using aidl::android::hardware::health::BatteryStatus;
using aidl::android::hardware::health::HealthInfo;
@@ -219,6 +220,7 @@
{"Warm", BatteryHealth::GOOD},
{"Cool", BatteryHealth::GOOD},
{"Hot", BatteryHealth::OVERHEAT},
+ {"Calibration required", BatteryHealth::INCONSISTENT},
{NULL, BatteryHealth::UNKNOWN},
};
@@ -596,6 +598,9 @@
if (!mHealthdConfig->batteryStateOfHealthPath.empty())
return getIntField(mHealthdConfig->batteryStateOfHealthPath);
}
+ if (id == BATTERY_PROP_PART_STATUS) {
+ return static_cast<int>(BatteryPartStatus::UNSUPPORTED);
+ }
return 0;
}
@@ -679,6 +684,11 @@
ret = OK;
break;
+ case BATTERY_PROP_PART_STATUS:
+ val->valueInt64 = getBatteryHealthData(BATTERY_PROP_PART_STATUS);
+ ret = OK;
+ break;
+
default:
break;
}
@@ -686,6 +696,11 @@
return ret;
}
+status_t BatteryMonitor::getSerialNumber(std::optional<std::string>* out) {
+ *out = std::nullopt;
+ return OK;
+}
+
void BatteryMonitor::dumpState(int fd) {
int v;
char vs[128];
diff --git a/healthd/include/healthd/BatteryMonitor.h b/healthd/include/healthd/BatteryMonitor.h
index e9998ba..b30458d 100644
--- a/healthd/include/healthd/BatteryMonitor.h
+++ b/healthd/include/healthd/BatteryMonitor.h
@@ -18,6 +18,7 @@
#define HEALTHD_BATTERYMONITOR_H
#include <memory>
+#include <optional>
#include <batteryservice/BatteryService.h>
#include <utils/String8.h>
@@ -86,6 +87,8 @@
int getChargingPolicy();
int getBatteryHealthData(int id);
+ status_t getSerialNumber(std::optional<std::string>* out);
+
static void logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
const struct healthd_config& healthd_config);
diff --git a/init/first_stage_init.cpp b/init/first_stage_init.cpp
index e48fa15..c4d0f75 100644
--- a/init/first_stage_init.cpp
+++ b/init/first_stage_init.cpp
@@ -30,6 +30,7 @@
#include <chrono>
#include <filesystem>
#include <string>
+#include <thread>
#include <vector>
#include <android-base/chrono_utils.h>
diff --git a/init/first_stage_mount.cpp b/init/first_stage_mount.cpp
index d0f68a8..c0b9281 100644
--- a/init/first_stage_mount.cpp
+++ b/init/first_stage_mount.cpp
@@ -732,6 +732,15 @@
return true;
}
+bool IsHashtreeDisabled(const AvbHandle& vbmeta, const std::string& mount_point) {
+ if (vbmeta.status() == AvbHandleStatus::kHashtreeDisabled ||
+ vbmeta.status() == AvbHandleStatus::kVerificationDisabled) {
+ LOG(ERROR) << "Top-level vbmeta is disabled, skip Hashtree setup for " << mount_point;
+ return true; // Returns true to mount the partition directly.
+ }
+ return false;
+}
+
bool FirstStageMountVBootV2::SetUpDmVerity(FstabEntry* fstab_entry) {
AvbHashtreeResult hashtree_result;
@@ -740,34 +749,46 @@
if (!fstab_entry->avb_keys.empty()) {
if (!InitAvbHandle()) return false;
// Checks if hashtree should be disabled from the top-level /vbmeta.
- if (avb_handle_->status() == AvbHandleStatus::kHashtreeDisabled ||
- avb_handle_->status() == AvbHandleStatus::kVerificationDisabled) {
- LOG(ERROR) << "Top-level vbmeta is disabled, skip Hashtree setup for "
- << fstab_entry->mount_point;
- return true; // Returns true to mount the partition directly.
+ if (IsHashtreeDisabled(*avb_handle_, fstab_entry->mount_point)) {
+ return true;
+ }
+ auto avb_standalone_handle = AvbHandle::LoadAndVerifyVbmeta(
+ *fstab_entry, preload_avb_key_blobs_[fstab_entry->avb_keys]);
+ if (!avb_standalone_handle) {
+ LOG(ERROR) << "Failed to load offline vbmeta for " << fstab_entry->mount_point;
+ // Fall back to built-in hashtree if fs_mgr_flags.avb is set.
+ if (!fstab_entry->fs_mgr_flags.avb) return false;
+ LOG(INFO) << "Fallback to built-in hashtree for " << fstab_entry->mount_point;
+ hashtree_result =
+ avb_handle_->SetUpAvbHashtree(fstab_entry, false /* wait_for_verity_dev */);
} else {
- auto avb_standalone_handle = AvbHandle::LoadAndVerifyVbmeta(
- *fstab_entry, preload_avb_key_blobs_[fstab_entry->avb_keys]);
- if (!avb_standalone_handle) {
- LOG(ERROR) << "Failed to load offline vbmeta for " << fstab_entry->mount_point;
- // Fallbacks to built-in hashtree if fs_mgr_flags.avb is set.
- if (!fstab_entry->fs_mgr_flags.avb) return false;
- LOG(INFO) << "Fallback to built-in hashtree for " << fstab_entry->mount_point;
- hashtree_result =
- avb_handle_->SetUpAvbHashtree(fstab_entry, false /* wait_for_verity_dev */);
- } else {
- // Sets up hashtree via the standalone handle.
- if (IsStandaloneImageRollback(*avb_handle_, *avb_standalone_handle, *fstab_entry)) {
- return false;
- }
- hashtree_result = avb_standalone_handle->SetUpAvbHashtree(
- fstab_entry, false /* wait_for_verity_dev */);
+ // Sets up hashtree via the standalone handle.
+ if (IsStandaloneImageRollback(*avb_handle_, *avb_standalone_handle, *fstab_entry)) {
+ return false;
}
+ hashtree_result = avb_standalone_handle->SetUpAvbHashtree(
+ fstab_entry, false /* wait_for_verity_dev */);
}
} else if (fstab_entry->fs_mgr_flags.avb) {
if (!InitAvbHandle()) return false;
hashtree_result =
avb_handle_->SetUpAvbHashtree(fstab_entry, false /* wait_for_verity_dev */);
+ } else if (!fstab_entry->avb_hashtree_digest.empty()) {
+ // When fstab_entry has neither avb_keys nor avb flag, try using
+ // avb_hashtree_digest.
+ if (!InitAvbHandle()) return false;
+ // Checks if hashtree should be disabled from the top-level /vbmeta.
+ if (IsHashtreeDisabled(*avb_handle_, fstab_entry->mount_point)) {
+ return true;
+ }
+ auto avb_standalone_handle = AvbHandle::LoadAndVerifyVbmeta(*fstab_entry);
+ if (!avb_standalone_handle) {
+ LOG(ERROR) << "Failed to load vbmeta based on hashtree descriptor root digest for "
+ << fstab_entry->mount_point;
+ return false;
+ }
+ hashtree_result = avb_standalone_handle->SetUpAvbHashtree(fstab_entry,
+ false /* wait_for_verity_dev */);
} else {
return true; // No need AVB, returns true to mount the partition directly.
}
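SetUpDmVerity() now has three ways to obtain a verified hashtree, tried in order: a standalone vbmeta matched by avb_keys, the built-in hashtree from the top-level vbmeta when fs_mgr_flags.avb is set, and (new in this change) a standalone vbmeta matched by avb_hashtree_digest. A condensed sketch of the dispatch:

```cpp
// Sketch of the dispatch in SetUpDmVerity (simplified; fallbacks elided).
if (!entry->avb_keys.empty()) {
    // Standalone vbmeta, trust anchored on a matching public key.
} else if (entry->fs_mgr_flags.avb) {
    // Built-in hashtree descriptor from the top-level vbmeta chain.
} else if (!entry->avb_hashtree_digest.empty()) {
    // New: standalone vbmeta, trust anchored on the expected root digest.
} else {
    return true;  // No AVB; mount the partition directly.
}
```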
diff --git a/init/snapuserd_transition.cpp b/init/snapuserd_transition.cpp
index 3a9ff5b..3a78343 100644
--- a/init/snapuserd_transition.cpp
+++ b/init/snapuserd_transition.cpp
@@ -25,6 +25,7 @@
#include <filesystem>
#include <string>
#include <string_view>
+#include <thread>
#include <android-base/file.h>
#include <android-base/logging.h>
diff --git a/libprocessgroup/include/processgroup/processgroup.h b/libprocessgroup/include/processgroup/processgroup.h
index 9107838..ca6868c 100644
--- a/libprocessgroup/include/processgroup/processgroup.h
+++ b/libprocessgroup/include/processgroup/processgroup.h
@@ -65,9 +65,8 @@
// should be active again. E.g. Zygote specialization for child process.
void DropTaskProfilesResourceCaching();
-// Return 0 and removes the cgroup if there are no longer any processes in it.
-// Returns -1 in the case of an error occurring or if there are processes still running
-// even after retrying for up to 200ms.
+// Returns 0 if all processes were killed and the cgroup was successfully removed.
+// Returns -1 if an error occurred or if processes are still running.
int killProcessGroup(uid_t uid, int initialPid, int signal);
// Returns the same as killProcessGroup(), however it does not retry, which means
@@ -76,8 +75,9 @@
// Sends the provided signal to all members of a process group, but does not wait for processes to
// exit, or for the cgroup to be removed. Callers should also ensure that killProcessGroup is called
-// later to ensure the cgroup is fully removed, otherwise system resources may leak.
-int sendSignalToProcessGroup(uid_t uid, int initialPid, int signal);
+// later to ensure the cgroup is fully removed; otherwise system resources will leak.
+// Returns true if no errors were encountered while sending signals, false otherwise.
+bool sendSignalToProcessGroup(uid_t uid, int initialPid, int signal);
int createProcessGroup(uid_t uid, int initialPid, bool memControl = false);
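With the signature change, callers get an explicit success/failure from the signalling step but must still follow up with killProcessGroup() so the cgroup is reaped. A sketch of the intended calling pattern, per the header comments above:

```cpp
// Sketch: signal first (non-blocking), then reap and remove the cgroup.
if (!sendSignalToProcessGroup(uid, initial_pid, SIGTERM)) {
    // Signalling failed; the cgroup may already be gone.
}
// ... give processes a chance to exit, then:
if (killProcessGroup(uid, initial_pid, SIGKILL) != 0) {
    // Error, or processes survived the kill window; resources may leak.
}
```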
diff --git a/libprocessgroup/processgroup.cpp b/libprocessgroup/processgroup.cpp
index f594f7f..3209adf 100644
--- a/libprocessgroup/processgroup.cpp
+++ b/libprocessgroup/processgroup.cpp
@@ -22,6 +22,7 @@
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
+#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -30,6 +31,7 @@
#include <unistd.h>
#include <chrono>
+#include <cstring>
#include <map>
#include <memory>
#include <mutex>
@@ -53,7 +55,9 @@
using namespace std::chrono_literals;
-#define PROCESSGROUP_CGROUP_PROCS_FILE "/cgroup.procs"
+#define PROCESSGROUP_CGROUP_PROCS_FILE "cgroup.procs"
+#define PROCESSGROUP_CGROUP_KILL_FILE "cgroup.kill"
+#define PROCESSGROUP_CGROUP_EVENTS_FILE "cgroup.events"
bool CgroupsAvailable() {
static bool cgroups_available = access("/proc/cgroups", F_OK) == 0;
@@ -74,6 +78,29 @@
return true;
}
+static std::string ConvertUidToPath(const char* cgroup, uid_t uid) {
+ return StringPrintf("%s/uid_%u", cgroup, uid);
+}
+
+static std::string ConvertUidPidToPath(const char* cgroup, uid_t uid, int pid) {
+ return StringPrintf("%s/uid_%u/pid_%d", cgroup, uid, pid);
+}
+
+static bool CgroupKillAvailable() {
+ static std::once_flag f;
+ static bool cgroup_kill_available = false;
+ std::call_once(f, []() {
+ std::string cg_kill;
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &cg_kill);
+ // cgroup.kill is not on the root cgroup, so check a non-root cgroup that should always
+ // exist.
+ cg_kill = ConvertUidToPath(cg_kill.c_str(), AID_ROOT) + '/' + PROCESSGROUP_CGROUP_KILL_FILE;
+ cgroup_kill_available = access(cg_kill.c_str(), F_OK) == 0;
+ });
+
+ return cgroup_kill_available;
+}
+
static bool CgroupGetMemcgAppsPath(std::string* path) {
CgroupController controller = CgroupMap::GetInstance().FindController("memory");
@@ -205,38 +232,21 @@
false);
}
-static std::string ConvertUidToPath(const char* cgroup, uid_t uid) {
- return StringPrintf("%s/uid_%u", cgroup, uid);
-}
-
-static std::string ConvertUidPidToPath(const char* cgroup, uid_t uid, int pid) {
- return StringPrintf("%s/uid_%u/pid_%d", cgroup, uid, pid);
-}
-
-static int RemoveCgroup(const char* cgroup, uid_t uid, int pid, unsigned int retries) {
- int ret = 0;
- auto uid_pid_path = ConvertUidPidToPath(cgroup, uid, pid);
-
- while (retries--) {
- ret = rmdir(uid_pid_path.c_str());
- // If we get an error 2 'No such file or directory' , that means the
- // cgroup is already removed, treat it as success and return 0 for
- // idempotency.
- if (ret < 0 && errno == ENOENT) {
- ret = 0;
- }
- if (!ret || errno != EBUSY || !retries) break;
- std::this_thread::sleep_for(5ms);
- }
+static int RemoveCgroup(const char* cgroup, uid_t uid, int pid) {
+ auto path = ConvertUidPidToPath(cgroup, uid, pid);
+ int ret = TEMP_FAILURE_RETRY(rmdir(path.c_str()));
if (!ret && uid >= AID_ISOLATED_START && uid <= AID_ISOLATED_END) {
// Isolated UIDs are unlikely to be reused soon after removal,
// so free up the kernel resources for the UID level cgroup.
- const auto uid_path = ConvertUidToPath(cgroup, uid);
- ret = rmdir(uid_path.c_str());
- if (ret < 0 && errno == ENOENT) {
- ret = 0;
- }
+ path = ConvertUidToPath(cgroup, uid);
+ ret = TEMP_FAILURE_RETRY(rmdir(path.c_str()));
+ }
+
+ if (ret < 0 && errno == ENOENT) {
+ // This function is idempotent, but still warn here.
+ LOG(WARNING) << "RemoveCgroup: " << path << " does not exist.";
+ ret = 0;
}
return ret;
@@ -360,38 +370,55 @@
return false;
}
-// Returns number of processes killed on success
-// Returns 0 if there are no processes in the process cgroup left to kill
-// Returns -1 on error
-static int DoKillProcessGroupOnce(const char* cgroup, uid_t uid, int initialPid, int signal) {
- // We separate all of the pids in the cgroup into those pids that are also the leaders of
- // process groups (stored in the pgids set) and those that are not (stored in the pids set).
- std::set<pid_t> pgids;
- pgids.emplace(initialPid);
- std::set<pid_t> pids;
- int processes = 0;
-
- std::unique_ptr<FILE, decltype(&fclose)> fd(nullptr, fclose);
+bool sendSignalToProcessGroup(uid_t uid, int initialPid, int signal) {
+ std::set<pid_t> pgids, pids;
if (CgroupsAvailable()) {
- auto path = ConvertUidPidToPath(cgroup, uid, initialPid) + PROCESSGROUP_CGROUP_PROCS_FILE;
- fd.reset(fopen(path.c_str(), "re"));
- if (!fd) {
- if (errno == ENOENT) {
- // This happens when the process is already dead or if, as the result of a bug, it
- // has been migrated to another cgroup. An example of a bug that can cause migration
- // to another cgroup is using the JoinCgroup action with a cgroup controller that
- // has been activated in the v2 cgroup hierarchy.
- goto kill;
+ std::string hierarchy_root_path, cgroup_v2_path;
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
+ cgroup_v2_path = ConvertUidPidToPath(hierarchy_root_path.c_str(), uid, initialPid);
+
+ if (signal == SIGKILL && CgroupKillAvailable()) {
+ LOG(VERBOSE) << "Using " << PROCESSGROUP_CGROUP_KILL_FILE << " to SIGKILL "
+ << cgroup_v2_path;
+
+ // We need to kill the process group in addition to the cgroup. For normal apps they
+ // should completely overlap, but system_server kills depend on process group kills to
+ // take down apps which are in their own cgroups and not individually targeted.
+ if (kill(-initialPid, signal) == -1 && errno != ESRCH) {
+ PLOG(WARNING) << "kill(" << -initialPid << ", " << signal << ") failed";
}
- PLOG(WARNING) << __func__ << " failed to open process cgroup uid " << uid << " pid "
- << initialPid;
- return -1;
+
+ const std::string killfilepath = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_KILL_FILE;
+ if (WriteStringToFile("1", killfilepath)) {
+ return true;
+ } else {
+ PLOG(ERROR) << "Failed to write 1 to " << killfilepath;
+ // Fallback to cgroup.procs below
+ }
}
+
+ // Since cgroup.kill only sends SIGKILLs, we read cgroup.procs to find each process to
+ // signal individually. This is more costly than using cgroup.kill for SIGKILLs.
+ LOG(VERBOSE) << "Using " << PROCESSGROUP_CGROUP_PROCS_FILE << " to signal (" << signal
+ << ") " << cgroup_v2_path;
+
+ // We separate all of the pids in the cgroup into those pids that are also the leaders of
+ // process groups (stored in the pgids set) and those that are not (stored in the pids set).
+ const auto procsfilepath = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_PROCS_FILE;
+ std::unique_ptr<FILE, decltype(&fclose)> fp(fopen(procsfilepath.c_str(), "re"), fclose);
+ if (!fp) {
+ // This should only happen if the cgroup has already been removed with a successful call
+ // to killProcessGroup. Callers should only retry sendSignalToProcessGroup or
+ // killProcessGroup calls if they fail without ENOENT.
+ PLOG(ERROR) << "Failed to open " << procsfilepath;
+ kill(-initialPid, signal);
+ return false;
+ }
+
pid_t pid;
bool file_is_empty = true;
- while (fscanf(fd.get(), "%d\n", &pid) == 1 && pid >= 0) {
- processes++;
+ while (fscanf(fp.get(), "%d\n", &pid) == 1 && pid >= 0) {
file_is_empty = false;
if (pid == 0) {
// Should never happen... but if it does, trying to kill this
@@ -421,7 +448,8 @@
}
}
-kill:
+ pgids.emplace(initialPid);
+
// Kill all process groups.
for (const auto pgid : pgids) {
LOG(VERBOSE) << "Killing process group " << -pgid << " in uid " << uid
@@ -442,101 +470,174 @@
}
}
- return (!fd || feof(fd.get())) ? processes : -1;
+ return true;
}
-static int KillProcessGroup(uid_t uid, int initialPid, int signal, int retries) {
+template <typename T>
+static std::chrono::milliseconds toMillisec(T&& duration) {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(duration);
+}
+
+enum class populated_status
+{
+ populated,
+ not_populated,
+ error
+};
+
+static populated_status cgroupIsPopulated(int events_fd) {
+ const std::string POPULATED_KEY("populated ");
+ const std::string::size_type MAX_EVENTS_FILE_SIZE = 32;
+
+ std::string buf;
+ buf.resize(MAX_EVENTS_FILE_SIZE);
+ ssize_t len = TEMP_FAILURE_RETRY(pread(events_fd, buf.data(), buf.size(), 0));
+ if (len == -1) {
+ PLOG(ERROR) << "Could not read cgroup.events: ";
+ // Potentially ENODEV if the cgroup has been removed since we opened this file, but that
+ // shouldn't have happened yet.
+ return populated_status::error;
+ }
+
+ if (len == 0) {
+ LOG(ERROR) << "cgroup.events EOF";
+ return populated_status::error;
+ }
+
+ buf.resize(len);
+
+ const std::string::size_type pos = buf.find(POPULATED_KEY);
+ if (pos == std::string::npos) {
+ LOG(ERROR) << "Could not find populated key in cgroup.events";
+ return populated_status::error;
+ }
+
+ if (pos + POPULATED_KEY.size() + 1 > len) {
+ LOG(ERROR) << "Partial read of cgroup.events";
+ return populated_status::error;
+ }
+
+ return buf[pos + POPULATED_KEY.size()] == '1' ?
+ populated_status::populated : populated_status::not_populated;
+}
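
On a cgroup v2 kernel, cgroup.events is a small flat-keyed file whose contents look like "populated 1" on one line and "frozen 0" on the next, which is why the parser above only needs to find the "populated " key and inspect the single character that follows it. A usage sketch under that assumption (the path is illustrative; real paths come from ConvertUidPidToPath):

    #include <fcntl.h>
    #include <android-base/unique_fd.h>

    void exampleIsEmpty() {
        android::base::unique_fd fd(
                open("/sys/fs/cgroup/uid_1000/pid_1234/cgroup.events", O_RDONLY));
        if (fd.ok() && cgroupIsPopulated(fd.get()) == populated_status::not_populated) {
            // Every member process has exited; rmdir() on the cgroup can now succeed.
        }
    }
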
+
+// The default timeout of 2200ms comes from the default number of retries in a previous
+// implementation of this function. The default retry value was 40 for killing and 400 for cgroup
+// removal, with 5ms sleeps between each retry: (40 + 400) * 5ms = 2200ms.
+static int KillProcessGroup(
+ uid_t uid, int initialPid, int signal, bool once = false,
+ std::chrono::steady_clock::time_point until = std::chrono::steady_clock::now() + 2200ms) {
CHECK_GE(uid, 0);
CHECK_GT(initialPid, 0);
+ // Always attempt to send a kill signal to at least initialPid, at least once, regardless of
+ // whether its cgroup exists. This should only be necessary if a bug results in the targeted
+ // process migrating out of its cgroup, which we will also attempt to kill.
+ const bool signal_ret = sendSignalToProcessGroup(uid, initialPid, signal);
+
+ if (!CgroupsAvailable() || !signal_ret) return signal_ret ? 0 : -1;
+
std::string hierarchy_root_path;
- if (CgroupsAvailable()) {
- CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- }
- const char* cgroup = hierarchy_root_path.c_str();
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
+ const std::string cgroup_v2_path =
+ ConvertUidPidToPath(hierarchy_root_path.c_str(), uid, initialPid);
- int retry = retries;
- int processes;
- while ((processes = DoKillProcessGroupOnce(cgroup, uid, initialPid, signal)) > 0) {
- LOG(VERBOSE) << "Killed " << processes << " processes for processgroup " << initialPid;
- if (!CgroupsAvailable()) {
- // makes no sense to retry, because there are no cgroup_procs file
- processes = 0; // no remaining processes
- break;
- }
- if (retry > 0) {
- std::this_thread::sleep_for(5ms);
- --retry;
- } else {
- break;
- }
- }
-
- if (processes < 0) {
- PLOG(ERROR) << "Error encountered killing process cgroup uid " << uid << " pid "
- << initialPid;
+ const std::string eventsfile = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_EVENTS_FILE;
+ android::base::unique_fd events_fd(open(eventsfile.c_str(), O_RDONLY));
+ if (events_fd.get() == -1) {
+ PLOG(WARNING) << "Error opening " << eventsfile << " for KillProcessGroup";
return -1;
}
- std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
- auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+ struct pollfd fds = {
+ .fd = events_fd,
+ .events = POLLPRI,
+ };
- // We only calculate the number of 'processes' when killing the processes.
- // In the retries == 0 case, we only kill the processes once and therefore
- // will not have waited then recalculated how many processes are remaining
- // after the first signals have been sent.
- // Logging anything regarding the number of 'processes' here does not make sense.
+ const std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
- if (processes == 0) {
- if (retries > 0) {
- LOG(INFO) << "Successfully killed process cgroup uid " << uid << " pid " << initialPid
- << " in " << static_cast<int>(ms) << "ms";
+ // The primary reason to loop here is to capture any new forks or migrations that could occur
+ // after we send signals to the original set of processes, but before all of those processes
+ // exit and the cgroup becomes unpopulated, or before we remove the cgroup. We try hard to
+ // ensure this completes successfully to avoid permanent memory leaks, but we still place a
+ // large default upper bound on the amount of time we spend in this loop. The amount of CPU
+ // contention and the amount of work that needs to be done in do_exit for each process
+ // determine how long this will take.
+ int ret = 0;  // initialize: the poll() below may be skipped on the first iteration
+ do {
+ populated_status populated;
+ while ((populated = cgroupIsPopulated(events_fd.get())) == populated_status::populated &&
+ std::chrono::steady_clock::now() < until) {
+
+ sendSignalToProcessGroup(uid, initialPid, signal);
+ if (once) {
+ populated = cgroupIsPopulated(events_fd.get());
+ break;
+ }
+
+ const std::chrono::steady_clock::time_point poll_start =
+ std::chrono::steady_clock::now();
+
+ if (poll_start < until)
+ ret = TEMP_FAILURE_RETRY(poll(&fds, 1, toMillisec(until - poll_start).count()));
+
+ if (ret == -1) {
+ // Fallback to 5ms sleeps if poll fails
+ PLOG(ERROR) << "Poll on " << eventsfile << "failed";
+ const std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
+ if (now < until)
+ std::this_thread::sleep_for(std::min(5ms, toMillisec(until - now)));
+ }
+
+ LOG(VERBOSE) << "Waited "
+ << toMillisec(std::chrono::steady_clock::now() - poll_start).count()
+ << " ms for " << eventsfile << " poll";
}
- if (!CgroupsAvailable()) {
- // nothing to do here, if cgroups isn't available
- return 0;
+ const std::chrono::milliseconds kill_duration =
+ toMillisec(std::chrono::steady_clock::now() - start);
+
+ if (populated == populated_status::populated) {
+ LOG(WARNING) << "Still waiting on process(es) to exit for cgroup " << cgroup_v2_path
+ << " after " << kill_duration.count() << " ms";
+ // We'll still attempt the cgroup removal below, which we expect to fail and log an error.
+ } else if (populated == populated_status::not_populated) {
+ LOG(VERBOSE) << "Killed all processes under cgroup " << cgroup_v2_path
+ << " after " << kill_duration.count() << " ms";
}
- // 400 retries correspond to 2 secs max timeout
- int err = RemoveCgroup(cgroup, uid, initialPid, 400);
+ ret = RemoveCgroup(hierarchy_root_path.c_str(), uid, initialPid);
+ if (ret)
+ PLOG(ERROR) << "Unable to remove cgroup " << cgroup_v2_path;
+ else
+ LOG(INFO) << "Removed cgroup " << cgroup_v2_path;
if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
+ // This per-application memcg v1 case should eventually be removed after migration to
+ // memcg v2.
std::string memcg_apps_path;
if (CgroupGetMemcgAppsPath(&memcg_apps_path) &&
- RemoveCgroup(memcg_apps_path.c_str(), uid, initialPid, 400) < 0) {
- return -1;
+ (ret = RemoveCgroup(memcg_apps_path.c_str(), uid, initialPid)) < 0) {
+ const auto memcg_v1_cgroup_path =
+ ConvertUidPidToPath(memcg_apps_path.c_str(), uid, initialPid);
+ PLOG(ERROR) << "Unable to remove memcg v1 cgroup " << memcg_v1_cgroup_path;
}
}
- return err;
- } else {
- if (retries > 0) {
- LOG(ERROR) << "Failed to kill process cgroup uid " << uid << " pid " << initialPid
- << " in " << static_cast<int>(ms) << "ms, " << processes
- << " processes remain";
- }
- return -1;
- }
+ if (once) break;
+ if (std::chrono::steady_clock::now() >= until) break;
+ } while (ret && errno == EBUSY);
+
+ return ret;
}
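
The poll() in the loop above leans on the kernel's cgroup v2 notification contract: a change to cgroup.events is reported as POLLPRI (and POLLERR) on an open descriptor, so the kill loop can sleep until membership actually changes instead of spinning on a fixed interval. A self-contained sketch of just that wait (waitForEventsChange is a hypothetical helper):

    #include <poll.h>
    #include <unistd.h>
    #include <chrono>

    // Sketch: block until cgroup.events changes or the timeout expires.
    // events_fd is an open O_RDONLY fd for cgroup.events.
    static bool waitForEventsChange(int events_fd, std::chrono::milliseconds timeout) {
        struct pollfd pfd = {.fd = events_fd, .events = POLLPRI};
        const int ret = TEMP_FAILURE_RETRY(poll(&pfd, 1, static_cast<int>(timeout.count())));
        return ret > 0;  // > 0: events file changed; 0: timed out; -1: error
    }
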
int killProcessGroup(uid_t uid, int initialPid, int signal) {
- return KillProcessGroup(uid, initialPid, signal, 40 /*retries*/);
+ return KillProcessGroup(uid, initialPid, signal);
}
int killProcessGroupOnce(uid_t uid, int initialPid, int signal) {
- return KillProcessGroup(uid, initialPid, signal, 0 /*retries*/);
-}
-
-int sendSignalToProcessGroup(uid_t uid, int initialPid, int signal) {
- std::string hierarchy_root_path;
- if (CgroupsAvailable()) {
- CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- }
- const char* cgroup = hierarchy_root_path.c_str();
- return DoKillProcessGroupOnce(cgroup, uid, initialPid, signal);
+ return KillProcessGroup(uid, initialPid, signal, true);
}
static int createProcessGroupInternal(uid_t uid, int initialPid, std::string cgroup,
@@ -576,7 +677,7 @@
return -errno;
}
- auto uid_pid_procs_file = uid_pid_path + PROCESSGROUP_CGROUP_PROCS_FILE;
+ auto uid_pid_procs_file = uid_pid_path + '/' + PROCESSGROUP_CGROUP_PROCS_FILE;
if (!WriteStringToFile(std::to_string(initialPid), uid_pid_procs_file)) {
ret = -errno;
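
The one-character fix above is easy to misread: without the '/' separator, operator+ simply concatenates the directory path and the file name into a single bogus component. A minimal illustration (the path value is made up):

    #include <string>

    std::string uid_pid_path = "/sys/fs/cgroup/uid_1000/pid_1234";    // illustrative
    std::string broken = uid_pid_path + "cgroup.procs";         // ".../pid_1234cgroup.procs"
    std::string fixed  = uid_pid_path + '/' + "cgroup.procs";   // ".../pid_1234/cgroup.procs"
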
diff --git a/libvndksupport/include/vndksupport/linker.h b/libvndksupport/include/vndksupport/linker.h
index 5f48c39..6845135 100644
--- a/libvndksupport/include/vndksupport/linker.h
+++ b/libvndksupport/include/vndksupport/linker.h
@@ -20,15 +20,8 @@
extern "C" {
#endif
-/*
- * Returns whether the current process is a vendor process.
- *
- * Note that this is only checking what process is running and has nothing to
- * do with what namespace the caller is loaded at. For example, a VNDK-SP
- * library loaded by SP-HAL calling this function may still get a 'false',
- * because it is running in a system process.
- */
-int android_is_in_vendor_process();
+int android_is_in_vendor_process() __attribute__((
+ deprecated("This function would not give exact result if VNDK is deprecated.")));
void* android_load_sphal_library(const char* name, int flag);
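
With the attribute in place, existing callers keep linking and runtime behavior is unchanged; they simply get a build-time diagnostic when rebuilt against the annotated header. A sketch of what a call site sees (the function body is hypothetical):

    #include <vndksupport/linker.h>

    int check(void) {
        // warning: 'android_is_in_vendor_process' is deprecated:
        // This function may not give an exact result if VNDK is deprecated.
        return android_is_in_vendor_process();
    }
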
diff --git a/libvndksupport/libvndksupport.map.txt b/libvndksupport/libvndksupport.map.txt
index 1d94b9d..325505d 100644
--- a/libvndksupport/libvndksupport.map.txt
+++ b/libvndksupport/libvndksupport.map.txt
@@ -1,6 +1,6 @@
LIBVNDKSUPPORT {
global:
- android_is_in_vendor_process; # llndk systemapi
+ android_is_in_vendor_process; # llndk-deprecated=35 systemapi
android_load_sphal_library; # llndk systemapi
android_unload_sphal_library; # llndk systemapi
local:
diff --git a/rootdir/Android.mk b/rootdir/Android.mk
index 7deb173..7444f96 100644
--- a/rootdir/Android.mk
+++ b/rootdir/Android.mk
@@ -97,7 +97,7 @@
# create some directories (some are mount points) and symlinks
LOCAL_POST_INSTALL_CMD := mkdir -p $(addprefix $(TARGET_ROOT_OUT)/, \
dev proc sys system data data_mirror odm oem acct config storage mnt apex bootstrap-apex debug_ramdisk \
- linkerconfig second_stage_resources postinstall $(BOARD_ROOT_EXTRA_FOLDERS)); \
+ linkerconfig second_stage_resources postinstall tmp $(BOARD_ROOT_EXTRA_FOLDERS)); \
ln -sf /system/bin $(TARGET_ROOT_OUT)/bin; \
ln -sf /system/etc $(TARGET_ROOT_OUT)/etc; \
ln -sf /data/user_de/0/com.android.shell/files/bugreports $(TARGET_ROOT_OUT)/bugreports; \
diff --git a/rootdir/init.rc b/rootdir/init.rc
index fb64736..12c46eb 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -92,6 +92,12 @@
# checker programs.
mkdir /dev/fscklogs 0770 root system
+ # Create tmpfs for use by the shell user.
+ mount tmpfs tmpfs /tmp
+ restorecon /tmp
+ chown shell shell /tmp
+ chmod 0771 /tmp
+
on init
sysclktz 0
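
Once init mounts this tmpfs, programs running as the shell user can create scratch files under /tmp in the ordinary POSIX way; nothing persists across reboots because the backing store is memory. A minimal sketch (the file name template is illustrative):

    #include <stdlib.h>
    #include <unistd.h>

    int main() {
        char path[] = "/tmp/scratch.XXXXXX";
        int fd = mkstemp(path);       // atomically creates a unique file, mode 0600
        if (fd == -1) return 1;
        unlink(path);                 // name disappears; file lives while fd stays open
        write(fd, "hello\n", 6);
        close(fd);
        return 0;
    }
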
diff --git a/trusty/storage/proxy/storage.c b/trusty/storage/proxy/storage.c
index 2299481..8c8edb7 100644
--- a/trusty/storage/proxy/storage.c
+++ b/trusty/storage/proxy/storage.c
@@ -353,7 +353,6 @@
if (open_flags & O_CREAT) {
sync_parent(path, watcher);
}
- free(path);
/* at this point rc contains storage file fd */
msg->result = STORAGE_NO_ERROR;
@@ -361,6 +360,9 @@
ALOGV("%s: \"%s\": fd = %u: handle = %d\n",
__func__, path, rc, resp.handle);
+ free(path);
+ path = NULL;
+
/* a backing file has been opened, notify any waiting init steps */
if (!fs_ready_initialized) {
rc = property_set(FS_READY_PROPERTY, "1");
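
The two storage.c hunks above are a single use-after-free fix: free(path) used to run before the ALOGV() that still reads path, so the free is moved after the last use and the pointer is nulled. Distilled into a standalone sketch (names are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Sketch of the ordering fix: release a buffer only after its last reader,
    // then null the pointer so any stale use fails loudly.
    static void open_and_log(const char* name) {
        char* path = strdup(name);
        if (!path) return;
        printf("opened \"%s\"\n", path);  // last use of 'path'
        free(path);
        path = NULL;  // defensive: a later dereference is an obvious bug, not silent corruption
    }
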