Merge "trusty: storageproxyd: fix logging of freed path pointer" into main
diff --git a/debuggerd/crash_dump.cpp b/debuggerd/crash_dump.cpp
index 3563436..0899111 100644
--- a/debuggerd/crash_dump.cpp
+++ b/debuggerd/crash_dump.cpp
@@ -317,6 +317,7 @@
process_info->gwp_asan_state = crash_info->data.d.gwp_asan_state;
process_info->gwp_asan_metadata = crash_info->data.d.gwp_asan_metadata;
process_info->scudo_stack_depot = crash_info->data.d.scudo_stack_depot;
+ process_info->scudo_stack_depot_size = crash_info->data.d.scudo_stack_depot_size;
process_info->scudo_region_info = crash_info->data.d.scudo_region_info;
process_info->scudo_ring_buffer = crash_info->data.d.scudo_ring_buffer;
process_info->scudo_ring_buffer_size = crash_info->data.d.scudo_ring_buffer_size;
diff --git a/debuggerd/handler/debuggerd_handler.cpp b/debuggerd/handler/debuggerd_handler.cpp
index 01365f2..ea07ce2 100644
--- a/debuggerd/handler/debuggerd_handler.cpp
+++ b/debuggerd/handler/debuggerd_handler.cpp
@@ -395,6 +395,7 @@
ASSERT_SAME_OFFSET(scudo_region_info, scudo_region_info);
ASSERT_SAME_OFFSET(scudo_ring_buffer, scudo_ring_buffer);
ASSERT_SAME_OFFSET(scudo_ring_buffer_size, scudo_ring_buffer_size);
+ ASSERT_SAME_OFFSET(scudo_stack_depot_size, scudo_stack_depot_size);
ASSERT_SAME_OFFSET(recoverable_gwp_asan_crash, recoverable_gwp_asan_crash);
#undef ASSERT_SAME_OFFSET
diff --git a/debuggerd/include/debuggerd/handler.h b/debuggerd/include/debuggerd/handler.h
index ebb5372..de12fc6 100644
--- a/debuggerd/include/debuggerd/handler.h
+++ b/debuggerd/include/debuggerd/handler.h
@@ -44,6 +44,7 @@
const char* scudo_region_info;
const char* scudo_ring_buffer;
size_t scudo_ring_buffer_size;
+ size_t scudo_stack_depot_size;
bool recoverable_gwp_asan_crash;
};
diff --git a/debuggerd/libdebuggerd/include/libdebuggerd/types.h b/debuggerd/libdebuggerd/include/libdebuggerd/types.h
index 5a2a7ab..075b12c 100644
--- a/debuggerd/libdebuggerd/include/libdebuggerd/types.h
+++ b/debuggerd/libdebuggerd/include/libdebuggerd/types.h
@@ -51,6 +51,7 @@
uintptr_t scudo_region_info = 0;
uintptr_t scudo_ring_buffer = 0;
size_t scudo_ring_buffer_size = 0;
+ size_t scudo_stack_depot_size = 0;
bool has_fault_address = false;
uintptr_t untagged_fault_address = 0;
diff --git a/debuggerd/libdebuggerd/scudo.cpp b/debuggerd/libdebuggerd/scudo.cpp
index ea8dff4..3fa3bd0 100644
--- a/debuggerd/libdebuggerd/scudo.cpp
+++ b/debuggerd/libdebuggerd/scudo.cpp
@@ -41,8 +41,6 @@
return;
}
- auto stack_depot = AllocAndReadFully(process_memory, process_info.scudo_stack_depot,
- __scudo_get_stack_depot_size());
auto region_info = AllocAndReadFully(process_memory, process_info.scudo_region_info,
__scudo_get_region_info_size());
std::unique_ptr<char[]> ring_buffer;
@@ -50,7 +48,12 @@
ring_buffer = AllocAndReadFully(process_memory, process_info.scudo_ring_buffer,
process_info.scudo_ring_buffer_size);
}
- if (!stack_depot || !region_info) {
+ std::unique_ptr<char[]> stack_depot;
+ if (process_info.scudo_stack_depot_size != 0) {
+ stack_depot = AllocAndReadFully(process_memory, process_info.scudo_stack_depot,
+ process_info.scudo_stack_depot_size);
+ }
+ if (!region_info) {
return;
}
@@ -78,7 +81,7 @@
}
__scudo_get_error_info(&error_info_, process_info.maybe_tagged_fault_address, stack_depot.get(),
- __scudo_get_stack_depot_size(), region_info.get(), ring_buffer.get(),
+ process_info.scudo_stack_depot_size, region_info.get(), ring_buffer.get(),
process_info.scudo_ring_buffer_size, memory.get(), memory_tags.get(),
memory_begin, memory_end - memory_begin);
}
diff --git a/debuggerd/protocol.h b/debuggerd/protocol.h
index 212d6dc..793428a 100644
--- a/debuggerd/protocol.h
+++ b/debuggerd/protocol.h
@@ -99,6 +99,7 @@
uintptr_t scudo_region_info;
uintptr_t scudo_ring_buffer;
size_t scudo_ring_buffer_size;
+ size_t scudo_stack_depot_size;
bool recoverable_gwp_asan_crash;
};
diff --git a/fastboot/Android.bp b/fastboot/Android.bp
index f85d1de..c0445f3 100644
--- a/fastboot/Android.bp
+++ b/fastboot/Android.bp
@@ -170,7 +170,7 @@
"android.hardware.fastboot@1.1",
"android.hardware.fastboot-V1-ndk",
"android.hardware.health@2.0",
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
"libasyncio",
"libbase",
"libbinder_ndk",
diff --git a/fastboot/constants.h b/fastboot/constants.h
index a803307..af4d1eb 100644
--- a/fastboot/constants.h
+++ b/fastboot/constants.h
@@ -82,3 +82,5 @@
#define FB_VAR_TREBLE_ENABLED "treble-enabled"
#define FB_VAR_MAX_FETCH_SIZE "max-fetch-size"
#define FB_VAR_DMESG "dmesg"
+#define FB_VAR_BATTERY_SERIAL_NUMBER "battery-serial-number"
+#define FB_VAR_BATTERY_PART_STATUS "battery-part-status"
diff --git a/fastboot/device/commands.cpp b/fastboot/device/commands.cpp
index bd936ae..e522f4d 100644
--- a/fastboot/device/commands.cpp
+++ b/fastboot/device/commands.cpp
@@ -147,6 +147,8 @@
{FB_VAR_SECURITY_PATCH_LEVEL, {GetSecurityPatchLevel, nullptr}},
{FB_VAR_TREBLE_ENABLED, {GetTrebleEnabled, nullptr}},
{FB_VAR_MAX_FETCH_SIZE, {GetMaxFetchSize, nullptr}},
+ {FB_VAR_BATTERY_SERIAL_NUMBER, {GetBatterySerialNumber, nullptr}},
+ {FB_VAR_BATTERY_PART_STATUS, {GetBatteryPartStatus, nullptr}},
};
static bool GetVarAll(FastbootDevice* device) {
diff --git a/fastboot/device/variables.cpp b/fastboot/device/variables.cpp
index 2847e35..77210ab 100644
--- a/fastboot/device/variables.cpp
+++ b/fastboot/device/variables.cpp
@@ -570,3 +570,79 @@
return true;
}
+
+bool GetBatterySerialNumber(FastbootDevice* device, const std::vector<std::string>&,
+ std::string* message) {
+ auto health_hal = device->health_hal();
+ if (!health_hal) {
+ return false;
+ }
+
+ if (GetDeviceLockStatus()) {
+ return device->WriteFail("Device is locked");
+ }
+
+ *message = "unsupported";
+
+ int32_t version = 0;
+ auto res = health_hal->getInterfaceVersion(&version);
+ if (!res.isOk()) {
+ return device->WriteFail("Unable to query battery data");
+ }
+ if (version >= 3) {
+ using aidl::android::hardware::health::BatteryHealthData;
+
+ BatteryHealthData data;
+ auto res = health_hal->getBatteryHealthData(&data);
+ if (!res.isOk()) {
+ return device->WriteFail("Unable to query battery data");
+ }
+ if (data.batterySerialNumber) {
+ *message = *data.batterySerialNumber;
+ }
+ }
+ return true;
+}
+
+bool GetBatteryPartStatus(FastbootDevice* device, const std::vector<std::string>&,
+ std::string* message) {
+ auto health_hal = device->health_hal();
+ if (!health_hal) {
+ return false;
+ }
+
+ using aidl::android::hardware::health::BatteryPartStatus;
+
+ BatteryPartStatus status = BatteryPartStatus::UNSUPPORTED;
+
+ int32_t version = 0;
+ auto res = health_hal->getInterfaceVersion(&version);
+ if (!res.isOk()) {
+ return device->WriteFail("Unable to query battery data");
+ }
+ if (version >= 3) {
+ using aidl::android::hardware::health::BatteryHealthData;
+
+ BatteryHealthData data;
+ auto res = health_hal->getBatteryHealthData(&data);
+ if (!res.isOk()) {
+ return device->WriteFail("Unable to query battery data");
+ }
+ status = data.batteryPartStatus;
+ }
+ switch (status) {
+ case BatteryPartStatus::UNSUPPORTED:
+ *message = "unsupported";
+ break;
+ case BatteryPartStatus::ORIGINAL:
+ *message = "original";
+ break;
+ case BatteryPartStatus::REPLACED:
+ *message = "replaced";
+ break;
+ default:
+ *message = "unknown";
+ break;
+ }
+ return true;
+}
diff --git a/fastboot/device/variables.h b/fastboot/device/variables.h
index 9a46786..99d1355 100644
--- a/fastboot/device/variables.h
+++ b/fastboot/device/variables.h
@@ -67,6 +67,10 @@
std::string* message);
bool GetBatterySoCOk(FastbootDevice* device, const std::vector<std::string>& args,
std::string* message);
+bool GetBatterySerialNumber(FastbootDevice* device, const std::vector<std::string>& args,
+ std::string* message);
+bool GetBatteryPartStatus(FastbootDevice* device, const std::vector<std::string>& args,
+ std::string* message);
bool GetSuperPartitionName(FastbootDevice* device, const std::vector<std::string>& args,
std::string* message);
bool GetSnapshotUpdateStatus(FastbootDevice* device, const std::vector<std::string>& args,
diff --git a/fastboot/fastboot.cpp b/fastboot/fastboot.cpp
index ac2a20f..235d723 100644
--- a/fastboot/fastboot.cpp
+++ b/fastboot/fastboot.cpp
@@ -402,7 +402,7 @@
transport = open_device(device.c_str(), false, false);
if (print) {
- PrintDevice(device.c_str(), transport ? "offline" : "fastboot");
+ PrintDevice(device.c_str(), transport ? "fastboot" : "offline");
}
if (transport) {
@@ -1675,7 +1675,7 @@
}
for (size_t i = 0; i < tasks->size(); i++) {
if (auto flash_task = tasks->at(i)->AsFlashTask()) {
- if (FlashTask::IsDynamicParitition(fp->source.get(), flash_task)) {
+ if (FlashTask::IsDynamicPartition(fp->source.get(), flash_task)) {
if (!loc) {
loc = i;
}
diff --git a/fastboot/task.cpp b/fastboot/task.cpp
index 0947ff9..ea78a01 100644
--- a/fastboot/task.cpp
+++ b/fastboot/task.cpp
@@ -30,7 +30,7 @@
const bool apply_vbmeta, const FlashingPlan* fp)
: pname_(pname), fname_(fname), slot_(slot), apply_vbmeta_(apply_vbmeta), fp_(fp) {}
-bool FlashTask::IsDynamicParitition(const ImageSource* source, const FlashTask* task) {
+bool FlashTask::IsDynamicPartition(const ImageSource* source, const FlashTask* task) {
std::vector<char> contents;
if (!source->ReadFile("super_empty.img", &contents)) {
return false;
@@ -152,7 +152,7 @@
continue;
}
auto flash_task = tasks[i + 2]->AsFlashTask();
- if (!FlashTask::IsDynamicParitition(source, flash_task)) {
+ if (!FlashTask::IsDynamicPartition(source, flash_task)) {
continue;
}
return true;
diff --git a/fastboot/task.h b/fastboot/task.h
index a98c874..7a713cf 100644
--- a/fastboot/task.h
+++ b/fastboot/task.h
@@ -52,7 +52,7 @@
const bool apply_vbmeta, const FlashingPlan* fp);
virtual FlashTask* AsFlashTask() override { return this; }
- static bool IsDynamicParitition(const ImageSource* source, const FlashTask* task);
+ static bool IsDynamicPartition(const ImageSource* source, const FlashTask* task);
void Run() override;
std::string ToString() const override;
std::string GetPartition() const { return pname_; }
diff --git a/fastboot/task_test.cpp b/fastboot/task_test.cpp
index 81154c6..519d4ed 100644
--- a/fastboot/task_test.cpp
+++ b/fastboot/task_test.cpp
@@ -233,7 +233,7 @@
<< "size of fastboot-info task list: " << fastboot_info_tasks.size()
<< " size of hardcoded task list: " << hardcoded_tasks.size();
}
-TEST_F(ParseTest, IsDynamicParitiontest) {
+TEST_F(ParseTest, IsDynamicPartitiontest) {
if (!get_android_product_out()) {
GTEST_SKIP();
}
@@ -258,7 +258,7 @@
ParseFastbootInfoLine(fp.get(), android::base::Tokenize(test.first, " "));
auto flash_task = task->AsFlashTask();
ASSERT_FALSE(flash_task == nullptr);
- ASSERT_EQ(FlashTask::IsDynamicParitition(fp->source.get(), flash_task), test.second);
+ ASSERT_EQ(FlashTask::IsDynamicPartition(fp->source.get(), flash_task), test.second);
}
}
@@ -358,7 +358,7 @@
contains_optimized_task = true;
}
if (auto flash_task = task->AsFlashTask()) {
- if (FlashTask::IsDynamicParitition(fp->source.get(), flash_task)) {
+ if (FlashTask::IsDynamicPartition(fp->source.get(), flash_task)) {
return false;
}
}
diff --git a/fs_mgr/fs_mgr.cpp b/fs_mgr/fs_mgr.cpp
index a94a274..af2b35a 100644
--- a/fs_mgr/fs_mgr.cpp
+++ b/fs_mgr/fs_mgr.cpp
@@ -701,6 +701,29 @@
}
//
+// Mechanism to allow fsck to be triggered by setting ro.preventative_fsck
+// Introduced to address b/305658663
+// If the property value is not equal to the flag file contents, trigger
+// fsck and store the property value in the flag file
+// If we want to trigger again, simply change the property value
+//
+static bool check_if_preventative_fsck_needed(const FstabEntry& entry) {
+ const char* flag_file = "/metadata/vold/preventative_fsck";
+ if (entry.mount_point != "/data") return false;
+
+ // Don't error check - both default to empty string, which is OK
+ std::string prop = android::base::GetProperty("ro.preventative_fsck", "");
+ std::string flag;
+ android::base::ReadFileToString(flag_file, &flag);
+ if (prop == flag) return false;
+ // fsck is run immediately, so assume it runs or there is some deeper problem
+ if (!android::base::WriteStringToFile(prop, flag_file))
+ PERROR << "Failed to write file " << flag_file;
+ LINFO << "Run preventative fsck on /data";
+ return true;
+}
+
+//
// Prepare the filesystem on the given block device to be mounted.
//
// If the "check" option was given in the fstab record, or it seems that the
@@ -750,7 +773,7 @@
}
}
- if (entry.fs_mgr_flags.check ||
+ if (check_if_preventative_fsck_needed(entry) || entry.fs_mgr_flags.check ||
(fs_stat & (FS_STAT_UNCLEAN_SHUTDOWN | FS_STAT_QUOTA_ENABLED))) {
check_fs(blk_device, entry.fs_type, mount_point, &fs_stat);
}
diff --git a/fs_mgr/fs_mgr_overlayfs_control.cpp b/fs_mgr/fs_mgr_overlayfs_control.cpp
index 06214ef..08ad80c 100644
--- a/fs_mgr/fs_mgr_overlayfs_control.cpp
+++ b/fs_mgr/fs_mgr_overlayfs_control.cpp
@@ -219,6 +219,35 @@
return OverlayfsTeardownResult::Ok;
}
+bool GetOverlaysActiveFlag() {
+ auto slot_number = fs_mgr_overlayfs_slot_number();
+ const auto super_device = kPhysicalDevice + fs_mgr_get_super_partition_name();
+
+ auto metadata = ReadMetadata(super_device, slot_number);
+ if (!metadata) {
+ return false;
+ }
+ return !!(metadata->header.flags & LP_HEADER_FLAG_OVERLAYS_ACTIVE);
+}
+
+bool SetOverlaysActiveFlag(bool flag) {
+ // Mark overlays as active in the partition table, to detect re-flash.
+ auto slot_number = fs_mgr_overlayfs_slot_number();
+ const auto super_device = kPhysicalDevice + fs_mgr_get_super_partition_name();
+ auto builder = MetadataBuilder::New(super_device, slot_number);
+ if (!builder) {
+ LERROR << "open " << super_device << " metadata";
+ return false;
+ }
+ builder->SetOverlaysActiveFlag(flag);
+ auto metadata = builder->Export();
+ if (!metadata || !UpdatePartitionTable(super_device, *metadata.get(), slot_number)) {
+ LERROR << "update super metadata";
+ return false;
+ }
+ return true;
+}
+
OverlayfsTeardownResult fs_mgr_overlayfs_teardown_scratch(const std::string& overlay,
bool* change) {
// umount and delete kScratchMountPoint storage if we have logical partitions
@@ -232,6 +261,10 @@
return OverlayfsTeardownResult::Error;
}
+ // Note: we don't care if SetOverlaysActiveFlag fails, since
+ // the overlays are removed no matter what.
+ SetOverlaysActiveFlag(false);
+
bool was_mounted = fs_mgr_overlayfs_already_mounted(kScratchMountPoint, false);
if (was_mounted) {
fs_mgr_overlayfs_umount_scratch();
@@ -448,6 +481,7 @@
}
}
}
+
// land the update back on to the partition
if (changed) {
auto metadata = builder->Export();
@@ -592,6 +626,12 @@
return false;
}
+ if (!SetOverlaysActiveFlag(true)) {
+ LOG(ERROR) << "Failed to update dynamic partition data";
+ fs_mgr_overlayfs_teardown_scratch(kScratchMountPoint, nullptr);
+ return false;
+ }
+
// If the partition exists, assume first that it can be mounted.
if (partition_exists) {
if (MountScratch(scratch_device)) {
@@ -856,6 +896,9 @@
return;
}
+ if (!GetOverlaysActiveFlag()) {
+ return;
+ }
if (ScratchIsOnData()) {
if (auto images = IImageManager::Open("remount", 0ms)) {
images->MapAllImages(init);
@@ -879,6 +922,9 @@
}
if (auto images = IImageManager::Open("remount", 0ms)) {
images->RemoveDisabledImages();
+ if (!GetOverlaysActiveFlag()) {
+ fs_mgr_overlayfs_teardown_scratch(kScratchMountPoint, nullptr);
+ }
}
}
diff --git a/fs_mgr/liblp/builder.cpp b/fs_mgr/liblp/builder.cpp
index 6cb2c51..4e6e97b 100644
--- a/fs_mgr/liblp/builder.cpp
+++ b/fs_mgr/liblp/builder.cpp
@@ -1211,6 +1211,15 @@
header_.flags |= LP_HEADER_FLAG_VIRTUAL_AB_DEVICE;
}
+void MetadataBuilder::SetOverlaysActiveFlag(bool flag) {
+ RequireExpandedMetadataHeader();
+ if (flag) {
+ header_.flags |= LP_HEADER_FLAG_OVERLAYS_ACTIVE;
+ } else {
+ header_.flags &= ~LP_HEADER_FLAG_OVERLAYS_ACTIVE;
+ }
+}
+
bool MetadataBuilder::IsABDevice() {
return !IPropertyFetcher::GetInstance()->GetProperty("ro.boot.slot_suffix", "").empty();
}
diff --git a/fs_mgr/liblp/include/liblp/builder.h b/fs_mgr/liblp/include/liblp/builder.h
index 54f31bc..957b96b 100644
--- a/fs_mgr/liblp/include/liblp/builder.h
+++ b/fs_mgr/liblp/include/liblp/builder.h
@@ -346,6 +346,8 @@
void SetAutoSlotSuffixing();
// Set the LP_HEADER_FLAG_VIRTUAL_AB_DEVICE flag.
void SetVirtualABDeviceFlag();
+ // Set or unset the LP_HEADER_FLAG_OVERLAYS_ACTIVE flag.
+ void SetOverlaysActiveFlag(bool flag);
bool GetBlockDeviceInfo(const std::string& partition_name, BlockDeviceInfo* info) const;
bool UpdateBlockDeviceInfo(const std::string& partition_name, const BlockDeviceInfo& info);
diff --git a/fs_mgr/liblp/include/liblp/metadata_format.h b/fs_mgr/liblp/include/liblp/metadata_format.h
index 41d8b0c..8d77097 100644
--- a/fs_mgr/liblp/include/liblp/metadata_format.h
+++ b/fs_mgr/liblp/include/liblp/metadata_format.h
@@ -240,6 +240,9 @@
*/
#define LP_HEADER_FLAG_VIRTUAL_AB_DEVICE 0x1
+/* This device has overlays activated via "adb remount". */
+#define LP_HEADER_FLAG_OVERLAYS_ACTIVE 0x2
+
/* This struct defines a logical partition entry, similar to what would be
* present in a GUID Partition Table.
*/
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
index 2c4d40f..b0eb723 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/cow_format.cpp
@@ -14,7 +14,6 @@
// limitations under the License.
//
-#include <inttypes.h>
#include <libsnapshot/cow_format.h>
#include <sys/types.h>
#include <unistd.h>
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
index 27accdc..44b7344 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/test_v3.cpp
@@ -97,7 +97,7 @@
options.op_count_max = 20;
auto writer = CreateCowWriter(3, options, GetCowFd());
ASSERT_FALSE(writer->AddZeroBlocks(1, 21));
- ASSERT_FALSE(writer->AddZeroBlocks(1, 1));
+ ASSERT_TRUE(writer->AddZeroBlocks(1, 20));
std::string data = "This is some data, believe it";
data.resize(options.block_size, '\0');
@@ -184,7 +184,7 @@
std::string data;
data.resize(options.block_size * 5);
for (int i = 0; i < data.size(); i++) {
- data[i] = char(rand() % 256);
+ data[i] = static_cast<char>('A' + i / options.block_size);
}
ASSERT_TRUE(writer->AddRawBlocks(5, data.data(), data.size()));
@@ -205,19 +205,20 @@
ASSERT_FALSE(iter->AtEnd());
size_t i = 0;
- std::string sink(data.size(), '\0');
while (!iter->AtEnd()) {
auto op = iter->Get();
+ std::string sink(options.block_size, '\0');
ASSERT_EQ(op->type(), kCowReplaceOp);
ASSERT_EQ(op->data_length, options.block_size);
ASSERT_EQ(op->new_block, 5 + i);
- ASSERT_TRUE(
- ReadData(reader, op, sink.data() + (i * options.block_size), options.block_size));
+ ASSERT_TRUE(ReadData(reader, op, sink.data(), options.block_size));
+ ASSERT_EQ(std::string_view(sink),
+ std::string_view(data).substr(i * options.block_size, options.block_size))
+ << " readback data for " << i << "th block does not match";
iter->Next();
i++;
}
- ASSERT_EQ(sink, data);
ASSERT_EQ(i, 5);
}
@@ -372,41 +373,33 @@
ASSERT_NE(iter, nullptr);
ASSERT_FALSE(iter->AtEnd());
- size_t i = 0;
-
- while (i < 5) {
+ for (size_t i = 0; i < 5; i++) {
auto op = iter->Get();
ASSERT_EQ(op->type(), kCowZeroOp);
ASSERT_EQ(op->new_block, 10 + i);
iter->Next();
- i++;
}
- i = 0;
- while (i < 5) {
+ for (size_t i = 0; i < 5; i++) {
auto op = iter->Get();
ASSERT_EQ(op->type(), kCowCopyOp);
ASSERT_EQ(op->new_block, 15 + i);
ASSERT_EQ(op->source(), 3 + i);
iter->Next();
- i++;
}
- i = 0;
std::string sink(data.size(), '\0');
- while (i < 5) {
+ for (size_t i = 0; i < 5; i++) {
auto op = iter->Get();
ASSERT_EQ(op->type(), kCowReplaceOp);
ASSERT_EQ(op->new_block, 18 + i);
- ASSERT_TRUE(
- ReadData(reader, op, sink.data() + (i * options.block_size), options.block_size));
+ ASSERT_EQ(reader.ReadData(op, sink.data() + (i * options.block_size), options.block_size),
+ options.block_size);
iter->Next();
- i++;
}
ASSERT_EQ(sink, data);
- i = 0;
std::fill(sink.begin(), sink.end(), '\0');
- while (i < 5) {
+ for (size_t i = 0; i < 5; i++) {
auto op = iter->Get();
ASSERT_EQ(op->type(), kCowXorOp);
ASSERT_EQ(op->new_block, 50 + i);
@@ -414,7 +407,6 @@
ASSERT_TRUE(
ReadData(reader, op, sink.data() + (i * options.block_size), options.block_size));
iter->Next();
- i++;
}
ASSERT_EQ(sink, data);
}
@@ -671,5 +663,25 @@
ASSERT_LE(writer.GetCowSize(), cow_size);
}
+TEST_F(CowTestV3, CopyOpMany) {
+ CowOptions options;
+ options.op_count_max = 100;
+ CowWriterV3 writer(options, GetCowFd());
+ writer.Initialize();
+ ASSERT_TRUE(writer.AddCopy(100, 50, 50));
+ ASSERT_TRUE(writer.AddCopy(150, 100, 50));
+ ASSERT_TRUE(writer.Finalize());
+ CowReader reader;
+ ASSERT_TRUE(reader.Parse(GetCowFd()));
+ auto it = reader.GetOpIter();
+ for (size_t i = 0; i < 100; i++) {
+ ASSERT_FALSE(it->AtEnd()) << " op iterator ended at " << i;
+ const auto op = *it->Get();
+ ASSERT_EQ(op.type(), kCowCopyOp);
+ ASSERT_EQ(op.new_block, 100 + i);
+ it->Next();
+ }
+}
+
} // namespace snapshot
} // namespace android
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
index 07f6f00..de097f5 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.cpp
@@ -38,6 +38,7 @@
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>
+#include <numeric>
// The info messages here are spammy, but as useful for update_engine. Disable
// them when running on the host.
@@ -55,7 +56,7 @@
using android::base::unique_fd;
CowWriterV3::CowWriterV3(const CowOptions& options, unique_fd&& fd)
- : CowWriterBase(options, std::move(fd)) {
+ : CowWriterBase(options, std::move(fd)), batch_size_(std::max<size_t>(options.cluster_ops, 1)) {
SetupHeaders();
}
@@ -70,6 +71,9 @@
header_.block_size = options_.block_size;
header_.num_merge_ops = options_.num_merge_ops;
header_.cluster_ops = 0;
+ if (batch_size_ > 1) {
+ LOG(INFO) << "Batch writes enabled with batch size of " << batch_size_;
+ }
if (options_.scratch_space) {
header_.buffer_size = BUFFER_REGION_DEFAULT_SIZE;
}
@@ -113,7 +117,13 @@
}
compression_.algorithm = *algorithm;
- compressor_ = ICompressor::Create(compression_, header_.block_size);
+ if (compression_.algorithm != kCowCompressNone) {
+ compressor_ = ICompressor::Create(compression_, header_.block_size);
+ if (compressor_ == nullptr) {
+ LOG(ERROR) << "Failed to create compressor for " << compression_.algorithm;
+ return false;
+ }
+ }
return true;
}
@@ -207,14 +217,15 @@
}
bool CowWriterV3::EmitCopy(uint64_t new_block, uint64_t old_block, uint64_t num_blocks) {
+ std::vector<CowOperationV3> ops(num_blocks);
for (size_t i = 0; i < num_blocks; i++) {
- CowOperationV3 op{};
+ CowOperationV3& op = ops[i];
op.set_type(kCowCopyOp);
op.new_block = new_block + i;
op.set_source(old_block + i);
- if (!WriteOperation(op)) {
- return false;
- }
+ }
+ if (!WriteOperation({ops.data(), ops.size()}, {})) {
+ return false;
}
return true;
@@ -231,37 +242,67 @@
bool CowWriterV3::EmitBlocks(uint64_t new_block_start, const void* data, size_t size,
uint64_t old_block, uint16_t offset, CowOperationType type) {
+ if (compression_.algorithm != kCowCompressNone && compressor_ == nullptr) {
+ LOG(ERROR) << "Compression algorithm is " << compression_.algorithm
+ << " but compressor is uninitialized.";
+ return false;
+ }
const size_t num_blocks = (size / header_.block_size);
- for (size_t i = 0; i < num_blocks; i++) {
- const uint8_t* const iter =
- reinterpret_cast<const uint8_t*>(data) + (header_.block_size * i);
+ if (compression_.algorithm == kCowCompressNone) {
+ std::vector<CowOperationV3> ops(num_blocks);
+ for (size_t i = 0; i < num_blocks; i++) {
+ CowOperation& op = ops[i];
+ op.new_block = new_block_start + i;
- CowOperation op = {};
- op.new_block = new_block_start + i;
-
- op.set_type(type);
- if (type == kCowXorOp) {
- op.set_source((old_block + i) * header_.block_size + offset);
- } else {
- op.set_source(next_data_pos_);
+ op.set_type(type);
+ if (type == kCowXorOp) {
+ op.set_source((old_block + i) * header_.block_size + offset);
+ } else {
+ op.set_source(next_data_pos_ + header_.block_size * i);
+ }
+ op.data_length = header_.block_size;
}
- std::basic_string<uint8_t> compressed_data;
- const void* out_data = iter;
+ return WriteOperation({ops.data(), ops.size()}, data, size);
+ }
- op.data_length = header_.block_size;
+ for (size_t i = 0; i < num_blocks; i += batch_size_) {
+ const auto blocks_to_write = std::min<size_t>(batch_size_, num_blocks - i);
+ std::vector<std::basic_string<uint8_t>> compressed_blocks(blocks_to_write);
+ std::vector<CowOperationV3> ops(blocks_to_write);
+ std::vector<struct iovec> vec(blocks_to_write);
+ size_t compressed_bytes = 0;
+ for (size_t j = 0; j < blocks_to_write; j++) {
+ const uint8_t* const iter =
+ reinterpret_cast<const uint8_t*>(data) + (header_.block_size * (i + j));
- if (compression_.algorithm) {
- if (!compressor_) {
- PLOG(ERROR) << "Compressor not initialized";
+ CowOperation& op = ops[j];
+ op.new_block = new_block_start + i + j;
+
+ op.set_type(type);
+ if (type == kCowXorOp) {
+ op.set_source((old_block + i + j) * header_.block_size + offset);
+ } else {
+ op.set_source(next_data_pos_ + compressed_bytes);
+ }
+
+ std::basic_string<uint8_t> compressed_data =
+ compressor_->Compress(iter, header_.block_size);
+ if (compressed_data.empty()) {
+ LOG(ERROR) << "Compression failed during EmitBlocks(" << new_block_start << ", "
+ << num_blocks << ");";
return false;
}
- compressed_data = compressor_->Compress(out_data, header_.block_size);
- if (compressed_data.size() < op.data_length) {
- out_data = compressed_data.data();
- op.data_length = compressed_data.size();
+ if (compressed_data.size() >= header_.block_size) {
+ compressed_data.resize(header_.block_size);
+ std::memcpy(compressed_data.data(), iter, header_.block_size);
}
+ compressed_blocks[j] = std::move(compressed_data);
+ vec[j] = {.iov_base = compressed_blocks[j].data(),
+ .iov_len = compressed_blocks[j].size()};
+ op.data_length = vec[j].iov_len;
+ compressed_bytes += op.data_length;
}
- if (!WriteOperation(op, out_data, op.data_length)) {
+ if (!WriteOperation({ops.data(), ops.size()}, {vec.data(), vec.size()})) {
PLOG(ERROR) << "AddRawBlocks with compression: write failed. new block: "
<< new_block_start << " compression: " << compression_.algorithm;
return false;
@@ -271,14 +312,15 @@
return true;
}
-bool CowWriterV3::EmitZeroBlocks(uint64_t new_block_start, uint64_t num_blocks) {
- for (uint64_t i = 0; i < num_blocks; i++) {
- CowOperationV3 op{};
+bool CowWriterV3::EmitZeroBlocks(uint64_t new_block_start, const uint64_t num_blocks) {
+ std::vector<CowOperationV3> ops(num_blocks);
+ for (uint64_t i = 0; i < ops.size(); i++) {
+ auto& op = ops[i];
op.set_type(kCowZeroOp);
op.new_block = new_block_start + i;
- if (!WriteOperation(op)) {
- return false;
- }
+ }
+ if (!WriteOperation({ops.data(), ops.size()})) {
+ return false;
}
return true;
}
@@ -324,43 +366,64 @@
return true;
}
-bool CowWriterV3::WriteOperation(const CowOperationV3& op, const void* data, size_t size) {
+bool CowWriterV3::WriteOperation(std::basic_string_view<CowOperationV3> op, const void* data,
+ size_t size) {
+ struct iovec vec {
+ .iov_len = size
+ };
+ // Dear C++ god, this is effectively a const_cast. I had to do this because
+ // pwritev()'s struct iovec requires a non-const pointer. The input data
+ // will not be modified, as the iovec is only used for a write operation.
+ std::memcpy(&vec.iov_base, &data, sizeof(data));
+ return WriteOperation(op, {&vec, 1});
+}
+
+bool CowWriterV3::WriteOperation(std::basic_string_view<CowOperationV3> ops,
+ std::basic_string_view<struct iovec> data) {
+ const auto total_data_size =
+ std::transform_reduce(data.begin(), data.end(), 0, std::plus<size_t>{},
+ [](const struct iovec& a) { return a.iov_len; });
if (IsEstimating()) {
- header_.op_count++;
+ header_.op_count += ops.size();
if (header_.op_count > header_.op_count_max) {
// If we increment op_count_max, the offset of data section would
// change. So need to update |next_data_pos_|
next_data_pos_ += (header_.op_count - header_.op_count_max) * sizeof(CowOperationV3);
header_.op_count_max = header_.op_count;
}
- next_data_pos_ += op.data_length;
+ next_data_pos_ += total_data_size;
return true;
}
- if (header_.op_count + 1 > header_.op_count_max) {
- LOG(ERROR) << "Maximum number of ops reached: " << header_.op_count_max;
+ if (header_.op_count + ops.size() > header_.op_count_max) {
+ LOG(ERROR) << "Current op count " << header_.op_count << ", attempting to write "
+ << ops.size() << " ops will exceed the max of " << header_.op_count_max;
return false;
}
const off_t offset = GetOpOffset(header_.op_count, header_);
- if (!android::base::WriteFullyAtOffset(fd_, &op, sizeof(op), offset)) {
- PLOG(ERROR) << "write failed for " << op << " at " << offset;
+ if (!android::base::WriteFullyAtOffset(fd_, ops.data(), ops.size() * sizeof(ops[0]), offset)) {
+ PLOG(ERROR) << "Write failed for " << ops.size() << " ops at " << offset;
return false;
}
- if (data && size > 0) {
- if (!android::base::WriteFullyAtOffset(fd_, data, size, next_data_pos_)) {
- PLOG(ERROR) << "write failed for data of size: " << size
- << " at offset: " << next_data_pos_;
+ if (!data.empty()) {
+ const auto ret = pwritev(fd_, data.data(), data.size(), next_data_pos_);
+ if (ret != total_data_size) {
+ PLOG(ERROR) << "write failed for data of size: " << data.size()
+ << " at offset: " << next_data_pos_ << " " << ret;
return false;
}
}
- header_.op_count++;
- next_data_pos_ += op.data_length;
- next_op_pos_ += sizeof(CowOperationV3);
+ header_.op_count += ops.size();
+ next_data_pos_ += total_data_size;
return true;
}
+bool CowWriterV3::WriteOperation(const CowOperationV3& op, const void* data, size_t size) {
+ return WriteOperation({&op, 1}, data, size);
+}
+
bool CowWriterV3::Finalize() {
CHECK_GE(header_.prefix.header_size, sizeof(CowHeaderV3));
CHECK_LE(header_.prefix.header_size, sizeof(header_));
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
index 340218f..93f1d24 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/writer_v3.h
@@ -15,6 +15,7 @@
#pragma once
#include <android-base/logging.h>
+#include <string_view>
#include "writer_base.h"
@@ -44,6 +45,10 @@
bool ParseOptions();
bool OpenForWrite();
bool OpenForAppend(uint64_t label);
+ bool WriteOperation(std::basic_string_view<CowOperationV3> op,
+ std::basic_string_view<struct iovec> data);
+ bool WriteOperation(std::basic_string_view<CowOperationV3> op, const void* data = nullptr,
+ size_t size = 0);
bool WriteOperation(const CowOperationV3& op, const void* data = nullptr, size_t size = 0);
bool EmitBlocks(uint64_t new_block_start, const void* data, size_t size, uint64_t old_block,
uint16_t offset, CowOperationType type);
@@ -59,13 +64,13 @@
// Resume points contain a laebl + cow_op_index.
std::shared_ptr<std::vector<ResumePoint>> resume_points_;
- uint64_t next_op_pos_ = 0;
uint64_t next_data_pos_ = 0;
std::vector<std::basic_string<uint8_t>> compressed_buf_;
// in the case that we are using one thread for compression, we can store and re-use the same
// compressor
int num_compress_threads_ = 1;
+ size_t batch_size_ = 0;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index f6a35a8..e33bdff 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -3330,7 +3330,7 @@
// Terminate stale daemon if any
std::unique_ptr<SnapuserdClient> snapuserd_client = std::move(snapuserd_client_);
if (!snapuserd_client) {
- snapuserd_client = SnapuserdClient::Connect(kSnapuserdSocket, 5s);
+ snapuserd_client = SnapuserdClient::TryConnect(kSnapuserdSocket, 5s);
}
if (snapuserd_client) {
snapuserd_client->DetachSnapuserd();
@@ -3661,7 +3661,7 @@
cow_options.compression = status.compression_algorithm();
cow_options.max_blocks = {status.device_size() / cow_options.block_size};
cow_options.batch_write = status.batched_writes();
- cow_options.num_compress_threads = status.enable_threading() ? 2 : 0;
+ cow_options.num_compress_threads = status.enable_threading() ? 2 : 1;
// TODO(b/313962438) Improve op_count estimate. For now, use number of
// blocks as an upper bound.
cow_options.op_count_max = status.device_size() / cow_options.block_size;
diff --git a/fs_mgr/libsnapshot/snapshot_test.cpp b/fs_mgr/libsnapshot/snapshot_test.cpp
index 4e6b5e1..c0c3eaf 100644
--- a/fs_mgr/libsnapshot/snapshot_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_test.cpp
@@ -2362,8 +2362,10 @@
auto init = NewManagerForFirstStageMount("_b");
ASSERT_NE(init, nullptr);
- ASSERT_TRUE(init->EnsureSnapuserdConnected());
- init->set_use_first_stage_snapuserd(true);
+ if (snapuserd_required_) {
+ ASSERT_TRUE(init->EnsureSnapuserdConnected());
+ init->set_use_first_stage_snapuserd(true);
+ }
ASSERT_TRUE(init->NeedSnapshotsInFirstStageMount());
ASSERT_TRUE(init->CreateLogicalAndSnapshotPartitions("super", snapshot_timeout_));
@@ -2374,9 +2376,11 @@
ASSERT_TRUE(IsPartitionUnchanged(name));
}
- ASSERT_TRUE(init->PerformInitTransition(SnapshotManager::InitTransition::SECOND_STAGE));
- for (const auto& name : partitions) {
- ASSERT_TRUE(init->snapuserd_client()->WaitForDeviceDelete(name + "-user-cow-init"));
+ if (snapuserd_required_) {
+ ASSERT_TRUE(init->PerformInitTransition(SnapshotManager::InitTransition::SECOND_STAGE));
+ for (const auto& name : partitions) {
+ ASSERT_TRUE(init->snapuserd_client()->WaitForDeviceDelete(name + "-user-cow-init"));
+ }
}
// Initiate the merge and wait for it to be completed.
diff --git a/fs_mgr/libsnapshot/snapuserd/Android.bp b/fs_mgr/libsnapshot/snapuserd/Android.bp
index 1b0c563..6b8e084 100644
--- a/fs_mgr/libsnapshot/snapuserd/Android.bp
+++ b/fs_mgr/libsnapshot/snapuserd/Android.bp
@@ -147,12 +147,6 @@
// snapuserd, which would lead to deadlock if we had to handle page
// faults for its code pages.
static_executable: true,
-
- // Snapuserd segfaults with ThinLTO
- // http://b/208565717
- lto: {
- never: true,
- },
}
cc_binary {
diff --git a/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h b/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
index 010beb3..ede92dd 100644
--- a/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
+++ b/fs_mgr/libsnapshot/snapuserd/include/snapuserd/snapuserd_client.h
@@ -17,11 +17,7 @@
#include <unistd.h>
#include <chrono>
-#include <cstring>
-#include <iostream>
#include <string>
-#include <thread>
-#include <vector>
#include <android-base/unique_fd.h>
@@ -53,9 +49,14 @@
explicit SnapuserdClient(android::base::unique_fd&& sockfd);
SnapuserdClient(){};
+ // Attempt to connect to snapuserd, wait for the daemon to start if
+ // connection failed.
static std::unique_ptr<SnapuserdClient> Connect(const std::string& socket_name,
std::chrono::milliseconds timeout_ms);
-
+ // Attempt to connect to snapuserd, but does not wait for the daemon to
+ // start.
+ static std::unique_ptr<SnapuserdClient> TryConnect(const std::string& socket_name,
+ std::chrono::milliseconds timeout_ms);
bool StopSnapuserd();
// Initializing a snapuserd handler is a three-step process:
diff --git a/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp b/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
index 3bed3a4..789c980 100644
--- a/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/snapuserd_client.cpp
@@ -27,7 +27,7 @@
#include <unistd.h>
#include <chrono>
-#include <sstream>
+#include <thread>
#include <android-base/file.h>
#include <android-base/logging.h>
@@ -64,6 +64,40 @@
return errno == ECONNREFUSED || errno == EINTR || errno == ENOENT;
}
+std::unique_ptr<SnapuserdClient> SnapuserdClient::TryConnect(const std::string& socket_name,
+ std::chrono::milliseconds timeout_ms) {
+ unique_fd fd;
+ const auto start = std::chrono::steady_clock::now();
+ while (true) {
+ fd.reset(TEMP_FAILURE_RETRY(socket_local_client(
+ socket_name.c_str(), ANDROID_SOCKET_NAMESPACE_RESERVED, SOCK_STREAM)));
+ if (fd >= 0) {
+ auto client = std::make_unique<SnapuserdClient>(std::move(fd));
+ if (!client->ValidateConnection()) {
+ return nullptr;
+ }
+ return client;
+ }
+ if (errno == ENOENT) {
+ LOG(INFO) << "Daemon socket " << socket_name
+ << " does not exist, return without waiting.";
+ return nullptr;
+ }
+ if (errno == ECONNREFUSED) {
+ const auto now = std::chrono::steady_clock::now();
+ const auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - start);
+ if (elapsed >= timeout_ms) {
+ LOG(ERROR) << "Timed out connecting to snapuserd socket: " << socket_name;
+ return nullptr;
+ }
+ std::this_thread::sleep_for(10ms);
+ } else {
+ PLOG(ERROR) << "connect failed: " << socket_name;
+ return nullptr;
+ }
+ }
+}
+
std::unique_ptr<SnapuserdClient> SnapuserdClient::Connect(const std::string& socket_name,
std::chrono::milliseconds timeout_ms) {
unique_fd fd;
diff --git a/healthd/Android.bp b/healthd/Android.bp
index 235303f..427ac48 100644
--- a/healthd/Android.bp
+++ b/healthd/Android.bp
@@ -76,7 +76,7 @@
defaults: ["libbatterymonitor_defaults"],
srcs: ["BatteryMonitor.cpp"],
static_libs: [
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
],
whole_static_libs: [
// Need to translate HIDL to AIDL to support legacy APIs in
@@ -203,12 +203,12 @@
defaults: ["libhealthd_charger_ui_defaults"],
static_libs: [
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
"android.hardware.health-translate-ndk",
],
export_static_lib_headers: [
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
],
}
@@ -280,7 +280,7 @@
static_libs: [
// common
"android.hardware.health@1.0-convert",
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
"libbatterymonitor",
"libcharger_sysprop",
"libhealthd_charger_nops",
diff --git a/healthd/BatteryMonitor.cpp b/healthd/BatteryMonitor.cpp
index 0c97632..b8bb586 100644
--- a/healthd/BatteryMonitor.cpp
+++ b/healthd/BatteryMonitor.cpp
@@ -59,6 +59,7 @@
using aidl::android::hardware::health::BatteryChargingState;
using aidl::android::hardware::health::BatteryHealth;
using aidl::android::hardware::health::BatteryHealthData;
+using aidl::android::hardware::health::BatteryPartStatus;
using aidl::android::hardware::health::BatteryStatus;
using aidl::android::hardware::health::HealthInfo;
@@ -219,6 +220,7 @@
{"Warm", BatteryHealth::GOOD},
{"Cool", BatteryHealth::GOOD},
{"Hot", BatteryHealth::OVERHEAT},
+ {"Calibration required", BatteryHealth::INCONSISTENT},
{NULL, BatteryHealth::UNKNOWN},
};
@@ -596,6 +598,9 @@
if (!mHealthdConfig->batteryStateOfHealthPath.empty())
return getIntField(mHealthdConfig->batteryStateOfHealthPath);
}
+ if (id == BATTERY_PROP_PART_STATUS) {
+ return static_cast<int>(BatteryPartStatus::UNSUPPORTED);
+ }
return 0;
}
@@ -679,6 +684,11 @@
ret = OK;
break;
+ case BATTERY_PROP_PART_STATUS:
+ val->valueInt64 = getBatteryHealthData(BATTERY_PROP_PART_STATUS);
+ ret = OK;
+ break;
+
default:
break;
}
@@ -686,6 +696,11 @@
return ret;
}
+status_t BatteryMonitor::getSerialNumber(std::optional<std::string>* out) {
+ *out = std::nullopt;
+ return OK;
+}
+
void BatteryMonitor::dumpState(int fd) {
int v;
char vs[128];
diff --git a/healthd/include/healthd/BatteryMonitor.h b/healthd/include/healthd/BatteryMonitor.h
index e9998ba..b30458d 100644
--- a/healthd/include/healthd/BatteryMonitor.h
+++ b/healthd/include/healthd/BatteryMonitor.h
@@ -18,6 +18,7 @@
#define HEALTHD_BATTERYMONITOR_H
#include <memory>
+#include <optional>
#include <batteryservice/BatteryService.h>
#include <utils/String8.h>
@@ -86,6 +87,8 @@
int getChargingPolicy();
int getBatteryHealthData(int id);
+ status_t getSerialNumber(std::optional<std::string>* out);
+
static void logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
const struct healthd_config& healthd_config);
diff --git a/init/first_stage_init.cpp b/init/first_stage_init.cpp
index e48fa15..c4d0f75 100644
--- a/init/first_stage_init.cpp
+++ b/init/first_stage_init.cpp
@@ -30,6 +30,7 @@
#include <chrono>
#include <filesystem>
#include <string>
+#include <thread>
#include <vector>
#include <android-base/chrono_utils.h>
diff --git a/init/property_service.cpp b/init/property_service.cpp
index bd74358..e2cff95 100644
--- a/init/property_service.cpp
+++ b/init/property_service.cpp
@@ -84,6 +84,7 @@
using android::base::ErrnoError;
using android::base::Error;
+using android::base::GetIntProperty;
using android::base::GetProperty;
using android::base::ParseInt;
using android::base::ReadFileToString;
@@ -112,7 +113,7 @@
constexpr auto LEGACY_ID_PROP = "ro.build.legacy.id";
constexpr auto VBMETA_DIGEST_PROP = "ro.boot.vbmeta.digest";
constexpr auto DIGEST_SIZE_USED = 8;
-constexpr auto API_LEVEL_CURRENT = 10000;
+constexpr auto MAX_VENDOR_API_LEVEL = 1000000;
static bool persistent_properties_loaded = false;
@@ -1084,15 +1085,16 @@
}
}
-static int read_api_level_props(const std::vector<std::string>& api_level_props) {
- int api_level = API_LEVEL_CURRENT;
- for (const auto& api_level_prop : api_level_props) {
- api_level = android::base::GetIntProperty(api_level_prop, API_LEVEL_CURRENT);
- if (api_level != API_LEVEL_CURRENT) {
- break;
- }
+static int vendor_api_level_of(int sdk_api_level) {
+ if (sdk_api_level < __ANDROID_API_V__) {
+ return sdk_api_level;
}
- return api_level;
+ // In Android V, vendor API level started with version 202404.
+ // The calculation assumes that the SDK api level bumps once a year.
+ if (sdk_api_level < __ANDROID_API_FUTURE__) {
+ return 202404 + ((sdk_api_level - __ANDROID_API_V__) * 100);
+ }
+ return MAX_VENDOR_API_LEVEL;
}
static void property_initialize_ro_vendor_api_level() {
@@ -1100,20 +1102,27 @@
// required to support.
constexpr auto VENDOR_API_LEVEL_PROP = "ro.vendor.api_level";
- // Api level properties of the board. The order of the properties must be kept.
- std::vector<std::string> BOARD_API_LEVEL_PROPS = {"ro.board.api_level",
- "ro.board.first_api_level"};
- // Api level properties of the device. The order of the properties must be kept.
- std::vector<std::string> DEVICE_API_LEVEL_PROPS = {"ro.product.first_api_level",
- "ro.build.version.sdk"};
+ auto vendor_api_level = GetIntProperty("ro.board.first_api_level", MAX_VENDOR_API_LEVEL);
+ if (vendor_api_level != MAX_VENDOR_API_LEVEL) {
+ // Update the vendor_api_level with "ro.board.api_level" only if both "ro.board.api_level"
+ // and "ro.board.first_api_level" are defined.
+ vendor_api_level = GetIntProperty("ro.board.api_level", vendor_api_level);
+ }
- int api_level = std::min(read_api_level_props(BOARD_API_LEVEL_PROPS),
- read_api_level_props(DEVICE_API_LEVEL_PROPS));
+ auto product_first_api_level =
+ GetIntProperty("ro.product.first_api_level", __ANDROID_API_FUTURE__);
+ if (product_first_api_level == __ANDROID_API_FUTURE__) {
+ // Fallback to "ro.build.version.sdk" if the "ro.product.first_api_level" is not defined.
+ product_first_api_level = GetIntProperty("ro.build.version.sdk", __ANDROID_API_FUTURE__);
+ }
+
+ vendor_api_level = std::min(vendor_api_level_of(product_first_api_level), vendor_api_level);
+
std::string error;
- auto res = PropertySetNoSocket(VENDOR_API_LEVEL_PROP, std::to_string(api_level), &error);
+ auto res = PropertySetNoSocket(VENDOR_API_LEVEL_PROP, std::to_string(vendor_api_level), &error);
if (res != PROP_SUCCESS) {
- LOG(ERROR) << "Failed to set " << VENDOR_API_LEVEL_PROP << " with " << api_level << ": "
- << error << "(" << res << ")";
+ LOG(ERROR) << "Failed to set " << VENDOR_API_LEVEL_PROP << " with " << vendor_api_level
+ << ": " << error << "(" << res << ")";
}
}
diff --git a/init/snapuserd_transition.cpp b/init/snapuserd_transition.cpp
index 3a9ff5b..3a78343 100644
--- a/init/snapuserd_transition.cpp
+++ b/init/snapuserd_transition.cpp
@@ -25,6 +25,7 @@
#include <filesystem>
#include <string>
#include <string_view>
+#include <thread>
#include <android-base/file.h>
#include <android-base/logging.h>
diff --git a/libprocessgroup/include/processgroup/processgroup.h b/libprocessgroup/include/processgroup/processgroup.h
index 9107838..ca6868c 100644
--- a/libprocessgroup/include/processgroup/processgroup.h
+++ b/libprocessgroup/include/processgroup/processgroup.h
@@ -65,9 +65,8 @@
// should be active again. E.g. Zygote specialization for child process.
void DropTaskProfilesResourceCaching();
-// Return 0 and removes the cgroup if there are no longer any processes in it.
-// Returns -1 in the case of an error occurring or if there are processes still running
-// even after retrying for up to 200ms.
+// Return 0 if all processes were killed and the cgroup was successfully removed.
+// Returns -1 in the case of an error occurring or if there are processes still running.
int killProcessGroup(uid_t uid, int initialPid, int signal);
// Returns the same as killProcessGroup(), however it does not retry, which means
@@ -76,8 +75,9 @@
// Sends the provided signal to all members of a process group, but does not wait for processes to
// exit, or for the cgroup to be removed. Callers should also ensure that killProcessGroup is called
-// later to ensure the cgroup is fully removed, otherwise system resources may leak.
-int sendSignalToProcessGroup(uid_t uid, int initialPid, int signal);
+// later to ensure the cgroup is fully removed, otherwise system resources will leak.
+// Returns true if no errors are encountered sending signals, otherwise false.
+bool sendSignalToProcessGroup(uid_t uid, int initialPid, int signal);
int createProcessGroup(uid_t uid, int initialPid, bool memControl = false);
diff --git a/libprocessgroup/processgroup.cpp b/libprocessgroup/processgroup.cpp
index f594f7f..3209adf 100644
--- a/libprocessgroup/processgroup.cpp
+++ b/libprocessgroup/processgroup.cpp
@@ -22,6 +22,7 @@
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
+#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -30,6 +31,7 @@
#include <unistd.h>
#include <chrono>
+#include <cstring>
#include <map>
#include <memory>
#include <mutex>
@@ -53,7 +55,9 @@
using namespace std::chrono_literals;
-#define PROCESSGROUP_CGROUP_PROCS_FILE "/cgroup.procs"
+#define PROCESSGROUP_CGROUP_PROCS_FILE "cgroup.procs"
+#define PROCESSGROUP_CGROUP_KILL_FILE "cgroup.kill"
+#define PROCESSGROUP_CGROUP_EVENTS_FILE "cgroup.events"
bool CgroupsAvailable() {
static bool cgroups_available = access("/proc/cgroups", F_OK) == 0;
@@ -74,6 +78,29 @@
return true;
}
+static std::string ConvertUidToPath(const char* cgroup, uid_t uid) {
+ return StringPrintf("%s/uid_%u", cgroup, uid);
+}
+
+static std::string ConvertUidPidToPath(const char* cgroup, uid_t uid, int pid) {
+ return StringPrintf("%s/uid_%u/pid_%d", cgroup, uid, pid);
+}
+
+static bool CgroupKillAvailable() {
+ static std::once_flag f;
+ static bool cgroup_kill_available = false;
+ std::call_once(f, []() {
+ std::string cg_kill;
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &cg_kill);
+ // cgroup.kill is not on the root cgroup, so check a non-root cgroup that should always
+ // exist
+ cg_kill = ConvertUidToPath(cg_kill.c_str(), AID_ROOT) + '/' + PROCESSGROUP_CGROUP_KILL_FILE;
+ cgroup_kill_available = access(cg_kill.c_str(), F_OK) == 0;
+ });
+
+ return cgroup_kill_available;
+}
+
static bool CgroupGetMemcgAppsPath(std::string* path) {
CgroupController controller = CgroupMap::GetInstance().FindController("memory");
@@ -205,38 +232,21 @@
false);
}
-static std::string ConvertUidToPath(const char* cgroup, uid_t uid) {
- return StringPrintf("%s/uid_%u", cgroup, uid);
-}
-
-static std::string ConvertUidPidToPath(const char* cgroup, uid_t uid, int pid) {
- return StringPrintf("%s/uid_%u/pid_%d", cgroup, uid, pid);
-}
-
-static int RemoveCgroup(const char* cgroup, uid_t uid, int pid, unsigned int retries) {
- int ret = 0;
- auto uid_pid_path = ConvertUidPidToPath(cgroup, uid, pid);
-
- while (retries--) {
- ret = rmdir(uid_pid_path.c_str());
- // If we get an error 2 'No such file or directory' , that means the
- // cgroup is already removed, treat it as success and return 0 for
- // idempotency.
- if (ret < 0 && errno == ENOENT) {
- ret = 0;
- }
- if (!ret || errno != EBUSY || !retries) break;
- std::this_thread::sleep_for(5ms);
- }
+static int RemoveCgroup(const char* cgroup, uid_t uid, int pid) {
+ auto path = ConvertUidPidToPath(cgroup, uid, pid);
+ int ret = TEMP_FAILURE_RETRY(rmdir(path.c_str()));
if (!ret && uid >= AID_ISOLATED_START && uid <= AID_ISOLATED_END) {
// Isolated UIDs are unlikely to be reused soon after removal,
// so free up the kernel resources for the UID level cgroup.
- const auto uid_path = ConvertUidToPath(cgroup, uid);
- ret = rmdir(uid_path.c_str());
- if (ret < 0 && errno == ENOENT) {
- ret = 0;
- }
+ path = ConvertUidToPath(cgroup, uid);
+ ret = TEMP_FAILURE_RETRY(rmdir(path.c_str()));
+ }
+
+ if (ret < 0 && errno == ENOENT) {
+ // This function is idempotent, but still warn here.
+ LOG(WARNING) << "RemoveCgroup: " << path << " does not exist.";
+ ret = 0;
}
return ret;
@@ -360,38 +370,55 @@
return false;
}
-// Returns number of processes killed on success
-// Returns 0 if there are no processes in the process cgroup left to kill
-// Returns -1 on error
-static int DoKillProcessGroupOnce(const char* cgroup, uid_t uid, int initialPid, int signal) {
- // We separate all of the pids in the cgroup into those pids that are also the leaders of
- // process groups (stored in the pgids set) and those that are not (stored in the pids set).
- std::set<pid_t> pgids;
- pgids.emplace(initialPid);
- std::set<pid_t> pids;
- int processes = 0;
-
- std::unique_ptr<FILE, decltype(&fclose)> fd(nullptr, fclose);
+bool sendSignalToProcessGroup(uid_t uid, int initialPid, int signal) {
+ std::set<pid_t> pgids, pids;
if (CgroupsAvailable()) {
- auto path = ConvertUidPidToPath(cgroup, uid, initialPid) + PROCESSGROUP_CGROUP_PROCS_FILE;
- fd.reset(fopen(path.c_str(), "re"));
- if (!fd) {
- if (errno == ENOENT) {
- // This happens when the process is already dead or if, as the result of a bug, it
- // has been migrated to another cgroup. An example of a bug that can cause migration
- // to another cgroup is using the JoinCgroup action with a cgroup controller that
- // has been activated in the v2 cgroup hierarchy.
- goto kill;
+ std::string hierarchy_root_path, cgroup_v2_path;
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
+ cgroup_v2_path = ConvertUidPidToPath(hierarchy_root_path.c_str(), uid, initialPid);
+
+ if (signal == SIGKILL && CgroupKillAvailable()) {
+ LOG(VERBOSE) << "Using " << PROCESSGROUP_CGROUP_KILL_FILE << " to SIGKILL "
+ << cgroup_v2_path;
+
+ // We need to kill the process group in addition to the cgroup. For normal apps they
+ // should completely overlap, but system_server kills depend on process group kills to
+ // take down apps which are in their own cgroups and not individually targeted.
+ if (kill(-initialPid, signal) == -1 && errno != ESRCH) {
+ PLOG(WARNING) << "kill(" << -initialPid << ", " << signal << ") failed";
}
- PLOG(WARNING) << __func__ << " failed to open process cgroup uid " << uid << " pid "
- << initialPid;
- return -1;
+
+ const std::string killfilepath = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_KILL_FILE;
+ if (WriteStringToFile("1", killfilepath)) {
+ return true;
+ } else {
+ PLOG(ERROR) << "Failed to write 1 to " << killfilepath;
+ // Fallback to cgroup.procs below
+ }
}
+
+ // Since cgroup.kill only sends SIGKILLs, we read cgroup.procs to find each process to
+ // signal individually. This is more costly than using cgroup.kill for SIGKILLs.
+ LOG(VERBOSE) << "Using " << PROCESSGROUP_CGROUP_PROCS_FILE << " to signal (" << signal
+ << ") " << cgroup_v2_path;
+
+ // We separate all of the pids in the cgroup into those pids that are also the leaders of
+ // process groups (stored in the pgids set) and those that are not (stored in the pids set).
+ const auto procsfilepath = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_PROCS_FILE;
+ std::unique_ptr<FILE, decltype(&fclose)> fp(fopen(procsfilepath.c_str(), "re"), fclose);
+ if (!fp) {
+ // This should only happen if the cgroup has already been removed with a successful call
+ // to killProcessGroup. Callers should only retry sendSignalToProcessGroup or
+ // killProcessGroup calls if they fail without ENOENT.
+ PLOG(ERROR) << "Failed to open " << procsfilepath;
+ kill(-initialPid, signal);
+ return false;
+ }
+
pid_t pid;
bool file_is_empty = true;
- while (fscanf(fd.get(), "%d\n", &pid) == 1 && pid >= 0) {
- processes++;
+ while (fscanf(fp.get(), "%d\n", &pid) == 1 && pid >= 0) {
file_is_empty = false;
if (pid == 0) {
// Should never happen... but if it does, trying to kill this
@@ -421,7 +448,8 @@
}
}
-kill:
+ pgids.emplace(initialPid);
+
// Kill all process groups.
for (const auto pgid : pgids) {
LOG(VERBOSE) << "Killing process group " << -pgid << " in uid " << uid
@@ -442,101 +470,174 @@
}
}
- return (!fd || feof(fd.get())) ? processes : -1;
+ return true;
}
-static int KillProcessGroup(uid_t uid, int initialPid, int signal, int retries) {
+template <typename T>
+static std::chrono::milliseconds toMillisec(T&& duration) {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(duration);
+}
+
+enum class populated_status
+{
+ populated,
+ not_populated,
+ error
+};
+
+static populated_status cgroupIsPopulated(int events_fd) {
+ const std::string POPULATED_KEY("populated ");
+ const std::string::size_type MAX_EVENTS_FILE_SIZE = 32;
+
+ std::string buf;
+ buf.resize(MAX_EVENTS_FILE_SIZE);
+ ssize_t len = TEMP_FAILURE_RETRY(pread(events_fd, buf.data(), buf.size(), 0));
+ if (len == -1) {
+ PLOG(ERROR) << "Could not read cgroup.events: ";
+ // Potentially ENODEV if the cgroup has been removed since we opened this file, but that
+ // shouldn't have happened yet.
+ return populated_status::error;
+ }
+
+ if (len == 0) {
+ LOG(ERROR) << "cgroup.events EOF";
+ return populated_status::error;
+ }
+
+ buf.resize(len);
+
+ const std::string::size_type pos = buf.find(POPULATED_KEY);
+ if (pos == std::string::npos) {
+ LOG(ERROR) << "Could not find populated key in cgroup.events";
+ return populated_status::error;
+ }
+
+ if (pos + POPULATED_KEY.size() + 1 > len) {
+ LOG(ERROR) << "Partial read of cgroup.events";
+ return populated_status::error;
+ }
+
+ return buf[pos + POPULATED_KEY.size()] == '1' ?
+ populated_status::populated : populated_status::not_populated;
+}
+
+// The default timeout of 2200ms comes from the default number of retries in a previous
+// implementation of this function. The default retry value was 40 for killing and 400 for cgroup
+// removal with 5ms sleeps between each retry.
+static int KillProcessGroup(
+ uid_t uid, int initialPid, int signal, bool once = false,
+ std::chrono::steady_clock::time_point until = std::chrono::steady_clock::now() + 2200ms) {
CHECK_GE(uid, 0);
CHECK_GT(initialPid, 0);
+ // Always attempt to send a kill signal to at least the initialPid, at least once, regardless of
+ // whether its cgroup exists or not. This should only be necessary if a bug results in the
+ // migration of the targeted process out of its cgroup, which we will also attempt to kill.
+ const bool signal_ret = sendSignalToProcessGroup(uid, initialPid, signal);
+
+ if (!CgroupsAvailable() || !signal_ret) return signal_ret ? 0 : -1;
+
std::string hierarchy_root_path;
- if (CgroupsAvailable()) {
- CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- }
- const char* cgroup = hierarchy_root_path.c_str();
+ CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
+ const std::string cgroup_v2_path =
+ ConvertUidPidToPath(hierarchy_root_path.c_str(), uid, initialPid);
- int retry = retries;
- int processes;
- while ((processes = DoKillProcessGroupOnce(cgroup, uid, initialPid, signal)) > 0) {
- LOG(VERBOSE) << "Killed " << processes << " processes for processgroup " << initialPid;
- if (!CgroupsAvailable()) {
- // makes no sense to retry, because there are no cgroup_procs file
- processes = 0; // no remaining processes
- break;
- }
- if (retry > 0) {
- std::this_thread::sleep_for(5ms);
- --retry;
- } else {
- break;
- }
- }
-
- if (processes < 0) {
- PLOG(ERROR) << "Error encountered killing process cgroup uid " << uid << " pid "
- << initialPid;
+ const std::string eventsfile = cgroup_v2_path + '/' + PROCESSGROUP_CGROUP_EVENTS_FILE;
+ android::base::unique_fd events_fd(open(eventsfile.c_str(), O_RDONLY));
+ if (events_fd.get() == -1) {
+ PLOG(WARNING) << "Error opening " << eventsfile << " for KillProcessGroup";
return -1;
}
- std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
- auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+ struct pollfd fds = {
+ .fd = events_fd,
+ .events = POLLPRI,
+ };
- // We only calculate the number of 'processes' when killing the processes.
- // In the retries == 0 case, we only kill the processes once and therefore
- // will not have waited then recalculated how many processes are remaining
- // after the first signals have been sent.
- // Logging anything regarding the number of 'processes' here does not make sense.
+ const std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
- if (processes == 0) {
- if (retries > 0) {
- LOG(INFO) << "Successfully killed process cgroup uid " << uid << " pid " << initialPid
- << " in " << static_cast<int>(ms) << "ms";
+ // The primary reason to loop here is to capture any new forks or migrations that could occur
+ // after we send signals to the original set of processes, but before all of those processes
+ // exit and the cgroup becomes unpopulated, or before we remove the cgroup. We try hard to
+ // ensure this completes successfully to avoid permanent memory leaks, but we still place a
+ // large default upper bound on the amount of time we spend in this loop. The amount of CPU
+ // contention, and the amount of work that needs to be done in do_exit for each process
+ // determines how long this will take.
+ int ret;
+ do {
+ populated_status populated;
+ while ((populated = cgroupIsPopulated(events_fd.get())) == populated_status::populated &&
+ std::chrono::steady_clock::now() < until) {
+
+ sendSignalToProcessGroup(uid, initialPid, signal);
+ if (once) {
+ populated = cgroupIsPopulated(events_fd.get());
+ break;
+ }
+
+ const std::chrono::steady_clock::time_point poll_start =
+ std::chrono::steady_clock::now();
+
+ if (poll_start < until)
+ ret = TEMP_FAILURE_RETRY(poll(&fds, 1, toMillisec(until - poll_start).count()));
+
+ if (ret == -1) {
+ // Fallback to 5ms sleeps if poll fails
+ PLOG(ERROR) << "Poll on " << eventsfile << "failed";
+ const std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
+ if (now < until)
+ std::this_thread::sleep_for(std::min(5ms, toMillisec(until - now)));
+ }
+
+ LOG(VERBOSE) << "Waited "
+ << toMillisec(std::chrono::steady_clock::now() - poll_start).count()
+ << " ms for " << eventsfile << " poll";
}
- if (!CgroupsAvailable()) {
- // nothing to do here, if cgroups isn't available
- return 0;
+ const std::chrono::milliseconds kill_duration =
+ toMillisec(std::chrono::steady_clock::now() - start);
+
+ if (populated == populated_status::populated) {
+ LOG(WARNING) << "Still waiting on process(es) to exit for cgroup " << cgroup_v2_path
+ << " after " << kill_duration.count() << " ms";
+ // We'll still try the cgroup removal below which we expect to log an error.
+ } else if (populated == populated_status::not_populated) {
+ LOG(VERBOSE) << "Killed all processes under cgroup " << cgroup_v2_path
+ << " after " << kill_duration.count() << " ms";
}
- // 400 retries correspond to 2 secs max timeout
- int err = RemoveCgroup(cgroup, uid, initialPid, 400);
+ ret = RemoveCgroup(hierarchy_root_path.c_str(), uid, initialPid);
+ if (ret)
+ PLOG(ERROR) << "Unable to remove cgroup " << cgroup_v2_path;
+ else
+ LOG(INFO) << "Removed cgroup " << cgroup_v2_path;
if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
+ // This per-application memcg v1 case should eventually be removed after migration to
+ // memcg v2.
std::string memcg_apps_path;
if (CgroupGetMemcgAppsPath(&memcg_apps_path) &&
- RemoveCgroup(memcg_apps_path.c_str(), uid, initialPid, 400) < 0) {
- return -1;
+ (ret = RemoveCgroup(memcg_apps_path.c_str(), uid, initialPid)) < 0) {
+ const auto memcg_v1_cgroup_path =
+ ConvertUidPidToPath(memcg_apps_path.c_str(), uid, initialPid);
+ PLOG(ERROR) << "Unable to remove memcg v1 cgroup " << memcg_v1_cgroup_path;
}
}
- return err;
- } else {
- if (retries > 0) {
- LOG(ERROR) << "Failed to kill process cgroup uid " << uid << " pid " << initialPid
- << " in " << static_cast<int>(ms) << "ms, " << processes
- << " processes remain";
- }
- return -1;
- }
+ if (once) break;
+ if (std::chrono::steady_clock::now() >= until) break;
+ } while (ret && errno == EBUSY);
+
+ return ret;
}
int killProcessGroup(uid_t uid, int initialPid, int signal) {
- return KillProcessGroup(uid, initialPid, signal, 40 /*retries*/);
+ return KillProcessGroup(uid, initialPid, signal);
}
int killProcessGroupOnce(uid_t uid, int initialPid, int signal) {
- return KillProcessGroup(uid, initialPid, signal, 0 /*retries*/);
-}
-
-int sendSignalToProcessGroup(uid_t uid, int initialPid, int signal) {
- std::string hierarchy_root_path;
- if (CgroupsAvailable()) {
- CgroupGetControllerPath(CGROUPV2_HIERARCHY_NAME, &hierarchy_root_path);
- }
- const char* cgroup = hierarchy_root_path.c_str();
- return DoKillProcessGroupOnce(cgroup, uid, initialPid, signal);
+ return KillProcessGroup(uid, initialPid, signal, true);
}
static int createProcessGroupInternal(uid_t uid, int initialPid, std::string cgroup,
@@ -576,7 +677,7 @@
return -errno;
}
- auto uid_pid_procs_file = uid_pid_path + PROCESSGROUP_CGROUP_PROCS_FILE;
+ auto uid_pid_procs_file = uid_pid_path + '/' + PROCESSGROUP_CGROUP_PROCS_FILE;
if (!WriteStringToFile(std::to_string(initialPid), uid_pid_procs_file)) {
ret = -errno;
diff --git a/libutils/Android.bp b/libutils/Android.bp
index 85a0fd2..1c622ff 100644
--- a/libutils/Android.bp
+++ b/libutils/Android.bp
@@ -56,7 +56,7 @@
}
cc_defaults {
- name: "libutils_defaults",
+ name: "libutils_defaults_nodeps",
vendor_available: true,
product_available: true,
recovery_available: true,
@@ -69,10 +69,6 @@
"-DANDROID_UTILS_REF_BASE_DISABLE_IMPLICIT_CONSTRUCTION",
],
- shared_libs: [
- "libcutils",
- "liblog",
- ],
sanitize: {
misc_undefined: ["integer"],
},
@@ -118,6 +114,18 @@
}
cc_defaults {
+ name: "libutils_defaults",
+ defaults: [
+ "libutils_defaults_nodeps",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+}
+
+cc_defaults {
name: "libutils_impl_defaults",
defaults: [
"libutils_defaults",
diff --git a/libutils/binder/Android.bp b/libutils/binder/Android.bp
index a049f3d..60b0cb6 100644
--- a/libutils/binder/Android.bp
+++ b/libutils/binder/Android.bp
@@ -3,9 +3,9 @@
}
cc_defaults {
- name: "libutils_binder_impl_defaults",
+ name: "libutils_binder_impl_defaults_nodeps",
defaults: [
- "libutils_defaults",
+ "libutils_defaults_nodeps",
"apex-lowest-min-sdk-version",
],
native_bridge_supported: true,
@@ -30,11 +30,33 @@
afdo: true,
}
+cc_defaults {
+ name: "libutils_binder_impl_defaults",
+ defaults: [
+ "libutils_defaults",
+ "libutils_binder_impl_defaults_nodeps",
+ ],
+}
+
cc_library {
name: "libutils_binder",
defaults: ["libutils_binder_impl_defaults"],
}
+cc_library_shared {
+ name: "libutils_binder_sdk",
+ defaults: ["libutils_binder_impl_defaults_nodeps"],
+
+ header_libs: [
+ "liblog_stub",
+ ],
+
+ cflags: [
+ "-DANDROID_LOG_STUB_WEAK_PRINT",
+ "-DANDROID_UTILS_CALLSTACK_ENABLED=0",
+ ],
+}
+
cc_library {
name: "libutils_binder_test_compile",
defaults: ["libutils_binder_impl_defaults"],
diff --git a/storaged/Android.bp b/storaged/Android.bp
index fe8c1f3..357c0e6 100644
--- a/storaged/Android.bp
+++ b/storaged/Android.bp
@@ -24,7 +24,7 @@
shared_libs: [
"android.hardware.health@1.0",
"android.hardware.health@2.0",
- "android.hardware.health-V2-ndk",
+ "android.hardware.health-V3-ndk",
"libbase",
"libbinder",
"libbinder_ndk",