Merge "Do not check for debuggable / profilable for app tracing."
diff --git a/debuggerd/libdebuggerd/test/tombstone_test.cpp b/debuggerd/libdebuggerd/test/tombstone_test.cpp
index b42d70c..7fe8f82 100644
--- a/debuggerd/libdebuggerd/test/tombstone_test.cpp
+++ b/debuggerd/libdebuggerd/test/tombstone_test.cpp
@@ -379,7 +379,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_uaf_exact) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::USE_AFTER_FREE, &meta);
crash_data.SetCrashAddress(0x1000);
@@ -396,7 +396,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_double_free) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::DOUBLE_FREE, &meta);
crash_data.SetCrashAddress(0x1000);
@@ -413,7 +413,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_overflow) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::BUFFER_OVERFLOW, &meta);
crash_data.SetCrashAddress(0x1025);
@@ -432,7 +432,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_underflow) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::BUFFER_UNDERFLOW, &meta);
crash_data.SetCrashAddress(0xffe);
@@ -451,7 +451,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_invalid_free_inside) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::INVALID_FREE, &meta);
crash_data.SetCrashAddress(0x1001);
@@ -470,7 +470,7 @@
TEST_F(TombstoneTest, gwp_asan_cause_invalid_free_outside) {
gwp_asan::AllocationMetadata meta;
meta.Addr = 0x1000;
- meta.Size = 32;
+ meta.RequestedSize = 32;
GwpAsanCrashDataTest crash_data(gwp_asan::Error::INVALID_FREE, &meta);
crash_data.SetCrashAddress(0x1021);
diff --git a/debuggerd/proto/tombstone.proto b/debuggerd/proto/tombstone.proto
index 38a06f4..2c7156b 100644
--- a/debuggerd/proto/tombstone.proto
+++ b/debuggerd/proto/tombstone.proto
@@ -3,6 +3,9 @@
option java_package = "com.android.server.os";
option java_outer_classname = "TombstoneProtos";
+// NOTE TO OEMS:
+// If you add custom fields to this proto, do not use numbers in the reserved range.
+
message Tombstone {
Architecture arch = 1;
string build_fingerprint = 2;
@@ -24,6 +27,8 @@
repeated MemoryMapping memory_mappings = 17;
repeated LogBuffer log_buffers = 18;
repeated FD open_fds = 19;
+
+ reserved 20 to 999;
}
enum Architecture {
@@ -31,6 +36,8 @@
ARM64 = 1;
X86 = 2;
X86_64 = 3;
+
+ reserved 4 to 999;
}
message Signal {
@@ -46,15 +53,21 @@
bool has_fault_address = 8;
uint64 fault_address = 9;
+
+ reserved 10 to 999;
}
message Cause {
string human_readable = 1;
+
+ reserved 2 to 999;
}
message Register {
string name = 1;
uint64 u64 = 2;
+
+ reserved 3 to 999;
}
message Thread {
@@ -63,6 +76,8 @@
repeated Register registers = 3;
repeated BacktraceFrame current_backtrace = 4;
repeated MemoryDump memory_dump = 5;
+
+ reserved 6 to 999;
}
message BacktraceFrame {
@@ -76,6 +91,8 @@
string file_name = 6;
uint64 file_map_offset = 7;
string build_id = 8;
+
+ reserved 9 to 999;
}
message MemoryDump {
@@ -83,6 +100,8 @@
string mapping_name = 2;
uint64 begin_address = 3;
bytes memory = 4;
+
+ reserved 5 to 999;
}
message MemoryMapping {
@@ -97,6 +116,8 @@
string mapping_name = 7;
string build_id = 8;
uint64 load_bias = 9;
+
+ reserved 10 to 999;
}
message FD {
@@ -104,11 +125,15 @@
string path = 2;
string owner = 3;
uint64 tag = 4;
+
+ reserved 5 to 999;
}
message LogBuffer {
string name = 1;
repeated LogMessage logs = 2;
+
+ reserved 3 to 999;
}
message LogMessage {
@@ -118,4 +143,6 @@
uint32 priority = 4;
string tag = 5;
string message = 6;
+
+ reserved 7 to 999;
}
diff --git a/fs_mgr/fs_mgr_overlayfs.cpp b/fs_mgr/fs_mgr_overlayfs.cpp
index 388c296..1134f14 100644
--- a/fs_mgr/fs_mgr_overlayfs.cpp
+++ b/fs_mgr/fs_mgr_overlayfs.cpp
@@ -125,10 +125,38 @@
namespace {
+bool fs_mgr_in_recovery() {
+ // Check the existence of recovery binary instead of using the compile time
+ // macro, because first-stage-init is compiled with __ANDROID_RECOVERY__
+ // defined, albeit not in recovery. More details: system/core/init/README.md
+ return fs_mgr_access("/system/bin/recovery");
+}
+
+bool fs_mgr_is_dsu_running() {
+ // Since android::gsi::CanBootIntoGsi() or android::gsi::MarkSystemAsGsi() is
+ // never called in recovery, the return value of android::gsi::IsGsiRunning()
+ // is not well-defined. In this case, just return false as being in recovery
+ // implies not running a DSU system.
+ if (fs_mgr_in_recovery()) return false;
+ auto saved_errno = errno;
+ auto ret = android::gsi::IsGsiRunning();
+ errno = saved_errno;
+ return ret;
+}
+
// list of acceptable overlayfs backing storage
const auto kScratchMountPoint = "/mnt/scratch"s;
const auto kCacheMountPoint = "/cache"s;
-const std::vector<const std::string> kOverlayMountPoints = {kScratchMountPoint, kCacheMountPoint};
+
+std::vector<const std::string> OverlayMountPoints() {
+ // Never fallback to legacy cache mount point if within a DSU system,
+ // because running a DSU system implies the device supports dynamic
+ // partitions, which means legacy cache mustn't be used.
+ if (fs_mgr_is_dsu_running()) {
+ return {kScratchMountPoint};
+ }
+ return {kScratchMountPoint, kCacheMountPoint};
+}
// Return true if everything is mounted, but before adb is started. Right
// after 'trigger load_persist_props_action' is done.
@@ -166,26 +194,7 @@
static constexpr unsigned long kSizeThreshold = 8 * 1024 * 1024; // 8MB
return (vst.f_bfree >= (vst.f_blocks * kPercentThreshold / 100)) &&
- (vst.f_bfree * vst.f_bsize) >= kSizeThreshold;
-}
-
-bool fs_mgr_in_recovery() {
- // Check the existence of recovery binary instead of using the compile time
- // macro, because first-stage-init is compiled with __ANDROID_RECOVERY__
- // defined, albeit not in recovery. More details: system/core/init/README.md
- return fs_mgr_access("/system/bin/recovery");
-}
-
-bool fs_mgr_is_dsu_running() {
- // Since android::gsi::CanBootIntoGsi() or android::gsi::MarkSystemAsGsi() is
- // never called in recovery, the return value of android::gsi::IsGsiRunning()
- // is not well-defined. In this case, just return false as being in recovery
- // implies not running a DSU system.
- if (fs_mgr_in_recovery()) return false;
- auto saved_errno = errno;
- auto ret = android::gsi::IsGsiRunning();
- errno = saved_errno;
- return ret;
+ (static_cast<uint64_t>(vst.f_bfree) * vst.f_frsize) >= kSizeThreshold;
}
const auto kPhysicalDevice = "/dev/block/by-name/"s;
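
The hunk above switches the free-space check from f_bsize to f_frsize and widens the product to 64 bits. As a standalone sketch only (not part of the patch; kPercentThreshold is an assumed value borrowed from the surrounding fs_mgr code), the corrected check reads:

#include <cstdint>
#include <string>
#include <sys/statvfs.h>

// Standalone sketch of the corrected free-space check. f_frsize is the fragment
// size that f_bfree/f_blocks are counted in (f_bsize may differ from it), and the
// explicit uint64_t cast keeps the byte product from overflowing on 32-bit builds.
static bool HasEnoughFreeSpace(const std::string& mount_point) {
    struct statvfs vst;
    if (statvfs(mount_point.c_str(), &vst)) return false;
    constexpr unsigned long kPercentThreshold = 1;                     // assumed value
    static constexpr unsigned long kSizeThreshold = 8 * 1024 * 1024;   // 8MB
    return (vst.f_bfree >= (vst.f_blocks * kPercentThreshold / 100)) &&
           (static_cast<uint64_t>(vst.f_bfree) * vst.f_frsize) >= kSizeThreshold;
}
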
@@ -300,7 +309,7 @@
std::string fs_mgr_get_overlayfs_candidate(const std::string& mount_point) {
if (!fs_mgr_is_dir(mount_point)) return "";
const auto base = android::base::Basename(mount_point) + "/";
- for (const auto& overlay_mount_point : kOverlayMountPoints) {
+ for (const auto& overlay_mount_point : OverlayMountPoints()) {
auto dir = overlay_mount_point + kOverlayTopDir + "/" + base;
auto upper = dir + kUpperName;
if (!fs_mgr_is_dir(upper)) continue;
@@ -1344,7 +1353,7 @@
if (candidates.empty()) return ret;
std::string dir;
- for (const auto& overlay_mount_point : kOverlayMountPoints) {
+ for (const auto& overlay_mount_point : OverlayMountPoints()) {
if (backing && backing[0] && (overlay_mount_point != backing)) continue;
if (overlay_mount_point == kScratchMountPoint) {
if (!fs_mgr_overlayfs_setup_scratch(fstab, change)) continue;
@@ -1465,7 +1474,7 @@
}
}
bool should_destroy_scratch = false;
- for (const auto& overlay_mount_point : kOverlayMountPoints) {
+ for (const auto& overlay_mount_point : OverlayMountPoints()) {
ret &= fs_mgr_overlayfs_teardown_one(
overlay_mount_point, mount_point ? fs_mgr_mount_point(mount_point) : "", change,
overlay_mount_point == kScratchMountPoint ? &should_destroy_scratch : nullptr);
@@ -1569,7 +1578,7 @@
constexpr bool* ignore_change = nullptr;
// Teardown legacy overlay mount points that's not backed by a scratch device.
- for (const auto& overlay_mount_point : kOverlayMountPoints) {
+ for (const auto& overlay_mount_point : OverlayMountPoints()) {
if (overlay_mount_point == kScratchMountPoint) {
continue;
}
diff --git a/fs_mgr/libfiemap/fiemap_writer.cpp b/fs_mgr/libfiemap/fiemap_writer.cpp
index 621031a..8acb885 100644
--- a/fs_mgr/libfiemap/fiemap_writer.cpp
+++ b/fs_mgr/libfiemap/fiemap_writer.cpp
@@ -52,7 +52,7 @@
static constexpr const uint32_t kUnsupportedExtentFlags =
FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_UNWRITTEN | FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_NOT_ALIGNED | FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_DATA_TAIL |
- FIEMAP_EXTENT_UNWRITTEN | FIEMAP_EXTENT_SHARED | FIEMAP_EXTENT_MERGED;
+ FIEMAP_EXTENT_UNWRITTEN | FIEMAP_EXTENT_SHARED;
// Large file support must be enabled.
static_assert(sizeof(off_t) == sizeof(uint64_t));
diff --git a/fs_mgr/libsnapshot/Android.bp b/fs_mgr/libsnapshot/Android.bp
index d36a7f0..678adf8 100644
--- a/fs_mgr/libsnapshot/Android.bp
+++ b/fs_mgr/libsnapshot/Android.bp
@@ -549,6 +549,7 @@
],
srcs: [
"cow_snapuserd_test.cpp",
+ "snapuserd.cpp",
],
cflags: [
"-Wall",
diff --git a/fs_mgr/libsnapshot/cow_reader.cpp b/fs_mgr/libsnapshot/cow_reader.cpp
index c15a05b..cf9f6ea 100644
--- a/fs_mgr/libsnapshot/cow_reader.cpp
+++ b/fs_mgr/libsnapshot/cow_reader.cpp
@@ -181,6 +181,7 @@
ops_buffer->resize(current_op_num);
}
+ LOG(DEBUG) << "COW file read complete. Total ops: " << ops_buffer->size();
// To successfully parse a COW file, we need either:
// (1) a label to read up to, and for that label to be found, or
// (2) a valid footer.
@@ -236,6 +237,7 @@
[](CowOperation& op) { return IsMetadataOp(op); }),
ops_.get()->end());
+ set_total_data_ops(ops_->size());
// We will re-arrange the vector in such a way that
// kernel can batch merge. Ex:
//
@@ -298,10 +300,9 @@
// are contiguous. These are monotonically increasing numbers.
//
// When both (1) and (2) are true, kernel will batch merge the operations.
- // However, we do not want copy operations to be batch merged as
- // a crash or system reboot during an overlapping copy can drive the device
- // to a corrupted state. Hence, merging of copy operations should always be
- // done as a individual 4k block. In the above case, since the
+    // In the above case, we have to ensure that the copy operations
+    // are merged before the replace operations are done. Hence,
+    // we will not change the order of copy operations. Since the
// cow_op->new_block numbers are contiguous, we will ensure that the
// cow block numbers assigned in ReadMetadata() for these respective copy
// operations are not contiguous forcing kernel to issue merge for each
@@ -328,10 +329,8 @@
//
// Merge sequence will look like:
//
- // Merge-1 - Copy-op-1
- // Merge-2 - Copy-op-2
- // Merge-3 - Copy-op-3
- // Merge-4 - Batch-merge {Replace-op-7, Replace-op-6, Zero-op-8,
+ // Merge-1 - Batch-merge { Copy-op-1, Copy-op-2, Copy-op-3 }
+ // Merge-2 - Batch-merge {Replace-op-7, Replace-op-6, Zero-op-8,
// Replace-op-4, Zero-op-9, Replace-op-5 }
//==============================================================
diff --git a/fs_mgr/libsnapshot/cow_snapuserd_test.cpp b/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
index 7fa23db..045d9db 100644
--- a/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
+++ b/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
@@ -36,6 +36,8 @@
#include <libsnapshot/snapuserd_client.h>
#include <storage_literals/storage_literals.h>
+#include "snapuserd.h"
+
namespace android {
namespace snapshot {
@@ -119,7 +121,6 @@
void CreateDmUserDevice();
void StartSnapuserdDaemon();
void CreateSnapshotDevice();
- unique_fd CreateTempFile(const std::string& name, size_t size);
unique_ptr<LoopDevice> base_loop_;
unique_ptr<TempDevice> dmuser_dev_;
@@ -140,7 +141,24 @@
int total_base_size_;
};
-unique_fd CowSnapuserdTest::CreateTempFile(const std::string& name, size_t size) {
+class CowSnapuserdMetadataTest final {
+ public:
+ void Setup();
+ void SetupPartialArea();
+ void ValidateMetadata();
+ void ValidatePartialFilledArea();
+
+ private:
+ void InitMetadata();
+ void CreateCowDevice();
+ void CreateCowPartialFilledArea();
+
+ std::unique_ptr<Snapuserd> snapuserd_;
+ std::unique_ptr<TemporaryFile> cow_system_;
+ size_t size_ = 1_MiB;
+};
+
+static unique_fd CreateTempFile(const std::string& name, size_t size) {
unique_fd fd(syscall(__NR_memfd_create, name.c_str(), MFD_ALLOW_SEALING));
if (fd < 0) {
return {};
@@ -430,25 +448,299 @@
}
void CowSnapuserdTest::MergeInterrupt() {
+ // Interrupt merge at various intervals
StartMerge();
- std::this_thread::sleep_for(4s);
+ std::this_thread::sleep_for(250ms);
SimulateDaemonRestart();
StartMerge();
- std::this_thread::sleep_for(3s);
+ std::this_thread::sleep_for(250ms);
SimulateDaemonRestart();
StartMerge();
- std::this_thread::sleep_for(3s);
+ std::this_thread::sleep_for(150ms);
SimulateDaemonRestart();
StartMerge();
- std::this_thread::sleep_for(1s);
+ std::this_thread::sleep_for(100ms);
+ SimulateDaemonRestart();
+
+ StartMerge();
+ std::this_thread::sleep_for(800ms);
+ SimulateDaemonRestart();
+
+ StartMerge();
+ std::this_thread::sleep_for(600ms);
SimulateDaemonRestart();
ASSERT_TRUE(Merge());
}
+void CowSnapuserdMetadataTest::CreateCowPartialFilledArea() {
+ std::string path = android::base::GetExecutableDirectory();
+ cow_system_ = std::make_unique<TemporaryFile>(path);
+
+ CowOptions options;
+ options.compression = "gz";
+ CowWriter writer(options);
+
+ ASSERT_TRUE(writer.Initialize(cow_system_->fd));
+
+ // Area 0 is completely filled with 256 exceptions
+ for (int i = 0; i < 256; i++) {
+ ASSERT_TRUE(writer.AddCopy(i, 256 + i));
+ }
+
+ // Area 1 is partially filled with 2 copy ops and 10 zero ops
+ ASSERT_TRUE(writer.AddCopy(500, 1000));
+ ASSERT_TRUE(writer.AddCopy(501, 1001));
+
+ ASSERT_TRUE(writer.AddZeroBlocks(300, 10));
+
+ // Flush operations
+ ASSERT_TRUE(writer.Finalize());
+}
+
+void CowSnapuserdMetadataTest::ValidatePartialFilledArea() {
+ int area_sz = snapuserd_->GetMetadataAreaSize();
+
+ ASSERT_EQ(area_sz, 2);
+
+ size_t new_chunk = 263;
+ // Verify the partially filled area
+ void* buffer = snapuserd_->GetExceptionBuffer(1);
+ loff_t offset = 0;
+ struct disk_exception* de;
+ for (int i = 0; i < 12; i++) {
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, i);
+ ASSERT_EQ(de->new_chunk, new_chunk);
+ offset += sizeof(struct disk_exception);
+ new_chunk += 1;
+ }
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 0);
+ ASSERT_EQ(de->new_chunk, 0);
+}
+
+void CowSnapuserdMetadataTest::SetupPartialArea() {
+ CreateCowPartialFilledArea();
+ InitMetadata();
+}
+
+void CowSnapuserdMetadataTest::CreateCowDevice() {
+ unique_fd rnd_fd;
+ loff_t offset = 0;
+
+ std::string path = android::base::GetExecutableDirectory();
+ cow_system_ = std::make_unique<TemporaryFile>(path);
+
+ rnd_fd.reset(open("/dev/random", O_RDONLY));
+ ASSERT_TRUE(rnd_fd > 0);
+
+ std::unique_ptr<uint8_t[]> random_buffer_1_ = std::make_unique<uint8_t[]>(size_);
+
+ // Fill random data
+ for (size_t j = 0; j < (size_ / 1_MiB); j++) {
+ ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer_1_.get() + offset, 1_MiB, 0),
+ true);
+
+ offset += 1_MiB;
+ }
+
+ CowOptions options;
+ options.compression = "gz";
+ CowWriter writer(options);
+
+ ASSERT_TRUE(writer.Initialize(cow_system_->fd));
+
+ size_t num_blocks = size_ / options.block_size;
+
+ // Overlapping region. This has to be split
+ // into two batch operations
+ ASSERT_TRUE(writer.AddCopy(23, 20));
+ ASSERT_TRUE(writer.AddCopy(22, 19));
+ ASSERT_TRUE(writer.AddCopy(21, 18));
+ ASSERT_TRUE(writer.AddCopy(20, 17));
+ ASSERT_TRUE(writer.AddCopy(19, 16));
+ ASSERT_TRUE(writer.AddCopy(18, 15));
+
+ // Contiguous region but blocks in ascending order
+ // Daemon has to ensure that these blocks are merged
+ // in a batch
+ ASSERT_TRUE(writer.AddCopy(50, 75));
+ ASSERT_TRUE(writer.AddCopy(51, 76));
+ ASSERT_TRUE(writer.AddCopy(52, 77));
+ ASSERT_TRUE(writer.AddCopy(53, 78));
+
+    // Discontiguous region
+ ASSERT_TRUE(writer.AddCopy(110, 130));
+ ASSERT_TRUE(writer.AddCopy(105, 125));
+ ASSERT_TRUE(writer.AddCopy(100, 120));
+
+ // Overlap
+ ASSERT_TRUE(writer.AddCopy(25, 30));
+ ASSERT_TRUE(writer.AddCopy(30, 31));
+
+ size_t source_blk = num_blocks;
+
+ ASSERT_TRUE(writer.AddRawBlocks(source_blk, random_buffer_1_.get(), size_));
+
+ size_t blk_zero_copy_start = source_blk + num_blocks;
+
+ ASSERT_TRUE(writer.AddZeroBlocks(blk_zero_copy_start, num_blocks));
+
+ // Flush operations
+ ASSERT_TRUE(writer.Finalize());
+}
+
+void CowSnapuserdMetadataTest::InitMetadata() {
+ snapuserd_ = std::make_unique<Snapuserd>("", cow_system_->path, "");
+ ASSERT_TRUE(snapuserd_->InitCowDevice());
+}
+
+void CowSnapuserdMetadataTest::Setup() {
+ CreateCowDevice();
+ InitMetadata();
+}
+
+void CowSnapuserdMetadataTest::ValidateMetadata() {
+ int area_sz = snapuserd_->GetMetadataAreaSize();
+ ASSERT_EQ(area_sz, 3);
+
+ size_t old_chunk;
+ size_t new_chunk;
+
+ for (int i = 0; i < area_sz; i++) {
+ void* buffer = snapuserd_->GetExceptionBuffer(i);
+ loff_t offset = 0;
+ if (i == 0) {
+ old_chunk = 256;
+ new_chunk = 2;
+ } else if (i == 1) {
+ old_chunk = 512;
+ new_chunk = 259;
+ }
+ for (int j = 0; j < 256; j++) {
+ struct disk_exception* de =
+ reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+
+ if (i != 2) {
+ ASSERT_EQ(de->old_chunk, old_chunk);
+ ASSERT_EQ(de->new_chunk, new_chunk);
+ old_chunk += 1;
+ new_chunk += 1;
+ } else {
+ break;
+ }
+ offset += sizeof(struct disk_exception);
+ }
+
+ if (i == 2) {
+            // The first 5 copy operations are not batch merged
+ // as the sequence is discontiguous
+ struct disk_exception* de =
+ reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 30);
+ ASSERT_EQ(de->new_chunk, 518);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 25);
+ ASSERT_EQ(de->new_chunk, 520);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 100);
+ ASSERT_EQ(de->new_chunk, 522);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 105);
+ ASSERT_EQ(de->new_chunk, 524);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 110);
+ ASSERT_EQ(de->new_chunk, 526);
+ offset += sizeof(struct disk_exception);
+
+ // The next 4 operations are batch merged as
+ // both old and new chunk are contiguous
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 50);
+ ASSERT_EQ(de->new_chunk, 528);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 51);
+ ASSERT_EQ(de->new_chunk, 529);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 52);
+ ASSERT_EQ(de->new_chunk, 530);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 53);
+ ASSERT_EQ(de->new_chunk, 531);
+ offset += sizeof(struct disk_exception);
+
+            // These overlapping copy operations are handled with
+            // two separate batch merge operations.
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 18);
+ ASSERT_EQ(de->new_chunk, 533);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 19);
+ ASSERT_EQ(de->new_chunk, 534);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 20);
+ ASSERT_EQ(de->new_chunk, 535);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 21);
+ ASSERT_EQ(de->new_chunk, 537);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 22);
+ ASSERT_EQ(de->new_chunk, 538);
+ offset += sizeof(struct disk_exception);
+
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 23);
+ ASSERT_EQ(de->new_chunk, 539);
+ offset += sizeof(struct disk_exception);
+
+ // End of metadata
+ de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+ ASSERT_EQ(de->old_chunk, 0);
+ ASSERT_EQ(de->new_chunk, 0);
+ offset += sizeof(struct disk_exception);
+ }
+ }
+}
+
+TEST(Snapuserd_Test, Snapshot_Metadata) {
+ CowSnapuserdMetadataTest harness;
+ harness.Setup();
+ harness.ValidateMetadata();
+}
+
+TEST(Snapuserd_Test, Snapshot_Metadata_Overlap) {
+ CowSnapuserdMetadataTest harness;
+ harness.SetupPartialArea();
+ harness.ValidatePartialFilledArea();
+}
+
TEST(Snapuserd_Test, Snapshot_Merge_Resume) {
CowSnapuserdTest harness;
ASSERT_TRUE(harness.Setup());
@@ -457,7 +749,7 @@
harness.Shutdown();
}
-TEST(Snapuserd_Test, Snapshot) {
+TEST(Snapuserd_Test, Snapshot_IO_TEST) {
CowSnapuserdTest harness;
ASSERT_TRUE(harness.Setup());
harness.ReadSnapshotDeviceAndValidate();
@@ -465,7 +757,6 @@
harness.ValidateMerge();
harness.Shutdown();
}
-
} // namespace snapshot
} // namespace android
diff --git a/fs_mgr/libsnapshot/cow_writer.cpp b/fs_mgr/libsnapshot/cow_writer.cpp
index c1a5f32..81edc79 100644
--- a/fs_mgr/libsnapshot/cow_writer.cpp
+++ b/fs_mgr/libsnapshot/cow_writer.cpp
@@ -491,7 +491,7 @@
return true;
}
-bool CowWriter::CommitMerge(int merged_ops, bool sync) {
+bool CowWriter::CommitMerge(int merged_ops) {
CHECK(merge_in_progress_);
header_.num_merge_ops += merged_ops;
@@ -506,11 +506,7 @@
return false;
}
- // Sync only for merging of copy operations.
- if (sync) {
- return Sync();
- }
- return true;
+ return Sync();
}
bool CowWriter::Truncate(off_t length) {
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
index 62b54f9..1de7473 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_reader.h
@@ -142,6 +142,10 @@
void InitializeMerge();
+ void set_total_data_ops(uint64_t size) { total_data_ops_ = size; }
+
+ uint64_t total_data_ops() { return total_data_ops_; }
+
private:
bool ParseOps(std::optional<uint64_t> label);
@@ -152,6 +156,7 @@
uint64_t fd_size_;
std::optional<uint64_t> last_label_;
std::shared_ptr<std::vector<CowOperation>> ops_;
+ uint64_t total_data_ops_;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
index 22ddfa6..6ffd5d8 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
@@ -101,7 +101,7 @@
bool InitializeAppend(android::base::borrowed_fd fd, uint64_t label);
void InitializeMerge(android::base::borrowed_fd fd, CowHeader* header);
- bool CommitMerge(int merged_ops, bool sync);
+ bool CommitMerge(int merged_ops);
bool Finalize() override;
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
index 7941e68..2b6c8ef 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
@@ -47,8 +47,8 @@
static constexpr uint32_t CHUNK_SIZE = 8;
static constexpr uint32_t CHUNK_SHIFT = (__builtin_ffs(CHUNK_SIZE) - 1);
-static constexpr uint32_t BLOCK_SIZE = 4096;
-static constexpr uint32_t BLOCK_SHIFT = (__builtin_ffs(BLOCK_SIZE) - 1);
+static constexpr uint32_t BLOCK_SZ = 4096;
+static constexpr uint32_t BLOCK_SHIFT = (__builtin_ffs(BLOCK_SZ) - 1);
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index 90225f8..eb3a501 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -1234,6 +1234,25 @@
return true;
}
+static bool DeleteDmDevice(const std::string& name, const std::chrono::milliseconds& timeout_ms) {
+ auto start = std::chrono::steady_clock::now();
+ auto& dm = DeviceMapper::Instance();
+ while (true) {
+ if (dm.DeleteDeviceIfExists(name)) {
+ break;
+ }
+ auto now = std::chrono::steady_clock::now();
+ auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - start);
+ if (elapsed >= timeout_ms) {
+ LOG(ERROR) << "DeleteDevice timeout: " << name;
+ return false;
+ }
+ std::this_thread::sleep_for(250ms);
+ }
+
+ return true;
+}
+
bool SnapshotManager::CollapseSnapshotDevice(const std::string& name,
const SnapshotStatus& status) {
auto& dm = DeviceMapper::Instance();
@@ -1292,10 +1311,11 @@
if (!dm.DeleteDeviceIfExists(base_name)) {
LOG(ERROR) << "Unable to delete base device for snapshot: " << base_name;
}
- auto source_name = GetSourceDeviceName(name);
- if (!dm.DeleteDeviceIfExists(source_name)) {
- LOG(ERROR) << "Unable to delete source device for snapshot: " << source_name;
+
+ if (!DeleteDmDevice(GetSourceDeviceName(name), 4000ms)) {
+ LOG(ERROR) << "Unable to delete source device for snapshot: " << GetSourceDeviceName(name);
}
+
return true;
}
@@ -1387,9 +1407,6 @@
}
auto misc_name = user_cow_name;
- if (transition == InitTransition::SELINUX_DETACH) {
- misc_name += "-selinux";
- }
DmTable table;
table.Emplace<DmTargetUser>(0, target.spec.length, misc_name);
@@ -1422,6 +1439,7 @@
// Wait for ueventd to acknowledge and create the control device node.
std::string control_device = "/dev/dm-user/" + misc_name;
if (!WaitForDevice(control_device, 10s)) {
+ LOG(ERROR) << "dm-user control device no found: " << misc_name;
continue;
}
@@ -2122,15 +2140,12 @@
CHECK(lock);
if (!EnsureImageManager()) return false;
- auto& dm = DeviceMapper::Instance();
-
if (UpdateUsesCompression(lock) && !UnmapDmUserDevice(name)) {
return false;
}
- auto cow_name = GetCowName(name);
- if (!dm.DeleteDeviceIfExists(cow_name)) {
- LOG(ERROR) << "Cannot unmap " << cow_name;
+ if (!DeleteDmDevice(GetCowName(name), 4000ms)) {
+ LOG(ERROR) << "Cannot unmap: " << GetCowName(name);
return false;
}
@@ -2155,12 +2170,11 @@
return false;
}
- if (!EnsureSnapuserdConnected()) {
- return false;
- }
- if (!snapuserd_client_->WaitForDeviceDelete(dm_user_name)) {
- LOG(ERROR) << "Failed to wait for " << dm_user_name << " control device to delete";
- return false;
+ if (EnsureSnapuserdConnected()) {
+ if (!snapuserd_client_->WaitForDeviceDelete(dm_user_name)) {
+ LOG(ERROR) << "Failed to wait for " << dm_user_name << " control device to delete";
+ return false;
+ }
}
// Ensure the control device is gone so we don't run into ABA problems.
diff --git a/fs_mgr/libsnapshot/snapuserd.cpp b/fs_mgr/libsnapshot/snapuserd.cpp
index 82db0d3..d620300 100644
--- a/fs_mgr/libsnapshot/snapuserd.cpp
+++ b/fs_mgr/libsnapshot/snapuserd.cpp
@@ -17,6 +17,8 @@
#include "snapuserd.h"
#include <csignal>
+#include <optional>
+#include <set>
#include <libsnapshot/snapuserd_client.h>
@@ -32,7 +34,7 @@
static constexpr size_t PAYLOAD_SIZE = (1UL << 20);
-static_assert(PAYLOAD_SIZE >= BLOCK_SIZE);
+static_assert(PAYLOAD_SIZE >= BLOCK_SZ);
void BufferSink::Initialize(size_t size) {
buffer_size_ = size;
@@ -78,10 +80,10 @@
// request will always be 4k. After constructing
// the header, zero out the remaining block.
void Snapuserd::ConstructKernelCowHeader() {
- void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+ void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
CHECK(buffer != nullptr);
- memset(buffer, 0, BLOCK_SIZE);
+ memset(buffer, 0, BLOCK_SZ);
struct disk_header* dh = reinterpret_cast<struct disk_header*>(buffer);
@@ -106,13 +108,13 @@
// Start the copy operation. This will read the backing
// block device which is represented by cow_op->source.
bool Snapuserd::ProcessCopyOp(const CowOperation* cow_op) {
- void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+ void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
CHECK(buffer != nullptr);
// Issue a single 4K IO. However, this can be optimized
// if the successive blocks are contiguous.
- if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SIZE,
- cow_op->source * BLOCK_SIZE)) {
+ if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SZ,
+ cow_op->source * BLOCK_SZ)) {
SNAP_PLOG(ERROR) << "Copy-op failed. Read from backing store: " << backing_store_device_
<< "at block :" << cow_op->source;
return false;
@@ -123,10 +125,10 @@
bool Snapuserd::ProcessZeroOp() {
// Zero out the entire block
- void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+ void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
CHECK(buffer != nullptr);
- memset(buffer, 0, BLOCK_SIZE);
+ memset(buffer, 0, BLOCK_SZ);
return true;
}
@@ -173,11 +175,11 @@
struct dm_user_message* msg = (struct dm_user_message*)(&(buffer[0]));
memmove(msg->payload.buf, (char*)msg->payload.buf + skip_sector_size,
- (BLOCK_SIZE - skip_sector_size));
+ (BLOCK_SZ - skip_sector_size));
}
bufsink_.ResetBufferOffset();
- return std::min(size, (BLOCK_SIZE - skip_sector_size));
+ return std::min(size, (BLOCK_SZ - skip_sector_size));
}
/*
@@ -234,7 +236,7 @@
return ReadUnalignedSector(sector, size, it);
}
- int num_ops = DIV_ROUND_UP(size, BLOCK_SIZE);
+ int num_ops = DIV_ROUND_UP(size, BLOCK_SZ);
while (num_ops) {
if (!ProcessCowOp(it->second)) {
return -1;
@@ -242,7 +244,7 @@
num_ops -= 1;
it++;
// Update the buffer offset
- bufsink_.UpdateBufferOffset(BLOCK_SIZE);
+ bufsink_.UpdateBufferOffset(BLOCK_SZ);
SNAP_LOG(DEBUG) << "ReadData at sector: " << sector << " size: " << size;
}
@@ -344,7 +346,7 @@
}
int Snapuserd::GetNumberOfMergedOps(void* merged_buffer, void* unmerged_buffer, loff_t offset,
- int unmerged_exceptions, bool* copy_op) {
+ int unmerged_exceptions) {
int merged_ops_cur_iter = 0;
// Find the operations which are merged in this cycle.
@@ -362,10 +364,8 @@
offset += sizeof(struct disk_exception);
const CowOperation* cow_op = chunk_map_[ChunkToSector(cow_de->new_chunk)];
CHECK(cow_op != nullptr);
+
CHECK(cow_op->new_block == cow_de->old_chunk);
- if (cow_op->type == kCowCopyOp) {
- *copy_op = true;
- }
// zero out to indicate that operation is merged.
cow_de->old_chunk = 0;
cow_de->new_chunk = 0;
@@ -389,10 +389,6 @@
return -1;
}
}
-
- if (*copy_op) {
- CHECK(merged_ops_cur_iter == 1);
- }
return merged_ops_cur_iter;
}
@@ -414,47 +410,21 @@
int unmerged_exceptions = 0;
loff_t offset = GetMergeStartOffset(buffer, vec_[divresult.quot].get(), &unmerged_exceptions);
- bool copy_op = false;
- // Check if the merged operation is a copy operation. If so, then we need
- // to explicitly sync the metadata before initiating the next merge.
- // For ex: Consider a following sequence of copy operations in the COW file:
- //
- // Op-1: Copy 2 -> 3
- // Op-2: Copy 1 -> 2
- // Op-3: Copy 5 -> 10
- //
- // Op-1 and Op-2 are overlapping copy operations. The merge sequence will
- // look like:
- //
- // Merge op-1: Copy 2 -> 3
- // Merge op-2: Copy 1 -> 2
- // Merge op-3: Copy 5 -> 10
- //
- // Now, let's say we have a crash _after_ Merge op-2; Block 2 contents would
- // have been over-written by Block-1 after merge op-2. During next reboot,
- // kernel will request the metadata for all the un-merged blocks. If we had
- // not sync the metadata after Merge-op 1 and Merge op-2, snapuser daemon
- // will think that these merge operations are still pending and hence will
- // inform the kernel that Op-1 and Op-2 are un-merged blocks. When kernel
- // resumes back the merging process, it will attempt to redo the Merge op-1
- // once again. However, block 2 contents are wrong as it has the contents
- // of block 1 from previous merge cycle. Although, merge will silently succeed,
- // this will lead to silent data corruption.
- //
- int merged_ops_cur_iter = GetNumberOfMergedOps(buffer, vec_[divresult.quot].get(), offset,
- unmerged_exceptions, ©_op);
+ int merged_ops_cur_iter =
+ GetNumberOfMergedOps(buffer, vec_[divresult.quot].get(), offset, unmerged_exceptions);
// There should be at least one operation merged in this cycle
CHECK(merged_ops_cur_iter > 0);
header.num_merge_ops += merged_ops_cur_iter;
reader_->UpdateMergeProgress(merged_ops_cur_iter);
- if (!writer_->CommitMerge(merged_ops_cur_iter, copy_op)) {
+ if (!writer_->CommitMerge(merged_ops_cur_iter)) {
SNAP_LOG(ERROR) << "CommitMerge failed... merged_ops_cur_iter: " << merged_ops_cur_iter;
return false;
}
SNAP_LOG(DEBUG) << "Merge success: " << merged_ops_cur_iter << "chunk: " << chunk;
+ merge_initiated_ = true;
return true;
}
@@ -476,6 +446,19 @@
return next_chunk;
}
+void Snapuserd::CheckMergeCompletionStatus() {
+ CowHeader header;
+    reader_->GetHeader(&header);
+
+    if (merge_initiated_) {
+ SNAP_LOG(INFO) << "Merge-status: Total-Merged-ops: " << header.num_merge_ops
+ << " Total-data-ops: " << reader_->total_data_ops();
+ } else {
+ SNAP_LOG(INFO) << "Merge was not initiated. Total-Merged-ops: " << header.num_merge_ops
+ << " Total-data-ops: " << reader_->total_data_ops();
+ }
+}
+
/*
* Read the metadata from COW device and
* construct the metadata as required by the kernel.
@@ -506,12 +489,13 @@
* during merge; specifically when the merge operation has dependency.
* These dependencies can only happen during copy operations.
*
- * To avoid this problem, we make sure that no two copy-operations
- * do not have contiguous chunk IDs. Additionally, we make sure
- * that each copy operation is merged individually.
+ * To avoid this problem, we make sure overlapping copy operations
+ * are not batch merged.
* 6: Use a monotonically increasing chunk number to assign the
* new_chunk
- * 7: Each chunk-id represents either a: Metadata page or b: Data page
+ * 7: Each chunk-id represents either
+ * a: Metadata page or
+ * b: Data page
* 8: Chunk-id representing a data page is stored in a map.
* 9: Chunk-id representing a metadata page is converted into a vector
* index. We store this in vector as kernel requests metadata during
@@ -531,10 +515,10 @@
reader_ = std::make_unique<CowReader>();
CowHeader header;
CowOptions options;
- bool prev_copy_op = false;
bool metadata_found = false;
+ int replace_ops = 0, zero_ops = 0, copy_ops = 0;
- SNAP_LOG(DEBUG) << "ReadMetadata Start...";
+ SNAP_LOG(DEBUG) << "ReadMetadata: Parsing cow file";
if (!reader_->Parse(cow_fd_)) {
SNAP_LOG(ERROR) << "Failed to parse";
@@ -546,10 +530,10 @@
return false;
}
- CHECK(header.block_size == BLOCK_SIZE);
+ CHECK(header.block_size == BLOCK_SZ);
- SNAP_LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
reader_->InitializeMerge();
+ SNAP_LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
writer_ = std::make_unique<CowWriter>(options);
writer_->InitializeMerge(cow_fd_.get(), &header);
@@ -584,17 +568,26 @@
}
metadata_found = true;
- if ((cow_op->type == kCowCopyOp || prev_copy_op)) {
+ // This loop will handle all the replace and zero ops.
+        // We will handle the copy ops later, as they require special
+        // handling when assigning chunk-ids. Furthermore, we make
+        // sure that replace/zero and copy ops are not batch merged; hence
+        // the chunk_id is bumped before breaking out of this loop.
+ if (cow_op->type == kCowCopyOp) {
data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+ break;
}
- prev_copy_op = (cow_op->type == kCowCopyOp);
+ if (cow_op->type == kCowReplaceOp) {
+ replace_ops++;
+ } else if (cow_op->type == kCowZeroOp) {
+ zero_ops++;
+ }
// Construct the disk-exception
de->old_chunk = cow_op->new_block;
de->new_chunk = data_chunk_id;
- SNAP_LOG(DEBUG) << "Old-chunk: " << de->old_chunk << "New-chunk: " << de->new_chunk;
// Store operation pointer.
chunk_map_[ChunkToSector(data_chunk_id)] = cow_op;
@@ -602,6 +595,9 @@
offset += sizeof(struct disk_exception);
cowop_riter_->Next();
+ SNAP_LOG(DEBUG) << num_ops << ":"
+ << " Old-chunk: " << de->old_chunk << " New-chunk: " << de->new_chunk;
+
if (num_ops == exceptions_per_area_) {
// Store it in vector at the right index. This maps the chunk-id to
// vector index.
@@ -616,13 +612,213 @@
if (cowop_riter_->Done()) {
vec_.push_back(std::move(de_ptr));
- SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
}
}
data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
}
+ std::optional<chunk_t> prev_id = {};
+ std::map<uint64_t, const CowOperation*> map;
+ std::set<uint64_t> dest_blocks;
+ size_t pending_copy_ops = exceptions_per_area_ - num_ops;
+ SNAP_LOG(INFO) << " Processing copy-ops at Area: " << vec_.size()
+ << " Number of replace/zero ops completed in this area: " << num_ops
+ << " Pending copy ops for this area: " << pending_copy_ops;
+ while (!cowop_riter_->Done()) {
+ do {
+ const CowOperation* cow_op = &cowop_riter_->Get();
+ if (IsMetadataOp(*cow_op)) {
+ cowop_riter_->Next();
+ continue;
+ }
+
+            // We have two specific cases:
+ //
+ // =====================================================
+ // Case 1: Overlapping copy regions
+ //
+ // Ex:
+ //
+ // Source -> Destination
+ //
+ // 1: 15 -> 18
+ // 2: 16 -> 19
+ // 3: 17 -> 20
+ // 4: 18 -> 21
+ // 5: 19 -> 22
+ // 6: 20 -> 23
+ //
+            // We have 6 copy operations to be executed in OTA and there is an overlap. Update-engine
+ // will write to COW file as follows:
+ //
+ // Op-1: 20 -> 23
+ // Op-2: 19 -> 22
+ // Op-3: 18 -> 21
+ // Op-4: 17 -> 20
+ // Op-5: 16 -> 19
+ // Op-6: 15 -> 18
+ //
+            // Note that the block numbers are contiguous. Hence, all 6 copy
+ // operations can potentially be batch merged. However, that will be
+ // problematic if we have a crash as block 20, 19, 18 would have
+ // been overwritten and hence subsequent recovery may end up with
+ // a silent data corruption when op-1, op-2 and op-3 are
+ // re-executed.
+ //
+ // We will split these 6 operations into two batches viz:
+ //
+ // Batch-1:
+ // ===================
+ // Op-1: 20 -> 23
+ // Op-2: 19 -> 22
+ // Op-3: 18 -> 21
+ // ===================
+ //
+ // Batch-2:
+ // ==================
+ // Op-4: 17 -> 20
+ // Op-5: 16 -> 19
+ // Op-6: 15 -> 18
+ // ==================
+ //
+ // Now, merge sequence will look like:
+ //
+ // 1: Merge Batch-1 { op-1, op-2, op-3 }
+ // 2: Update Metadata in COW File that op-1, op-2, op-3 merge is
+ // done.
+ // 3: Merge Batch-2
+ // 4: Update Metadata in COW File that op-4, op-5, op-6 merge is
+ // done.
+ //
+            // Note that the order of block operations is still the same.
+            // However, we now have two batch merge operations. Any crash
+            // between these two batches is safe, as each of them is
+            // self-contained.
+ //
+ //===========================================================
+ //
+ // Case 2:
+ //
+ // Let's say we have three copy operations written to COW file
+ // in the following order:
+ //
+ // op-1: 15 -> 18
+ // op-2: 16 -> 19
+ // op-3: 17 -> 20
+ //
+ // As aforementioned, kernel will initiate merge in reverse order.
+ // Hence, we will read these ops in reverse order so that all these
+            // ops are executed in the same order as requested. Thus, we will
+ // read the metadata in reverse order and for the kernel it will
+ // look like:
+ //
+ // op-3: 17 -> 20
+ // op-2: 16 -> 19
+ // op-1: 15 -> 18 <-- Merge starts here in the kernel
+ //
+ // Now, this is problematic as kernel cannot batch merge them.
+ //
+ // Merge sequence will look like:
+ //
+ // Merge-1: op-1: 15 -> 18
+ // Merge-2: op-2: 16 -> 19
+ // Merge-3: op-3: 17 -> 20
+ //
+ // We have three merge operations.
+ //
+ // Even though the blocks are contiguous, kernel can batch merge
+ // them if the blocks are in descending order. Update engine
+ // addresses this issue partially for overlapping operations as
+            // we see that op-1 to op-3 and op-4 to op-6 operations are in
+ // descending order. However, if the copy operations are not
+ // overlapping, update engine cannot write these blocks
+ // in descending order. Hence, we will try to address it.
+ // Thus, we will send these blocks to the kernel and it will
+ // look like:
+ //
+ // op-3: 15 -> 18
+ // op-2: 16 -> 19
+ // op-1: 17 -> 20 <-- Merge starts here in the kernel
+ //
+ // Now with this change, we can batch merge all these three
+ // operations. Merge sequence will look like:
+ //
+ // Merge-1: {op-1: 17 -> 20, op-2: 16 -> 19, op-3: 15 -> 18}
+ //
+ // Note that we have changed the ordering of merge; However, this
+ // is ok as each of these copy operations are independent and there
+ // is no overlap.
+ //
+ //===================================================================
+ if (prev_id.has_value()) {
+ chunk_t diff = (cow_op->new_block > prev_id.value())
+ ? (cow_op->new_block - prev_id.value())
+ : (prev_id.value() - cow_op->new_block);
+ if (diff != 1) {
+ break;
+ }
+ if (dest_blocks.count(cow_op->new_block) || map.count(cow_op->source) > 0) {
+ break;
+ }
+ }
+ metadata_found = true;
+ pending_copy_ops -= 1;
+ map[cow_op->new_block] = cow_op;
+ dest_blocks.insert(cow_op->source);
+ prev_id = cow_op->new_block;
+ cowop_riter_->Next();
+ } while (!cowop_riter_->Done() && pending_copy_ops);
+
+ data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+ SNAP_LOG(DEBUG) << "Batch Merge copy-ops of size: " << map.size()
+ << " Area: " << vec_.size() << " Area offset: " << offset
+ << " Pending-copy-ops in this area: " << pending_copy_ops;
+
+ for (auto it = map.begin(); it != map.end(); it++) {
+ struct disk_exception* de =
+ reinterpret_cast<struct disk_exception*>((char*)de_ptr.get() + offset);
+ de->old_chunk = it->first;
+ de->new_chunk = data_chunk_id;
+
+ // Store operation pointer.
+ chunk_map_[ChunkToSector(data_chunk_id)] = it->second;
+ offset += sizeof(struct disk_exception);
+ num_ops += 1;
+ copy_ops++;
+
+ SNAP_LOG(DEBUG) << num_ops << ":"
+ << " Copy-op: "
+ << " Old-chunk: " << de->old_chunk << " New-chunk: " << de->new_chunk;
+
+ if (num_ops == exceptions_per_area_) {
+ // Store it in vector at the right index. This maps the chunk-id to
+ // vector index.
+ vec_.push_back(std::move(de_ptr));
+ num_ops = 0;
+ offset = 0;
+
+ // Create buffer for next area
+ de_ptr = std::make_unique<uint8_t[]>(exceptions_per_area_ *
+ sizeof(struct disk_exception));
+ memset(de_ptr.get(), 0, (exceptions_per_area_ * sizeof(struct disk_exception)));
+
+ if (cowop_riter_->Done()) {
+ vec_.push_back(std::move(de_ptr));
+ SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
+ }
+
+ CHECK(pending_copy_ops == 0);
+ pending_copy_ops = exceptions_per_area_;
+ }
+
+ data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+ }
+ map.clear();
+ dest_blocks.clear();
+ prev_id.reset();
+ }
+
// Partially filled area or there is no metadata
// If there is no metadata, fill with zero so that kernel
// is aware that merge is completed.
@@ -632,12 +828,17 @@
<< "Areas : " << vec_.size();
}
- SNAP_LOG(DEBUG) << "ReadMetadata() completed. Final_chunk_id: " << data_chunk_id
- << "Num Sector: " << ChunkToSector(data_chunk_id);
+ SNAP_LOG(INFO) << "ReadMetadata completed. Final-chunk-id: " << data_chunk_id
+ << " Num Sector: " << ChunkToSector(data_chunk_id)
+ << " Replace-ops: " << replace_ops << " Zero-ops: " << zero_ops
+ << " Copy-ops: " << copy_ops << " Areas: " << vec_.size()
+ << " Num-ops-merged: " << header.num_merge_ops
+ << " Total-data-ops: " << reader_->total_data_ops();
// Total number of sectors required for creating dm-user device
num_sectors_ = ChunkToSector(data_chunk_id);
metadata_read_done_ = true;
+ merge_initiated_ = false;
return true;
}
@@ -742,7 +943,7 @@
size_t remaining_size = header->len;
size_t read_size = std::min(PAYLOAD_SIZE, remaining_size);
- CHECK(read_size == BLOCK_SIZE);
+ CHECK(read_size == BLOCK_SZ);
CHECK(header->sector > 0);
chunk_t chunk = SectorToChunk(header->sector);
@@ -793,11 +994,11 @@
// will always be a single 4k.
if (header->sector == 0) {
CHECK(metadata_read_done_ == true);
- CHECK(read_size == BLOCK_SIZE);
+ CHECK(read_size == BLOCK_SZ);
ConstructKernelCowHeader();
SNAP_LOG(DEBUG) << "Kernel header constructed";
} else {
- if (!offset && (read_size == BLOCK_SIZE) &&
+ if (!offset && (read_size == BLOCK_SZ) &&
chunk_map_.find(header->sector) == chunk_map_.end()) {
if (!ReadDiskExceptions(chunk, read_size)) {
SNAP_LOG(ERROR) << "ReadDiskExceptions failed for chunk id: " << chunk
diff --git a/fs_mgr/libsnapshot/snapuserd.h b/fs_mgr/libsnapshot/snapuserd.h
index c01fee3..518d08b 100644
--- a/fs_mgr/libsnapshot/snapuserd.h
+++ b/fs_mgr/libsnapshot/snapuserd.h
@@ -70,6 +70,14 @@
const std::string& GetMiscName() { return misc_name_; }
uint64_t GetNumSectors() { return num_sectors_; }
bool IsAttached() const { return ctrl_fd_ >= 0; }
+ void CheckMergeCompletionStatus();
+ void CloseFds() {
+ ctrl_fd_ = {};
+ cow_fd_ = {};
+ backing_store_fd_ = {};
+ }
+ size_t GetMetadataAreaSize() { return vec_.size(); }
+ void* GetExceptionBuffer(size_t i) { return vec_[i].get(); }
private:
bool DmuserReadRequest();
@@ -96,11 +104,11 @@
loff_t GetMergeStartOffset(void* merged_buffer, void* unmerged_buffer,
int* unmerged_exceptions);
int GetNumberOfMergedOps(void* merged_buffer, void* unmerged_buffer, loff_t offset,
- int unmerged_exceptions, bool* copy_op);
+ int unmerged_exceptions);
bool ProcessMergeComplete(chunk_t chunk, void* buffer);
sector_t ChunkToSector(chunk_t chunk) { return chunk << CHUNK_SHIFT; }
chunk_t SectorToChunk(sector_t sector) { return sector >> CHUNK_SHIFT; }
- bool IsBlockAligned(int read_size) { return ((read_size & (BLOCK_SIZE - 1)) == 0); }
+ bool IsBlockAligned(int read_size) { return ((read_size & (BLOCK_SZ - 1)) == 0); }
std::string cow_device_;
std::string backing_store_device_;
@@ -134,6 +142,7 @@
std::map<sector_t, const CowOperation*> chunk_map_;
bool metadata_read_done_ = false;
+ bool merge_initiated_ = false;
BufferSink bufsink_;
};
diff --git a/fs_mgr/libsnapshot/snapuserd_server.cpp b/fs_mgr/libsnapshot/snapuserd_server.cpp
index 38abaec..017de3b 100644
--- a/fs_mgr/libsnapshot/snapuserd_server.cpp
+++ b/fs_mgr/libsnapshot/snapuserd_server.cpp
@@ -210,8 +210,11 @@
}
}
+ handler->snapuserd()->CloseFds();
+
auto misc_name = handler->misc_name();
LOG(INFO) << "Handler thread about to exit: " << misc_name;
+ handler->snapuserd()->CheckMergeCompletionStatus();
{
std::lock_guard<std::mutex> lock(lock_);
diff --git a/fs_mgr/tests/adb-remount-test.sh b/fs_mgr/tests/adb-remount-test.sh
index ec73253..242fa93 100755
--- a/fs_mgr/tests/adb-remount-test.sh
+++ b/fs_mgr/tests/adb-remount-test.sh
@@ -881,10 +881,10 @@
[ -z "${D}" -o -n "${ANDROID_SERIAL}" ] || ANDROID_SERIAL=${D}
USB_SERIAL=
if [ -n "${ANDROID_SERIAL}" -a "Darwin" != "${HOSTOS}" ]; then
- USB_SERIAL="`find /sys/devices -name serial | grep usb`"
+ USB_SERIAL="`find /sys/devices -name serial | grep usb || true`"
if [ -n "${USB_SERIAL}" ]; then
USB_SERIAL=`echo "${USB_SERIAL}" |
- xargs grep -l ${ANDROID_SERIAL}`
+ xargs grep -l ${ANDROID_SERIAL} || true`
fi
fi
USB_ADDRESS=
@@ -1315,7 +1315,7 @@
[ -n "${VENDOR_DEVT%[0-9a-fA-F][0-9a-fA-F]}" ] ||
echo "${YELLOW}[ WARNING ]${NORMAL} vendor devt ${VENDOR_DEVT} major 0" >&2
-# Download libc.so, append some gargage, push back, and check if the file
+# Download libc.so, append some garbage, push back, and check if the file
# is updated.
tempdir="`mktemp -d`"
cleanup() {
@@ -1323,8 +1323,8 @@
}
adb pull /system/lib/bootstrap/libc.so ${tempdir} >/dev/null ||
die "pull libc.so from device"
-garbage="`hexdump -n 16 -e '4/4 "%08X" 1 "\n"' /dev/random`"
-echo ${garbage} >> ${tempdir}/libc.so
+garbage="D105225BBFCB1EB8AB8EBDB7094646F0"
+echo "${garbage}" >> ${tempdir}/libc.so
adb push ${tempdir}/libc.so /system/lib/bootstrap/libc.so >/dev/null ||
die "push libc.so to device"
adb pull /system/lib/bootstrap/libc.so ${tempdir}/libc.so.fromdevice >/dev/null ||
diff --git a/gatekeeperd/gatekeeperd.cpp b/gatekeeperd/gatekeeperd.cpp
index 781b4af..f9c0cdd 100644
--- a/gatekeeperd/gatekeeperd.cpp
+++ b/gatekeeperd/gatekeeperd.cpp
@@ -318,6 +318,7 @@
authToken.timestamp.milliSeconds = betoh64(hwAuthToken->timestamp);
authToken.challenge = hwAuthToken->challenge;
+ authToken.userId = hwAuthToken->user_id;
authToken.authenticatorId = hwAuthToken->authenticator_id;
authToken.authenticatorType = static_cast<HardwareAuthenticatorType>(
betoh32(hwAuthToken->authenticator_type));
diff --git a/healthd/BatteryMonitor.cpp b/healthd/BatteryMonitor.cpp
index fd810cb..377acb7 100644
--- a/healthd/BatteryMonitor.cpp
+++ b/healthd/BatteryMonitor.cpp
@@ -349,9 +349,14 @@
}
void BatteryMonitor::logValues(void) {
+ logValues(*mHealthInfo, *mHealthdConfig);
+}
+
+void BatteryMonitor::logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
+ const struct healthd_config& healthd_config) {
char dmesgline[256];
size_t len;
- const HealthInfo_1_0& props = mHealthInfo->legacy.legacy;
+ const HealthInfo_1_0& props = health_info.legacy.legacy;
if (props.batteryPresent) {
snprintf(dmesgline, sizeof(dmesgline), "battery l=%d v=%d t=%s%d.%d h=%d st=%d",
props.batteryLevel, props.batteryVoltage, props.batteryTemperature < 0 ? "-" : "",
@@ -359,17 +364,17 @@
props.batteryHealth, props.batteryStatus);
len = strlen(dmesgline);
- if (!mHealthdConfig->batteryCurrentNowPath.isEmpty()) {
+ if (!healthd_config.batteryCurrentNowPath.isEmpty()) {
len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " c=%d",
props.batteryCurrent);
}
- if (!mHealthdConfig->batteryFullChargePath.isEmpty()) {
+ if (!healthd_config.batteryFullChargePath.isEmpty()) {
len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " fc=%d",
props.batteryFullCharge);
}
- if (!mHealthdConfig->batteryCycleCountPath.isEmpty()) {
+ if (!healthd_config.batteryCycleCountPath.isEmpty()) {
len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " cc=%d",
props.batteryCycleCount);
}
diff --git a/healthd/include/healthd/BatteryMonitor.h b/healthd/include/healthd/BatteryMonitor.h
index fadb5a5..3cda727 100644
--- a/healthd/include/healthd/BatteryMonitor.h
+++ b/healthd/include/healthd/BatteryMonitor.h
@@ -66,6 +66,9 @@
void logValues(void);
bool isChargerOnline();
+ static void logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
+ const struct healthd_config& healthd_config);
+
private:
struct healthd_config *mHealthdConfig;
Vector<String8> mChargerNames;
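
Because logValues() now also exists as a static overload taking the health info and config explicitly, a caller that never constructs a BatteryMonitor can reuse the same dmesg-style battery line. A hypothetical caller sketch (the helper name is illustrative, not from this patch):

#include <healthd/BatteryMonitor.h>

// Hypothetical caller: log a battery snapshot given only the data structures,
// e.g. from a charger-mode loop that never instantiates BatteryMonitor.
static void LogBatterySnapshot(const android::hardware::health::V2_1::HealthInfo& health_info,
                               const struct healthd_config& healthd_config) {
    android::BatteryMonitor::logValues(health_info, healthd_config);
}
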
diff --git a/init/Android.bp b/init/Android.bp
index b0a59b1..5da8e36 100644
--- a/init/Android.bp
+++ b/init/Android.bp
@@ -235,6 +235,119 @@
visibility: ["//packages/modules/Virtualization/microdroid"],
}
+// This is currently only for the VM use case.
+// TODO(jiyong): replace init_first_stage in Android.mk with this
+cc_binary {
+ name: "init_first_stage_soong",
+ stem: "init_vendor",
+
+ srcs: [
+ "block_dev_initializer.cpp",
+ "devices.cpp",
+ "first_stage_console.cpp",
+ "first_stage_init.cpp",
+ "first_stage_main.cpp",
+ "first_stage_mount.cpp",
+ "reboot_utils.cpp",
+ "selabel.cpp",
+ "selinux.cpp",
+ "service_utils.cpp",
+ "snapuserd_transition.cpp",
+ "switch_root.cpp",
+ "uevent_listener.cpp",
+ "util.cpp",
+ ],
+
+ static_libs: [
+ "libc++fs",
+ "libfs_avb",
+ "libfs_mgr",
+ "libfec",
+ "libfec_rs",
+ "libsquashfs_utils",
+ "liblogwrap",
+ "libext4_utils",
+ "libcrypto_utils",
+ "libsparse",
+ "libavb",
+ "libkeyutils",
+ "liblp",
+ "libcutils",
+ "libbase",
+ "liblog",
+ "libcrypto_static",
+ "libdl",
+ "libz",
+ "libselinux",
+ "libcap",
+ "libgsi",
+ "libcom.android.sysprop.apex",
+ "liblzma",
+ "libunwindstack_no_dex",
+ "libbacktrace_no_dex",
+ "libmodprobe",
+ "libext2_uuid",
+ "libprotobuf-cpp-lite",
+ "libsnapshot_cow",
+ "libsnapshot_init",
+ "update_metadata-protos",
+ ],
+
+ static_executable: true,
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Wno-unused-parameter",
+ "-Werror",
+ "-DALLOW_FIRST_STAGE_CONSOLE=0",
+ "-DALLOW_LOCAL_PROP_OVERRIDE=0",
+ "-DALLOW_PERMISSIVE_SELINUX=0",
+ "-DREBOOT_BOOTLOADER_ON_PANIC=0",
+ "-DWORLD_WRITABLE_KMSG=0",
+ "-DDUMP_ON_UMOUNT_FAILURE=0",
+ "-DSHUTDOWN_ZERO_TIMEOUT=0",
+ "-DLOG_UEVENTS=0",
+ "-DSEPOLICY_VERSION=30", // TODO(jiyong): externalize the version number
+ ],
+
+ product_variables: {
+ debuggable: {
+ cflags: [
+ "-UALLOW_FIRST_STAGE_CONSOLE",
+ "-DALLOW_FIRST_STAGE_CONSOLE=1",
+
+ "-UALLOW_LOCAL_PROP_OVERRIDE",
+ "-DALLOW_LOCAL_PROP_OVERRIDE=1",
+
+ "-UALLOW_PERMISSIVE_SELINUX",
+ "-DALLOW_PERMISSIVE_SELINUX=1",
+
+ "-UREBOOT_BOOTLOADER_ON_PANIC",
+ "-DREBOOT_BOOTLOADER_ON_PANIC=1",
+
+ "-UWORLD_WRITABLE_KMSG",
+ "-DWORLD_WRITABLE_KMSG=1",
+
+ "-UDUMP_ON_UMOUNT_FAILURE",
+ "-DDUMP_ON_UMOUNT_FAILURE=1",
+ ],
+ },
+
+ eng: {
+ cflags: [
+ "-USHUTDOWN_ZERO_TIMEOUT",
+ "-DSHUTDOWN_ZERO_TIMEOUT=1",
+ ],
+ },
+ },
+
+ sanitize: {
+ misc_undefined: ["signed-integer-overflow"],
+ hwaddress: false,
+ },
+}
+
// Tests
// ------------------------------------------------------------------------------
diff --git a/init/init.cpp b/init/init.cpp
index 0c752a9..b08037a 100644
--- a/init/init.cpp
+++ b/init/init.cpp
@@ -723,37 +723,6 @@
}
}
-static Result<void> TransitionSnapuserdAction(const BuiltinArguments&) {
- if (!SnapshotManager::IsSnapshotManagerNeeded() ||
- !android::base::GetBoolProperty(android::snapshot::kVirtualAbCompressionProp, false)) {
- return {};
- }
-
- auto sm = SnapshotManager::New();
- if (!sm) {
- LOG(FATAL) << "Failed to create SnapshotManager, will not transition snapuserd";
- return {};
- }
-
- ServiceList& service_list = ServiceList::GetInstance();
- auto svc = service_list.FindService("snapuserd");
- if (!svc) {
- LOG(FATAL) << "Failed to find snapuserd service, aborting transition";
- return {};
- }
- svc->Start();
- svc->SetShutdownCritical();
-
- if (!sm->PerformSecondStageInitTransition()) {
- LOG(FATAL) << "Failed to transition snapuserd to second-stage";
- }
-
- if (auto pid = GetSnapuserdFirstStagePid()) {
- KillFirstStageSnapuserd(pid.value());
- }
- return {};
-}
-
int SecondStageMain(int argc, char** argv) {
if (REBOOT_BOOTLOADER_ON_PANIC) {
InstallRebootSignalHandlers();
@@ -900,7 +869,6 @@
// Queue an action that waits for coldboot done so we know ueventd has set up all of /dev...
am.QueueBuiltinAction(wait_for_coldboot_done_action, "wait_for_coldboot_done");
- am.QueueBuiltinAction(TransitionSnapuserdAction, "TransitionSnapuserd");
// ... so that we can start queuing up actions that require stuff from /dev.
am.QueueBuiltinAction(SetMmapRndBitsAction, "SetMmapRndBits");
Keychords keychords;
diff --git a/libcutils/trace-dev.inc b/libcutils/trace-dev.inc
index 821f1ea..3b459e0 100644
--- a/libcutils/trace-dev.inc
+++ b/libcutils/trace-dev.inc
@@ -21,6 +21,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <fnmatch.h>
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
@@ -106,7 +107,7 @@
for (int i = 0; i < count; i++) {
snprintf(buf, sizeof(buf), "debug.atrace.app_%d", i);
property_get(buf, value, "");
- if (strcmp(value, "*") == 0 || strcmp(value, cmdline) == 0) {
+ if (fnmatch(value, cmdline, FNM_NOESCAPE) == 0) {
return true;
}
}
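
With strcmp() replaced by fnmatch(), the debug.atrace.app_N property values are treated as shell-style globs instead of requiring an exact command-line match, while "*" keeps its old catch-all behavior. A small standalone illustration (hypothetical values, not from the patch):

#include <fnmatch.h>
#include <stdio.h>

// Each pattern stands in for a debug.atrace.app_N property value; cmdline stands in
// for the process command line being checked.
int main() {
    const char* cmdline = "com.example.app:service";
    const char* patterns[] = {"*", "com.example.*", "com.other.app"};
    for (const char* pattern : patterns) {
        bool enabled = (fnmatch(pattern, cmdline, FNM_NOESCAPE) == 0);
        printf("%-16s -> %s\n", pattern, enabled ? "tracing enabled" : "no match");
    }
    return 0;
}
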
diff --git a/libprocessgroup/include/processgroup/processgroup.h b/libprocessgroup/include/processgroup/processgroup.h
index 1cadc9f..1aaed7b 100644
--- a/libprocessgroup/include/processgroup/processgroup.h
+++ b/libprocessgroup/include/processgroup/processgroup.h
@@ -36,7 +36,8 @@
static constexpr const char* CGROUPS_RC_PATH = "/dev/cgroup_info/cgroup.rc";
// Path to test against for freezer support
-static constexpr const char* CGROUP_FREEZE_PATH = "/sys/fs/cgroup/freezer/cgroup.freeze";
+// TODO: remove and replace with a function call, see http://b/180056337
+static constexpr const char* CGROUP_FREEZE_PATH = "/sys/fs/cgroup/uid_0/cgroup.freeze";
bool UsePerAppMemcg();
diff --git a/libprocessgroup/processgroup.cpp b/libprocessgroup/processgroup.cpp
index d669ebe..209ccd9 100644
--- a/libprocessgroup/processgroup.cpp
+++ b/libprocessgroup/processgroup.cpp
@@ -131,13 +131,25 @@
return StringPrintf("%s/uid_%d/pid_%d", cgroup, uid, pid);
}
-static int RemoveProcessGroup(const char* cgroup, uid_t uid, int pid) {
- int ret;
-
+static int RemoveProcessGroup(const char* cgroup, uid_t uid, int pid, unsigned int retries) {
+ int ret = 0;
auto uid_pid_path = ConvertUidPidToPath(cgroup, uid, pid);
- ret = rmdir(uid_pid_path.c_str());
-
auto uid_path = ConvertUidToPath(cgroup, uid);
+
+ if (retries == 0) {
+ retries = 1;
+ }
+
+ while (retries--) {
+ ret = rmdir(uid_pid_path.c_str());
+ if (!ret || errno != EBUSY) break;
+ std::this_thread::sleep_for(5ms);
+ }
+
+ // With the exception of boot or shutdown, system uid_ folders are always populated. Spinning
+ // here would needlessly delay most pid removals. Additionally, once empty a uid_ cgroup won't
+ // have processes hanging on it (we've already spun for all its pid_), so there's no need to
+ // spin anyway.
rmdir(uid_path.c_str());
return ret;
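
RemoveProcessGroup() now retries only the pid_ directory: rmdir() on a cgroup returns EBUSY while the kernel is still tearing down its members, so a few short sleeps are usually enough. A standalone sketch of the same retry-on-EBUSY idiom; the helper name and the 5 ms delay are illustrative, not part of the library API:

    #include <cerrno>
    #include <chrono>
    #include <thread>
    #include <unistd.h>

    // Retry rmdir() while the kernel still reports the cgroup directory busy.
    static int RmdirWithRetries(const char* path, unsigned int retries) {
        using namespace std::chrono_literals;
        if (retries == 0) retries = 1;
        int ret = 0;
        while (retries--) {
            ret = rmdir(path);
            if (ret == 0 || errno != EBUSY) break;
            std::this_thread::sleep_for(5ms);
        }
        return ret;
    }
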
@@ -176,7 +188,7 @@
std::vector<std::string> cgroups;
std::string path;
- if (CgroupGetControllerPath("cpuacct", &path)) {
+ if (CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &path)) {
cgroups.push_back(path);
}
if (CgroupGetControllerPath("memory", &path)) {
@@ -212,19 +224,49 @@
}
}
+/**
+ * Process groups are primarily created by the Zygote, meaning that uid/pid groups are created by
+ * the user root. Ownership for the newly created cgroup and all of its files must thus be
+ * transferred to the user/group passed as uid/gid before system_server can properly access them.
+ */
static bool MkdirAndChown(const std::string& path, mode_t mode, uid_t uid, gid_t gid) {
if (mkdir(path.c_str(), mode) == -1 && errno != EEXIST) {
return false;
}
- if (chown(path.c_str(), uid, gid) == -1) {
- int saved_errno = errno;
- rmdir(path.c_str());
- errno = saved_errno;
- return false;
+ auto dir = std::unique_ptr<DIR, decltype(&closedir)>(opendir(path.c_str()), closedir);
+
+ if (dir == NULL) {
+ PLOG(ERROR) << "opendir failed for " << path;
+ goto err;
+ }
+
+ struct dirent* dir_entry;
+ while ((dir_entry = readdir(dir.get()))) {
+ if (!strcmp("..", dir_entry->d_name)) {
+ continue;
+ }
+
+ std::string file_path = path + "/" + dir_entry->d_name;
+
+ if (lchown(file_path.c_str(), uid, gid) < 0) {
+ PLOG(ERROR) << "lchown failed for " << file_path;
+ goto err;
+ }
+
+ if (fchmodat(AT_FDCWD, file_path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) {
+ PLOG(ERROR) << "fchmodat failed for " << file_path;
+ goto err;
+ }
}
return true;
+err:
+ int saved_errno = errno;
+ rmdir(path.c_str());
+ errno = saved_errno;
+
+ return false;
}
// Returns number of processes killed on success
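
The MkdirAndChown() walk above hands the freshly created cgroup directory, and every control file the kernel materializes inside it, over to the requested uid/gid without following symlinks. Purely for illustration, the same effect expressed with std::filesystem; the patch itself keeps the raw readdir() loop, and error handling is omitted here:

    #include <fcntl.h>
    #include <filesystem>
    #include <string>
    #include <sys/stat.h>
    #include <unistd.h>

    // Illustrative equivalent: chown/chmod the directory itself plus each
    // entry the kernel created inside it, never following symlinks.
    static void ChownCgroupDir(const std::string& path, mode_t mode, uid_t uid, gid_t gid) {
        lchown(path.c_str(), uid, gid);
        fchmodat(AT_FDCWD, path.c_str(), mode, AT_SYMLINK_NOFOLLOW);
        for (const auto& entry : std::filesystem::directory_iterator(path)) {
            lchown(entry.path().c_str(), uid, gid);
            fchmodat(AT_FDCWD, entry.path().c_str(), mode, AT_SYMLINK_NOFOLLOW);
        }
    }
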
@@ -302,17 +344,9 @@
static int KillProcessGroup(uid_t uid, int initialPid, int signal, int retries,
int* max_processes) {
- std::string cpuacct_path;
- std::string memory_path;
-
- CgroupGetControllerPath("cpuacct", &cpuacct_path);
- CgroupGetControllerPath("memory", &memory_path);
- memory_path += "/apps";
-
- const char* cgroup =
- (!access(ConvertUidPidToPath(cpuacct_path.c_str(), uid, initialPid).c_str(), F_OK))
- ? cpuacct_path.c_str()
- : memory_path.c_str();
+ std::string hierarchy_root_path;
+ CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &hierarchy_root_path);
+ const char* cgroup = hierarchy_root_path.c_str();
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
@@ -355,7 +389,17 @@
LOG(INFO) << "Successfully killed process cgroup uid " << uid << " pid " << initialPid
<< " in " << static_cast<int>(ms) << "ms";
}
- return RemoveProcessGroup(cgroup, uid, initialPid);
+
+ int err = RemoveProcessGroup(cgroup, uid, initialPid, retries);
+
+ if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
+ std::string memory_path;
+ CgroupGetControllerPath("memory", &memory_path);
+ memory_path += "/apps";
+ if (RemoveProcessGroup(memory_path.c_str(), uid, initialPid, retries)) return -1;
+ }
+
+ return err;
} else {
if (retries > 0) {
LOG(ERROR) << "Failed to kill process cgroup uid " << uid << " pid " << initialPid
@@ -374,25 +418,30 @@
return KillProcessGroup(uid, initialPid, signal, 0 /*retries*/, max_processes);
}
-int createProcessGroup(uid_t uid, int initialPid, bool memControl) {
- std::string cgroup;
- if (isMemoryCgroupSupported() && (memControl || UsePerAppMemcg())) {
- CgroupGetControllerPath("memory", &cgroup);
- cgroup += "/apps";
- } else {
- CgroupGetControllerPath("cpuacct", &cgroup);
- }
-
+static int createProcessGroupInternal(uid_t uid, int initialPid, std::string cgroup) {
auto uid_path = ConvertUidToPath(cgroup.c_str(), uid);
- if (!MkdirAndChown(uid_path, 0750, AID_SYSTEM, AID_SYSTEM)) {
+ struct stat cgroup_stat;
+ mode_t cgroup_mode = 0750;
+ uid_t cgroup_uid = AID_SYSTEM;
+ gid_t cgroup_gid = AID_SYSTEM;
+
+ if (stat(cgroup.c_str(), &cgroup_stat) != 0) {
+ PLOG(ERROR) << "Failed to get stats for " << cgroup;
+ } else {
+ cgroup_mode = cgroup_stat.st_mode;
+ cgroup_uid = cgroup_stat.st_uid;
+ cgroup_gid = cgroup_stat.st_gid;
+ }
+
+ if (!MkdirAndChown(uid_path, cgroup_mode, cgroup_uid, cgroup_gid)) {
PLOG(ERROR) << "Failed to make and chown " << uid_path;
return -errno;
}
auto uid_pid_path = ConvertUidPidToPath(cgroup.c_str(), uid, initialPid);
- if (!MkdirAndChown(uid_pid_path, 0750, AID_SYSTEM, AID_SYSTEM)) {
+ if (!MkdirAndChown(uid_pid_path, cgroup_mode, cgroup_uid, cgroup_gid)) {
PLOG(ERROR) << "Failed to make and chown " << uid_pid_path;
return -errno;
}
@@ -408,6 +457,27 @@
return ret;
}
+int createProcessGroup(uid_t uid, int initialPid, bool memControl) {
+ std::string cgroup;
+
+ if (memControl && !UsePerAppMemcg()) {
+ PLOG(ERROR) << "service memory controls are used without per-process memory cgroup support";
+ return -EINVAL;
+ }
+
+ if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
+ CgroupGetControllerPath("memory", &cgroup);
+ cgroup += "/apps";
+ int ret = createProcessGroupInternal(uid, initialPid, cgroup);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &cgroup);
+ return createProcessGroupInternal(uid, initialPid, cgroup);
+}
+
static bool SetProcessGroupValue(int tid, const std::string& attr_name, int64_t value) {
if (!isMemoryCgroupSupported()) {
PLOG(ERROR) << "Memcg is not mounted.";
diff --git a/libprocessgroup/profiles/cgroups.json b/libprocessgroup/profiles/cgroups.json
index 5b7a28a..7bcb94b 100644
--- a/libprocessgroup/profiles/cgroups.json
+++ b/libprocessgroup/profiles/cgroups.json
@@ -15,11 +15,6 @@
"GID": "system"
},
{
- "Controller": "cpuacct",
- "Path": "/acct",
- "Mode": "0555"
- },
- {
"Controller": "cpuset",
"Path": "/dev/cpuset",
"Mode": "0755",
diff --git a/libprocessgroup/profiles/cgroups.recovery.json b/libprocessgroup/profiles/cgroups.recovery.json
index f0bf5fd..e275252 100644
--- a/libprocessgroup/profiles/cgroups.recovery.json
+++ b/libprocessgroup/profiles/cgroups.recovery.json
@@ -1,9 +1,8 @@
{
- "Cgroups": [
- {
- "Controller": "cpuacct",
- "Path": "/acct",
- "Mode": "0555"
- }
- ]
+ "Cgroups2": {
+ "Path": "/sys/fs/cgroup",
+ "Mode": "0755",
+ "UID": "root",
+ "GID": "root"
+ }
}
diff --git a/rootdir/init.rc b/rootdir/init.rc
index 03af4f3..863cf6c 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -655,6 +655,7 @@
mkdir /data/apex 0755 root system encryption=None
mkdir /data/apex/active 0755 root system
mkdir /data/apex/backup 0700 root system
+ mkdir /data/apex/decompressed 0700 root system encryption=Require
mkdir /data/apex/hashtree 0700 root system
mkdir /data/apex/sessions 0700 root system
mkdir /data/app-staging 0751 system system encryption=DeleteIfNecessary
@@ -722,6 +723,8 @@
mkdir /data/misc/trace 0700 root root
# create location to store surface and window trace files
mkdir /data/misc/wmtrace 0700 system system
+ # create location to store accessibility trace files
+ mkdir /data/misc/a11ytrace 0700 system system
# profile file layout
mkdir /data/misc/profiles 0771 system system
mkdir /data/misc/profiles/cur 0771 system system
@@ -1169,6 +1172,8 @@
chmod 0773 /data/misc/trace
# Give reads to anyone for the window trace folder on debug builds.
chmod 0775 /data/misc/wmtrace
+ # Give reads to anyone for the accessibility trace folder on debug builds.
+ chmod 0775 /data/misc/a11ytrace
on init && property:ro.debuggable=1
start console
diff --git a/rootdir/ueventd.rc b/rootdir/ueventd.rc
index e9293b5..65e29c1 100644
--- a/rootdir/ueventd.rc
+++ b/rootdir/ueventd.rc
@@ -37,8 +37,6 @@
/dev/tty 0666 root root
/dev/random 0666 root root
/dev/urandom 0666 root root
-# Make HW RNG readable by group system to let EntropyMixer read it.
-/dev/hw_random 0440 root system
/dev/ashmem* 0666 root root
/dev/binder 0666 root root
/dev/hwbinder 0666 root root
diff --git a/trusty/confirmationui/Android.bp b/trusty/confirmationui/Android.bp
index 60e0e71..09d48ad 100644
--- a/trusty/confirmationui/Android.bp
+++ b/trusty/confirmationui/Android.bp
@@ -54,6 +54,7 @@
"android.hardware.confirmationui@1.0",
"android.hardware.keymaster@4.0",
"libbase",
+ "libdmabufheap",
"libhidlbase",
"libteeui_hal_support",
"libtrusty",
@@ -92,4 +93,4 @@
"-Werror",
"-DTEEUI_USE_STD_VECTOR",
],
-}
\ No newline at end of file
+}
diff --git a/trusty/confirmationui/TrustyApp.cpp b/trusty/confirmationui/TrustyApp.cpp
index e4c68f9..0e84b19 100644
--- a/trusty/confirmationui/TrustyApp.cpp
+++ b/trusty/confirmationui/TrustyApp.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,140 +15,155 @@
*/
#include "TrustyApp.h"
+#include "TrustyIpc.h"
+#include <BufferAllocator/BufferAllocator.h>
#include <android-base/logging.h>
+#include <sys/mman.h>
#include <sys/uio.h>
#include <trusty/tipc.h>
+#define countof(arr) (sizeof(arr) / sizeof(arr[0]))
+
namespace android {
namespace trusty {
-// 0x1000 is the message buffer size but we need to leave some space for a protocol header.
-// This assures that packets can always be read/written in one read/write operation.
-static constexpr const uint32_t kPacketSize = 0x1000 - 32;
+using ::android::base::unique_fd;
-enum class PacketType : uint32_t {
- SND,
- RCV,
- ACK,
-};
-
-struct PacketHeader {
- PacketType type;
- uint32_t remaining;
-};
-
-const char* toString(PacketType t) {
- switch (t) {
- case PacketType::SND:
- return "SND";
- case PacketType::RCV:
- return "RCV";
- case PacketType::ACK:
- return "ACK";
- default:
- return "UNKNOWN";
- }
+static inline uintptr_t RoundPageUp(uintptr_t val) {
+ return (val + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
}
-static constexpr const uint32_t kHeaderSize = sizeof(PacketHeader);
-static constexpr const uint32_t kPayloadSize = kPacketSize - kHeaderSize;
+ssize_t TrustyApp::TrustyRpc(const uint8_t* obegin, const uint8_t* oend, uint8_t* ibegin,
+ uint8_t* iend) {
+ uint32_t olen = oend - obegin;
-ssize_t TrustyRpc(int handle, const uint8_t* obegin, const uint8_t* oend, uint8_t* ibegin,
- uint8_t* iend) {
- while (obegin != oend) {
- PacketHeader header = {
- .type = PacketType::SND,
- .remaining = uint32_t(oend - obegin),
- };
- uint32_t body_size = std::min(kPayloadSize, header.remaining);
- iovec iov[] = {
- {
- .iov_base = &header,
- .iov_len = kHeaderSize,
- },
- {
- .iov_base = const_cast<uint8_t*>(obegin),
- .iov_len = body_size,
- },
- };
- int rc = writev(handle, iov, 2);
- if (!rc) {
- PLOG(ERROR) << "Error sending SND message. " << rc;
- return rc;
- }
-
- obegin += body_size;
-
- rc = read(handle, &header, kHeaderSize);
- if (!rc) {
- PLOG(ERROR) << "Error reading ACK. " << rc;
- return rc;
- }
-
- if (header.type != PacketType::ACK || header.remaining != oend - obegin) {
- LOG(ERROR) << "malformed ACK";
- return -1;
- }
+ if (olen > shm_len_) {
+ LOG(ERROR) << AT << "request message too long to fit in shared memory";
+ return -1;
}
- ssize_t remaining = 0;
- auto begin = ibegin;
- do {
- PacketHeader header = {
- .type = PacketType::RCV,
- .remaining = 0,
- };
+ memcpy(shm_base_, obegin, olen);
- iovec iov[] = {
- {
- .iov_base = &header,
- .iov_len = kHeaderSize,
- },
- {
- .iov_base = begin,
- .iov_len = uint32_t(iend - begin),
- },
- };
+ confirmationui_hdr hdr = {
+ .cmd = CONFIRMATIONUI_CMD_MSG,
+ };
+ confirmationui_msg_args args = {
+ .msg_len = olen,
+ };
+ iovec iov[] = {
+ {
+ .iov_base = &hdr,
+ .iov_len = sizeof(hdr),
+ },
+ {
+ .iov_base = &args,
+ .iov_len = sizeof(args),
+ },
+ };
- ssize_t rc = writev(handle, iov, 1);
- if (!rc) {
- PLOG(ERROR) << "Error sending RCV message. " << rc;
- return rc;
- }
+ int rc = tipc_send(handle_, iov, countof(iov), NULL, 0);
+ if (rc != static_cast<int>(sizeof(hdr) + sizeof(args))) {
+ LOG(ERROR) << AT << "failed to send MSG request";
+ return -1;
+ }
- rc = readv(handle, iov, 2);
- if (rc < 0) {
- PLOG(ERROR) << "Error reading response. " << rc;
- return rc;
- }
+ rc = readv(handle_, iov, countof(iov));
+ if (rc != static_cast<int>(sizeof(hdr) + sizeof(args))) {
+ LOG(ERROR) << AT << "failed to receive MSG response";
+ return -1;
+ }
- uint32_t body_size = std::min(kPayloadSize, header.remaining);
- if (body_size != rc - kHeaderSize) {
- LOG(ERROR) << "Unexpected amount of data: " << rc;
- return -1;
- }
+ if (hdr.cmd != (CONFIRMATIONUI_CMD_MSG | CONFIRMATIONUI_RESP_BIT)) {
+ LOG(ERROR) << AT << "unknown response command: " << hdr.cmd;
+ return -1;
+ }
- remaining = header.remaining - body_size;
- begin += body_size;
- } while (remaining);
+ uint32_t ilen = iend - ibegin;
+ if (args.msg_len > ilen) {
+ LOG(ERROR) << AT << "response message too long to fit in return buffer";
+ return -1;
+ }
- return begin - ibegin;
+ memcpy(ibegin, shm_base_, args.msg_len);
+
+ return args.msg_len;
}
TrustyApp::TrustyApp(const std::string& path, const std::string& appname)
: handle_(kInvalidHandle) {
- handle_ = tipc_connect(path.c_str(), appname.c_str());
- if (handle_ == kInvalidHandle) {
+ unique_fd tipc_handle(tipc_connect(path.c_str(), appname.c_str()));
+ if (tipc_handle < 0) {
LOG(ERROR) << AT << "failed to connect to Trusty TA \"" << appname << "\" using dev:"
<< "\"" << path << "\"";
+ return;
}
+
+ uint32_t shm_len = RoundPageUp(CONFIRMATIONUI_MAX_MSG_SIZE);
+ BufferAllocator allocator;
+ unique_fd dma_buf(allocator.Alloc("system", shm_len));
+ if (dma_buf < 0) {
+ LOG(ERROR) << AT << "failed to allocate shared memory buffer";
+ return;
+ }
+
+ confirmationui_hdr hdr = {
+ .cmd = CONFIRMATIONUI_CMD_INIT,
+ };
+ confirmationui_init_req args = {
+ .shm_len = shm_len,
+ };
+ iovec iov[] = {
+ {
+ .iov_base = &hdr,
+ .iov_len = sizeof(hdr),
+ },
+ {
+ .iov_base = &args,
+ .iov_len = sizeof(args),
+ },
+ };
+ trusty_shm shm = {
+ .fd = dma_buf,
+ .transfer = TRUSTY_SHARE,
+ };
+
+ int rc = tipc_send(tipc_handle, iov, 2, &shm, 1);
+ if (rc != static_cast<int>(sizeof(hdr) + sizeof(args))) {
+ LOG(ERROR) << AT << "failed to send INIT request";
+ return;
+ }
+
+ rc = read(tipc_handle, &hdr, sizeof(hdr));
+ if (rc != static_cast<int>(sizeof(hdr))) {
+ LOG(ERROR) << AT << "failed to receive INIT response";
+ return;
+ }
+
+ if (hdr.cmd != (CONFIRMATIONUI_CMD_INIT | CONFIRMATIONUI_RESP_BIT)) {
+ LOG(ERROR) << AT << "unknown response command: " << hdr.cmd;
+ return;
+ }
+
+ void* shm_base = mmap(0, shm_len, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf, 0);
+ if (shm_base == MAP_FAILED) {
+ LOG(ERROR) << AT << "failed to mmap() shared memory buffer";
+ return;
+ }
+
+ handle_ = std::move(tipc_handle);
+ shm_base_ = shm_base;
+ shm_len_ = shm_len;
+
LOG(INFO) << AT << "succeeded to connect to Trusty TA \"" << appname << "\"";
}
+
TrustyApp::~TrustyApp() {
- if (handle_ != kInvalidHandle) {
- tipc_close(handle_);
- }
LOG(INFO) << "Done shutting down TrustyApp";
}
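
The rewritten TrustyApp drops the chunked SND/RCV/ACK framing: the constructor allocates a dma-buf, shares it with the TA via CONFIRMATIONUI_CMD_INIT, and TrustyRpc() afterwards only exchanges small CONFIRMATIONUI_CMD_MSG headers while the payload travels through the shared mapping. A condensed sketch of one MSG round trip over an already-initialized session; the free function SendMsg() is illustrative and error handling is trimmed:

    #include <cstring>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <trusty/tipc.h>
    #include "TrustyIpc.h"

    ssize_t SendMsg(int fd, void* shm, uint32_t shm_len, const uint8_t* req, uint32_t req_len,
                    uint8_t* resp, uint32_t resp_cap) {
        if (req_len > shm_len) return -1;
        memcpy(shm, req, req_len);  // request payload goes through shared memory

        confirmationui_hdr hdr = {.cmd = CONFIRMATIONUI_CMD_MSG};
        confirmationui_msg_args args = {.msg_len = req_len};
        iovec iov[] = {{&hdr, sizeof(hdr)}, {&args, sizeof(args)}};

        if (tipc_send(fd, iov, 2, nullptr, 0) != static_cast<int>(sizeof(hdr) + sizeof(args)))
            return -1;
        if (readv(fd, iov, 2) != static_cast<int>(sizeof(hdr) + sizeof(args))) return -1;
        if (hdr.cmd != (CONFIRMATIONUI_CMD_MSG | CONFIRMATIONUI_RESP_BIT)) return -1;
        if (args.msg_len > resp_cap) return -1;

        memcpy(resp, shm, args.msg_len);  // response payload comes back the same way
        return args.msg_len;
    }
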
diff --git a/trusty/confirmationui/TrustyApp.h b/trusty/confirmationui/TrustyApp.h
index 05a25f6..406f439 100644
--- a/trusty/confirmationui/TrustyApp.h
+++ b/trusty/confirmationui/TrustyApp.h
@@ -16,7 +16,10 @@
#pragma once
+#include "TrustyIpc.h"
+
#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
#include <errno.h>
#include <poll.h>
#include <stdio.h>
@@ -60,19 +63,11 @@
MSG_TOO_LONG = -2,
};
-/*
- * There is a hard limitation of 0x1800 bytes for the to-be-signed message size. The protocol
- * overhead is limited, so that 0x2000 is a buffer size that will be sufficient in any benign
- * mode of operation.
- */
-static constexpr const size_t kSendBufferSize = 0x2000;
-
-ssize_t TrustyRpc(int handle, const uint8_t* obegin, const uint8_t* oend, uint8_t* ibegin,
- uint8_t* iend);
-
class TrustyApp {
private:
- int handle_;
+ android::base::unique_fd handle_;
+ void* shm_base_;
+ size_t shm_len_;
static constexpr const int kInvalidHandle = -1;
/*
* This mutex serializes communication with the trusted app, not handle_.
@@ -84,6 +79,8 @@
TrustyApp(const std::string& path, const std::string& appname);
~TrustyApp();
+ ssize_t TrustyRpc(const uint8_t* obegin, const uint8_t* oend, uint8_t* ibegin, uint8_t* iend);
+
template <typename Request, typename Response, typename... T>
std::tuple<TrustyAppError, msg2tuple_t<Response>> issueCmd(const T&... args) {
std::lock_guard<std::mutex> lock(mutex_);
@@ -93,7 +90,7 @@
return {TrustyAppError::ERROR, {}};
}
- uint8_t buffer[kSendBufferSize];
+ uint8_t buffer[CONFIRMATIONUI_MAX_MSG_SIZE];
WriteStream out(buffer);
out = write(Request(), out, args...);
@@ -102,8 +99,8 @@
return {TrustyAppError::MSG_TOO_LONG, {}};
}
- auto rc = TrustyRpc(handle_, &buffer[0], const_cast<const uint8_t*>(out.pos()), &buffer[0],
- &buffer[kSendBufferSize]);
+ auto rc = TrustyRpc(&buffer[0], const_cast<const uint8_t*>(out.pos()), &buffer[0],
+ &buffer[CONFIRMATIONUI_MAX_MSG_SIZE]);
if (rc < 0) return {TrustyAppError::ERROR, {}};
ReadStream in(&buffer[0], rc);
@@ -125,7 +122,7 @@
return TrustyAppError::ERROR;
}
- uint8_t buffer[kSendBufferSize];
+ uint8_t buffer[CONFIRMATIONUI_MAX_MSG_SIZE];
WriteStream out(buffer);
out = write(Request(), out, args...);
@@ -134,8 +131,8 @@
return TrustyAppError::MSG_TOO_LONG;
}
- auto rc = TrustyRpc(handle_, &buffer[0], const_cast<const uint8_t*>(out.pos()), &buffer[0],
- &buffer[kSendBufferSize]);
+ auto rc = TrustyRpc(&buffer[0], const_cast<const uint8_t*>(out.pos()), &buffer[0],
+ &buffer[CONFIRMATIONUI_MAX_MSG_SIZE]);
if (rc < 0) {
LOG(ERROR) << "send command failed: " << strerror(errno) << " (" << errno << ")";
return TrustyAppError::ERROR;
diff --git a/trusty/confirmationui/TrustyConfirmationUI.cpp b/trusty/confirmationui/TrustyConfirmationUI.cpp
index 6b25893..c8b24e3 100644
--- a/trusty/confirmationui/TrustyConfirmationUI.cpp
+++ b/trusty/confirmationui/TrustyConfirmationUI.cpp
@@ -71,7 +71,7 @@
using TeeuiRc = ::teeui::ResponseCode;
constexpr const char kTrustyDeviceName[] = "/dev/trusty-ipc-dev0";
-constexpr const char kConfirmationuiAppName[] = "com.android.trusty.confirmationui";
+constexpr const char kConfirmationuiAppName[] = CONFIRMATIONUI_PORT;
namespace {
diff --git a/trusty/confirmationui/TrustyIpc.h b/trusty/confirmationui/TrustyIpc.h
new file mode 100644
index 0000000..eb764bc
--- /dev/null
+++ b/trusty/confirmationui/TrustyIpc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+/*
+ * This interface is shared between Android and Trusty. There is a copy in each
+ * repository. They must be kept in sync.
+ */
+
+#define CONFIRMATIONUI_PORT "com.android.trusty.confirmationui"
+
+/**
+ * enum confirmationui_cmd - command identifiers for ConfirmationUI interface
+ * @CONFIRMATIONUI_RESP_BIT: response bit set as part of response
+ * @CONFIRMATIONUI_REQ_SHIFT: number of bits used by response bit
+ * @CONFIRMATIONUI_CMD_INIT: command to initialize session
+ * @CONFIRMATIONUI_CMD_MSG: command to send ConfirmationUI messages
+ */
+enum confirmationui_cmd : uint32_t {
+ CONFIRMATIONUI_RESP_BIT = 1,
+ CONFIRMATIONUI_REQ_SHIFT = 1,
+
+ CONFIRMATIONUI_CMD_INIT = (1 << CONFIRMATIONUI_REQ_SHIFT),
+ CONFIRMATIONUI_CMD_MSG = (2 << CONFIRMATIONUI_REQ_SHIFT),
+};
+
+/**
+ * struct confirmationui_hdr - header for ConfirmationUI messages
+ * @cmd: command identifier
+ *
+ * Note that no messages return a status code. Any error on the server side
+ * results in the connection being closed. So, operations can be assumed to be
+ * successful if they return a response.
+ */
+struct confirmationui_hdr {
+ uint32_t cmd;
+};
+
+/**
+ * struct confirmationui_init_req - arguments for request to initialize a
+ * session
+ * @shm_len: length of memory region being shared
+ *
+ * A handle to a memory region must be sent along with this message. This memory
+ * is used to send ConfirmationUI messages.
+ */
+struct confirmationui_init_req {
+ uint32_t shm_len;
+};
+
+/**
+ * struct confirmationui_msg_args - arguments for sending a message
+ * @msg_len: length of message being sent
+ *
+ * Contents of the message are located in the shared memory region that is
+ * established using %CONFIRMATIONUI_CMD_INIT.
+ *
+ * ConfirmationUI messages can travel both ways.
+ */
+struct confirmationui_msg_args {
+ uint32_t msg_len;
+};
+
+#define CONFIRMATIONUI_MAX_MSG_SIZE 0x2000
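
The REQ_SHIFT/RESP_BIT layout leaves bit 0 free, so the TA acknowledges a command by echoing it with the response bit set. A tiny illustration of the resulting values, assuming the TrustyIpc.h header above is on the include path:

    #include <cstdio>
    #include "TrustyIpc.h"

    int main() {
        unsigned req = CONFIRMATIONUI_CMD_MSG;          // 2 << 1 == 0x4
        unsigned resp = req | CONFIRMATIONUI_RESP_BIT;  // 0x4 | 0x1 == 0x5
        std::printf("request 0x%x -> expected response 0x%x\n", req, resp);
        return 0;
    }
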
diff --git a/trusty/confirmationui/android.hardware.confirmationui@1.0-service.trusty.rc b/trusty/confirmationui/android.hardware.confirmationui@1.0-service.trusty.rc
index dc7a03b..3ba6fc0 100644
--- a/trusty/confirmationui/android.hardware.confirmationui@1.0-service.trusty.rc
+++ b/trusty/confirmationui/android.hardware.confirmationui@1.0-service.trusty.rc
@@ -1,4 +1,4 @@
service confirmationui-1-0 /vendor/bin/hw/android.hardware.confirmationui@1.0-service.trusty
class hal
- user nobody
- group drmrpc input
+ user system
+ group drmrpc input system
diff --git a/trusty/coverage/coverage.cpp b/trusty/coverage/coverage.cpp
index 5eccdc5..3c6b5c5 100644
--- a/trusty/coverage/coverage.cpp
+++ b/trusty/coverage/coverage.cpp
@@ -29,6 +29,7 @@
#include <trusty/coverage/record.h>
#include <trusty/coverage/tipc.h>
#include <trusty/tipc.h>
+#include <iostream>
#define COVERAGE_CLIENT_PORT "com.android.trusty.coverage.client"
@@ -122,7 +123,9 @@
int fd = tipc_connect(tipc_dev_.c_str(), COVERAGE_CLIENT_PORT);
if (fd < 0) {
- return ErrnoError() << "failed to connect to Trusty coverarge server: ";
+ // Don't error out to support fuzzing builds without coverage, e.g. for repros.
+ std::cerr << "WARNING!!! Failed to connect to Trusty coverage server." << std::endl;
+ return {};
}
coverage_srv_fd_.reset(fd);
@@ -130,7 +133,7 @@
req.open_args.uuid = uuid_;
auto ret = Rpc(&req, -1, &resp);
if (!ret.ok()) {
- return Error() << "failed to open coverage client: ";
+ return Error() << "failed to open coverage client: " << ret.error();
}
record_len_ = resp.open_args.record_len;
shm_len_ = RoundPageUp(record_len_);
@@ -153,13 +156,17 @@
req.share_record_args.shm_len = shm_len_;
ret = Rpc(&req, dma_buf, &resp);
if (!ret.ok()) {
- return Error() << "failed to send shared memory: ";
+ return Error() << "failed to send shared memory: " << ret.error();
}
shm_ = shm;
return {};
}
+bool CoverageRecord::IsOpen() {
+ return shm_;
+}
+
void CoverageRecord::ResetFullRecord() {
auto header_region = GetRegionBounds(COV_START);
if (!header_region.ok()) {
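
Open() now succeeds even when the coverage server is unreachable, so callers consult the new IsOpen() accessor to learn whether shared memory was actually attached, as the ExtraCounters changes below do. A hypothetical caller; the function name and log messages are illustrative:

    #include <android-base/logging.h>
    #include <trusty/coverage/coverage.h>

    // Proceed without coverage when the record could not attach to the
    // coverage server, e.g. on repro builds of a Trusty fuzzer.
    void InitCoverage(android::trusty::coverage::CoverageRecord& record) {
        auto ret = record.Open();
        if (!ret.ok()) {
            LOG(FATAL) << "failed to open coverage record: " << ret.error();
        }
        if (!record.IsOpen()) {
            LOG(WARNING) << "running without Trusty coverage";
        }
    }
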
diff --git a/trusty/coverage/include/trusty/coverage/coverage.h b/trusty/coverage/include/trusty/coverage/coverage.h
index 5da68da..9ccc981 100644
--- a/trusty/coverage/include/trusty/coverage/coverage.h
+++ b/trusty/coverage/include/trusty/coverage/coverage.h
@@ -47,6 +47,7 @@
~CoverageRecord();
Result<void> Open();
+ bool IsOpen();
void ResetFullRecord();
void ResetCounts();
void ResetPCs();
diff --git a/trusty/fuzz/counters.cpp b/trusty/fuzz/counters.cpp
index 1e863ac..4d34059 100644
--- a/trusty/fuzz/counters.cpp
+++ b/trusty/fuzz/counters.cpp
@@ -41,6 +41,10 @@
namespace fuzz {
ExtraCounters::ExtraCounters(coverage::CoverageRecord* record) : record_(record) {
+ if (!record_->IsOpen()) {
+ return;
+ }
+
assert(fuzzer::ExtraCountersBegin());
assert(fuzzer::ExtraCountersEnd());
@@ -51,10 +55,18 @@
}
ExtraCounters::~ExtraCounters() {
+ if (!record_->IsOpen()) {
+ return;
+ }
+
Flush();
}
void ExtraCounters::Reset() {
+ if (!record_->IsOpen()) {
+ return;
+ }
+
record_->ResetCounts();
fuzzer::ClearExtraCounters();
}
diff --git a/trusty/fuzz/utils.cpp b/trusty/fuzz/utils.cpp
index f4cf0b6..3526337 100644
--- a/trusty/fuzz/utils.cpp
+++ b/trusty/fuzz/utils.cpp
@@ -113,7 +113,7 @@
int rc = write(ta_fd_, buf, len);
alarm(0);
if (rc < 0) {
- return Error() << "failed to read TIPC message from TA: ";
+ return Error() << "failed to write TIPC message to TA: ";
}
return {};