Merge "Update for MapInfo objects in frame data."
diff --git a/fastboot/Android.mk b/fastboot/Android.mk
index 322fe5c..10bed6d 100644
--- a/fastboot/Android.mk
+++ b/fastboot/Android.mk
@@ -23,5 +23,5 @@
my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs
my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs_casefold
my_dist_files += $(HOST_OUT_EXECUTABLES)/sload_f2fs
-$(call dist-for-goals,dist_files sdk win_sdk,$(my_dist_files))
+$(call dist-for-goals,dist_files sdk,$(my_dist_files))
my_dist_files :=
diff --git a/fastboot/OWNERS b/fastboot/OWNERS
index 58b2a81..17b3466 100644
--- a/fastboot/OWNERS
+++ b/fastboot/OWNERS
@@ -1,3 +1,3 @@
dvander@google.com
-hridya@google.com
+elsk@google.com
enh@google.com
diff --git a/fastboot/device/flashing.cpp b/fastboot/device/flashing.cpp
index 3f9bcdc..7bef72a 100644
--- a/fastboot/device/flashing.cpp
+++ b/fastboot/device/flashing.cpp
@@ -172,7 +172,8 @@
return -EOVERFLOW;
} else if (data.size() < block_device_size &&
(partition_name == "boot" || partition_name == "boot_a" ||
- partition_name == "boot_b")) {
+ partition_name == "boot_b" || partition_name == "init_boot" ||
+ partition_name == "init_boot_a" || partition_name == "init_boot_b")) {
CopyAVBFooter(&data, block_device_size);
}
if (android::base::GetProperty("ro.system.build.type", "") != "user") {
diff --git a/fastboot/fastboot.bash b/fastboot/fastboot.bash
index 5397455..e9bf9e9 100644
--- a/fastboot/fastboot.bash
+++ b/fastboot/fastboot.bash
@@ -109,7 +109,7 @@
cur="${COMP_WORDS[COMP_CWORD]}"
if [[ $i -eq $COMP_CWORD ]]; then
- partitions="boot bootloader dtbo modem odm odm_dlkm oem product pvmfw radio recovery system system_dlkm vbmeta vendor vendor_dlkm"
+ partitions="boot bootloader dtbo init_boot modem odm odm_dlkm oem product pvmfw radio recovery system system_dlkm vbmeta vendor vendor_dlkm"
COMPREPLY=( $(compgen -W "$partitions" -- $cur) )
else
_fastboot_util_complete_local_file "${cur}" '!*.img'
diff --git a/fastboot/fastboot.cpp b/fastboot/fastboot.cpp
index c8ef94f..fde1dab 100644
--- a/fastboot/fastboot.cpp
+++ b/fastboot/fastboot.cpp
@@ -141,6 +141,10 @@
static Image images[] = {
// clang-format off
{ "boot", "boot.img", "boot.sig", "boot", false, ImageType::BootCritical },
+ { "init_boot",
+ "init_boot.img", "init_boot.sig",
+ "init_boot",
+ true, ImageType::BootCritical },
{ nullptr, "boot_other.img", "boot.sig", "boot", true, ImageType::Normal },
{ "cache", "cache.img", "cache.sig", "cache", true, ImageType::Extra },
{ "dtbo", "dtbo.img", "dtbo.sig", "dtbo", true, ImageType::BootCritical },
@@ -1021,7 +1025,7 @@
return partition_size;
}
-static void copy_boot_avb_footer(const std::string& partition, struct fastboot_buffer* buf) {
+static void copy_avb_footer(const std::string& partition, struct fastboot_buffer* buf) {
if (buf->sz < AVB_FOOTER_SIZE) {
return;
}
@@ -1036,9 +1040,9 @@
// In this case, partition_size will be zero.
if (partition_size < buf->sz) {
fprintf(stderr,
- "Warning: skip copying boot image avb footer"
- " (boot partition size: %" PRId64 ", boot image size: %" PRId64 ").\n",
- partition_size, buf->sz);
+ "Warning: skip copying %s image avb footer"
+ " (%s partition size: %" PRId64 ", %s image size: %" PRId64 ").\n",
+ partition.c_str(), partition.c_str(), partition_size, partition.c_str(), buf->sz);
return;
}
@@ -1046,7 +1050,7 @@
// Because buf->fd will still be used afterwards.
std::string data;
if (!android::base::ReadFdToString(buf->fd, &data)) {
- die("Failed reading from boot");
+ die("Failed reading from %s", partition.c_str());
}
uint64_t footer_offset = buf->sz - AVB_FOOTER_SIZE;
@@ -1055,13 +1059,14 @@
return;
}
- unique_fd fd(make_temporary_fd("boot rewriting"));
+ const std::string tmp_fd_template = partition + " rewriting";
+ unique_fd fd(make_temporary_fd(tmp_fd_template.c_str()));
if (!android::base::WriteStringToFd(data, fd)) {
- die("Failed writing to modified boot");
+ die("Failed writing to modified %s", partition.c_str());
}
lseek(fd.get(), partition_size - AVB_FOOTER_SIZE, SEEK_SET);
if (!android::base::WriteStringToFd(data.substr(footer_offset), fd)) {
- die("Failed copying AVB footer in boot");
+ die("Failed copying AVB footer in %s", partition.c_str());
}
buf->fd = std::move(fd);
buf->sz = partition_size;
@@ -1072,8 +1077,9 @@
{
sparse_file** s;
- if (partition == "boot" || partition == "boot_a" || partition == "boot_b") {
- copy_boot_avb_footer(partition, buf);
+ if (partition == "boot" || partition == "boot_a" || partition == "boot_b" ||
+ partition == "init_boot" || partition == "init_boot_a" || partition == "init_boot_b") {
+ copy_avb_footer(partition, buf);
}
// Rewrite vbmeta if that's what we're flashing and modification has been requested.
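Note on the copy_avb_footer() change above: when an image (boot or, now, init_boot) is smaller than its partition, the 64-byte AVB footer that avbtool appends at the end of the image has to be re-written at the end of the partition, where the bootloader and libavb expect to find it. Below is a minimal sketch of that relocation arithmetic; it assumes libavb's 64-byte AvbFooter, operates on an in-memory image for simplicity, and the helper name is illustrative rather than part of fastboot.

// Sketch only: relocate the trailing AVB footer of an image that is smaller
// than its partition, assuming libavb's 64-byte AvbFooter. Illustrative name;
// fastboot's real implementation works on a temporary fd instead.
#include <cstdint>
#include <string>

constexpr uint64_t kAvbFooterSize = 64;

static bool relocate_avb_footer(std::string* image, uint64_t partition_size) {
    if (image->size() < kAvbFooterSize || partition_size < image->size() + kAvbFooterSize) {
        return false;  // no footer to move, or no room to relocate it
    }
    // The footer currently occupies the last 64 bytes of the image.
    std::string footer = image->substr(image->size() - kAvbFooterSize);
    // Zero-pad up to the partition size and place a copy of the footer in the
    // last 64 bytes of the partition, where AVB looks for it.
    image->resize(partition_size - kAvbFooterSize, '\0');
    image->append(footer);
    return true;
}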
diff --git a/fs_mgr/fs_mgr.cpp b/fs_mgr/fs_mgr.cpp
index 33dca58..8ce961b 100644
--- a/fs_mgr/fs_mgr.cpp
+++ b/fs_mgr/fs_mgr.cpp
@@ -170,6 +170,22 @@
FS_STAT_SET_RESERVED_BLOCKS_FAILED | FS_STAT_ENABLE_ENCRYPTION_FAILED);
}
+static bool umount_retry(const std::string& mount_point) {
+ int retry_count = 5;
+ bool umounted = false;
+
+ while (retry_count-- > 0) {
+ umounted = umount(mount_point.c_str()) == 0;
+ if (umounted) {
+ LINFO << __FUNCTION__ << "(): unmount(" << mount_point << ") succeeded";
+ break;
+ }
+ PERROR << __FUNCTION__ << "(): umount(" << mount_point << ") failed";
+ if (retry_count) sleep(1);
+ }
+ return umounted;
+}
+
static void check_fs(const std::string& blk_device, const std::string& fs_type,
const std::string& target, int* fs_stat) {
int status;
@@ -209,25 +225,12 @@
tmpmnt_opts.c_str());
PINFO << __FUNCTION__ << "(): mount(" << blk_device << "," << target << "," << fs_type
<< ")=" << ret;
- if (!ret) {
- bool umounted = false;
- int retry_count = 5;
- while (retry_count-- > 0) {
- umounted = umount(target.c_str()) == 0;
- if (umounted) {
- LINFO << __FUNCTION__ << "(): unmount(" << target << ") succeeded";
- break;
- }
- PERROR << __FUNCTION__ << "(): umount(" << target << ") failed";
- if (retry_count) sleep(1);
- }
- if (!umounted) {
- // boot may fail but continue and leave it to later stage for now.
- PERROR << __FUNCTION__ << "(): umount(" << target << ") timed out";
- *fs_stat |= FS_STAT_RO_UNMOUNT_FAILED;
- }
- } else {
+ if (ret) {
*fs_stat |= FS_STAT_RO_MOUNT_FAILED;
+ } else if (!umount_retry(target)) {
+ // boot may fail but continue and leave it to later stage for now.
+ PERROR << __FUNCTION__ << "(): umount(" << target << ") timed out";
+ *fs_stat |= FS_STAT_RO_UNMOUNT_FAILED;
}
}
@@ -268,12 +271,12 @@
LINFO << "Running " << F2FS_FSCK_BIN << " -f -c 10000 --debug-cache "
<< realpath(blk_device);
ret = logwrap_fork_execvp(ARRAY_SIZE(f2fs_fsck_forced_argv), f2fs_fsck_forced_argv,
- &status, false, LOG_KLOG | LOG_FILE, false, FSCK_LOG_FILE);
+ &status, false, LOG_KLOG | LOG_FILE, false, nullptr);
} else {
LINFO << "Running " << F2FS_FSCK_BIN << " -a -c 10000 --debug-cache "
<< realpath(blk_device);
ret = logwrap_fork_execvp(ARRAY_SIZE(f2fs_fsck_argv), f2fs_fsck_argv, &status, false,
- LOG_KLOG | LOG_FILE, false, FSCK_LOG_FILE);
+ LOG_KLOG | LOG_FILE, false, nullptr);
}
if (ret < 0) {
/* No need to check for error in fork, we can't really handle it now */
@@ -1009,12 +1012,11 @@
// Check to see if a mountable volume has encryption requirements
static int handle_encryptable(const FstabEntry& entry) {
if (should_use_metadata_encryption(entry)) {
- if (umount(entry.mount_point.c_str()) == 0) {
+ if (umount_retry(entry.mount_point)) {
return FS_MGR_MNTALL_DEV_NEEDS_METADATA_ENCRYPTION;
- } else {
- PERROR << "Could not umount " << entry.mount_point << " - fail since can't encrypt";
- return FS_MGR_MNTALL_FAIL;
}
+ PERROR << "Could not umount " << entry.mount_point << " - fail since can't encrypt";
+ return FS_MGR_MNTALL_FAIL;
} else if (entry.fs_mgr_flags.file_encryption) {
LINFO << entry.mount_point << " is file encrypted";
return FS_MGR_MNTALL_DEV_FILE_ENCRYPTED;
@@ -1807,9 +1809,13 @@
auto& mount_point = alt_mount_point.empty() ? entry.mount_point : alt_mount_point;
// Run fsck if needed
- prepare_fs_for_mount(entry.blk_device, entry, mount_point);
+ int ret = prepare_fs_for_mount(entry.blk_device, entry, mount_point);
+ // The wiped case doesn't require trying __mount below.
+ if (ret & FS_STAT_INVALID_MAGIC) {
+ return FS_MGR_DOMNT_FAILED;
+ }
- int ret = __mount(entry.blk_device, mount_point, entry);
+ ret = __mount(entry.blk_device, mount_point, entry);
if (ret) {
ret = (errno == EBUSY) ? FS_MGR_DOMNT_BUSY : FS_MGR_DOMNT_FAILED;
}
diff --git a/fs_mgr/fs_mgr_overlayfs.cpp b/fs_mgr/fs_mgr_overlayfs.cpp
index 2b31119..2da5b0f 100644
--- a/fs_mgr/fs_mgr_overlayfs.cpp
+++ b/fs_mgr/fs_mgr_overlayfs.cpp
@@ -880,9 +880,14 @@
errno = save_errno;
}
entry.flags &= ~MS_RDONLY;
+ entry.flags |= MS_SYNCHRONOUS;
+ entry.fs_options = "nodiscard";
fs_mgr_set_blk_ro(device_path, false);
}
- entry.fs_mgr_flags.check = true;
+ // check_fs requires the APEX runtime library
+ if (fs_mgr_overlayfs_already_mounted("/data", false)) {
+ entry.fs_mgr_flags.check = true;
+ }
auto save_errno = errno;
if (mounted) mounted = fs_mgr_do_mount_one(entry) == 0;
if (!mounted) {
diff --git a/fs_mgr/libfs_avb/avb_util.cpp b/fs_mgr/libfs_avb/avb_util.cpp
index e913d50..85dbb36 100644
--- a/fs_mgr/libfs_avb/avb_util.cpp
+++ b/fs_mgr/libfs_avb/avb_util.cpp
@@ -35,19 +35,6 @@
namespace android {
namespace fs_mgr {
-std::string GetAvbPropertyDescriptor(const std::string& key,
- const std::vector<VBMetaData>& vbmeta_images) {
- size_t value_size;
- for (const auto& vbmeta : vbmeta_images) {
- const char* value = avb_property_lookup(vbmeta.data(), vbmeta.size(), key.data(),
- key.size(), &value_size);
- if (value != nullptr) {
- return {value, value_size};
- }
- }
- return "";
-}
-
// Constructs dm-verity arguments for sending DM_TABLE_LOAD ioctl to kernel.
// See the following link for more details:
// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
@@ -130,64 +117,6 @@
return true;
}
-std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(
- const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images) {
- bool found = false;
- const uint8_t* desc_partition_name;
- auto hash_desc = std::make_unique<FsAvbHashDescriptor>();
-
- for (const auto& vbmeta : vbmeta_images) {
- size_t num_descriptors;
- std::unique_ptr<const AvbDescriptor*[], decltype(&avb_free)> descriptors(
- avb_descriptor_get_all(vbmeta.data(), vbmeta.size(), &num_descriptors), avb_free);
-
- if (!descriptors || num_descriptors < 1) {
- continue;
- }
-
- for (size_t n = 0; n < num_descriptors && !found; n++) {
- AvbDescriptor desc;
- if (!avb_descriptor_validate_and_byteswap(descriptors[n], &desc)) {
- LWARNING << "Descriptor[" << n << "] is invalid";
- continue;
- }
- if (desc.tag == AVB_DESCRIPTOR_TAG_HASH) {
- desc_partition_name = (const uint8_t*)descriptors[n] + sizeof(AvbHashDescriptor);
- if (!avb_hash_descriptor_validate_and_byteswap((AvbHashDescriptor*)descriptors[n],
- hash_desc.get())) {
- continue;
- }
- if (hash_desc->partition_name_len != partition_name.length()) {
- continue;
- }
- // Notes that desc_partition_name is not NUL-terminated.
- std::string hash_partition_name((const char*)desc_partition_name,
- hash_desc->partition_name_len);
- if (hash_partition_name == partition_name) {
- found = true;
- }
- }
- }
-
- if (found) break;
- }
-
- if (!found) {
- LERROR << "Hash descriptor not found: " << partition_name;
- return nullptr;
- }
-
- hash_desc->partition_name = partition_name;
-
- const uint8_t* desc_salt = desc_partition_name + hash_desc->partition_name_len;
- hash_desc->salt = BytesToHex(desc_salt, hash_desc->salt_len);
-
- const uint8_t* desc_digest = desc_salt + hash_desc->salt_len;
- hash_desc->digest = BytesToHex(desc_digest, hash_desc->digest_len);
-
- return hash_desc;
-}
-
std::unique_ptr<FsAvbHashtreeDescriptor> GetHashtreeDescriptor(
const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images) {
bool found = false;
diff --git a/fs_mgr/libfs_avb/avb_util.h b/fs_mgr/libfs_avb/avb_util.h
index e8f7c39..7941c70 100644
--- a/fs_mgr/libfs_avb/avb_util.h
+++ b/fs_mgr/libfs_avb/avb_util.h
@@ -37,12 +37,6 @@
: partition_name(chain_partition_name), public_key_blob(chain_public_key_blob) {}
};
-std::string GetAvbPropertyDescriptor(const std::string& key,
- const std::vector<VBMetaData>& vbmeta_images);
-
-std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(
- const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images);
-
// AvbHashtreeDescriptor to dm-verity table setup.
std::unique_ptr<FsAvbHashtreeDescriptor> GetHashtreeDescriptor(
const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images);
diff --git a/fs_mgr/libfs_avb/fs_avb.cpp b/fs_mgr/libfs_avb/fs_avb.cpp
index 1da7117..a288876 100644
--- a/fs_mgr/libfs_avb/fs_avb.cpp
+++ b/fs_mgr/libfs_avb/fs_avb.cpp
@@ -37,6 +37,7 @@
#include "avb_ops.h"
#include "avb_util.h"
+#include "fs_avb/fs_avb_util.h"
#include "sha.h"
#include "util.h"
diff --git a/fs_mgr/libfs_avb/fs_avb_util.cpp b/fs_mgr/libfs_avb/fs_avb_util.cpp
index 1c14cc0..5326226 100644
--- a/fs_mgr/libfs_avb/fs_avb_util.cpp
+++ b/fs_mgr/libfs_avb/fs_avb_util.cpp
@@ -74,6 +74,64 @@
return GetHashtreeDescriptor(avb_partition_name, vbmeta_images);
}
+std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(
+ const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images) {
+ bool found = false;
+ const uint8_t* desc_partition_name;
+ auto hash_desc = std::make_unique<FsAvbHashDescriptor>();
+
+ for (const auto& vbmeta : vbmeta_images) {
+ size_t num_descriptors;
+ std::unique_ptr<const AvbDescriptor*[], decltype(&avb_free)> descriptors(
+ avb_descriptor_get_all(vbmeta.data(), vbmeta.size(), &num_descriptors), avb_free);
+
+ if (!descriptors || num_descriptors < 1) {
+ continue;
+ }
+
+ for (size_t n = 0; n < num_descriptors && !found; n++) {
+ AvbDescriptor desc;
+ if (!avb_descriptor_validate_and_byteswap(descriptors[n], &desc)) {
+ LWARNING << "Descriptor[" << n << "] is invalid";
+ continue;
+ }
+ if (desc.tag == AVB_DESCRIPTOR_TAG_HASH) {
+ desc_partition_name = (const uint8_t*)descriptors[n] + sizeof(AvbHashDescriptor);
+ if (!avb_hash_descriptor_validate_and_byteswap((AvbHashDescriptor*)descriptors[n],
+ hash_desc.get())) {
+ continue;
+ }
+ if (hash_desc->partition_name_len != partition_name.length()) {
+ continue;
+ }
+ // Note that desc_partition_name is not NUL-terminated.
+ std::string hash_partition_name((const char*)desc_partition_name,
+ hash_desc->partition_name_len);
+ if (hash_partition_name == partition_name) {
+ found = true;
+ }
+ }
+ }
+
+ if (found) break;
+ }
+
+ if (!found) {
+ LERROR << "Hash descriptor not found: " << partition_name;
+ return nullptr;
+ }
+
+ hash_desc->partition_name = partition_name;
+
+ const uint8_t* desc_salt = desc_partition_name + hash_desc->partition_name_len;
+ hash_desc->salt = BytesToHex(desc_salt, hash_desc->salt_len);
+
+ const uint8_t* desc_digest = desc_salt + hash_desc->salt_len;
+ hash_desc->digest = BytesToHex(desc_digest, hash_desc->digest_len);
+
+ return hash_desc;
+}
+
// Given a path, loads and verifies the vbmeta, to extract the Avb Hash descriptor.
std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(const std::string& avb_partition_name,
VBMetaData&& vbmeta) {
@@ -84,5 +142,18 @@
return GetHashDescriptor(avb_partition_name, vbmeta_images);
}
+std::string GetAvbPropertyDescriptor(const std::string& key,
+ const std::vector<VBMetaData>& vbmeta_images) {
+ size_t value_size;
+ for (const auto& vbmeta : vbmeta_images) {
+ const char* value = avb_property_lookup(vbmeta.data(), vbmeta.size(), key.data(),
+ key.size(), &value_size);
+ if (value != nullptr) {
+ return {value, value_size};
+ }
+ }
+ return "";
+}
+
} // namespace fs_mgr
} // namespace android
diff --git a/fs_mgr/libfs_avb/include/fs_avb/fs_avb_util.h b/fs_mgr/libfs_avb/include/fs_avb/fs_avb_util.h
index 3f37bd7..1b15db7 100644
--- a/fs_mgr/libfs_avb/include/fs_avb/fs_avb_util.h
+++ b/fs_mgr/libfs_avb/include/fs_avb/fs_avb_util.h
@@ -43,9 +43,15 @@
std::unique_ptr<FsAvbHashtreeDescriptor> GetHashtreeDescriptor(
const std::string& avb_partition_name, VBMetaData&& vbmeta);
+std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(
+ const std::string& partition_name, const std::vector<VBMetaData>& vbmeta_images);
+
// Gets the hash descriptor for avb_partition_name from the vbmeta.
std::unique_ptr<FsAvbHashDescriptor> GetHashDescriptor(const std::string& avb_partition_name,
VBMetaData&& vbmeta);
+std::string GetAvbPropertyDescriptor(const std::string& key,
+ const std::vector<VBMetaData>& vbmeta_images);
+
} // namespace fs_mgr
} // namespace android
diff --git a/fs_mgr/libfs_avb/run_tests.sh b/fs_mgr/libfs_avb/run_tests.sh
index 5d2ce3d..3e945a4 100755
--- a/fs_mgr/libfs_avb/run_tests.sh
+++ b/fs_mgr/libfs_avb/run_tests.sh
@@ -1,8 +1,13 @@
#!/bin/sh
#
# Run host tests
-atest libfs_avb_test # Tests public libfs_avb APIs.
-atest libfs_avb_internal_test # Tests libfs_avb private APIs.
+atest --host libfs_avb_test # Tests public libfs_avb APIs.
+
+# Tests libfs_avb private APIs.
+# The tests need more time to finish, so increase the timeout to 5 mins.
+# The default timeout is only 60 seconds.
+atest --host libfs_avb_internal_test -- --test-arg \
+ com.android.tradefed.testtype.HostGTest:native-test-timeout:5m
# Run device tests
atest libfs_avb_device_test # Test public libfs_avb APIs on a device.
diff --git a/fs_mgr/libfs_avb/tests/avb_util_test.cpp b/fs_mgr/libfs_avb/tests/avb_util_test.cpp
index 6f874a6..2e34920 100644
--- a/fs_mgr/libfs_avb/tests/avb_util_test.cpp
+++ b/fs_mgr/libfs_avb/tests/avb_util_test.cpp
@@ -23,6 +23,7 @@
#include <libavb/libavb.h>
#include "avb_util.h"
+#include "fs_avb/fs_avb_util.h"
#include "fs_avb_test_util.h"
// Target classes or functions to test:
diff --git a/fs_mgr/libsnapshot/cow_reader.cpp b/fs_mgr/libsnapshot/cow_reader.cpp
index 20030b9..9b5fd2a 100644
--- a/fs_mgr/libsnapshot/cow_reader.cpp
+++ b/fs_mgr/libsnapshot/cow_reader.cpp
@@ -475,10 +475,7 @@
std::sort(other_ops.begin(), other_ops.end(), std::greater<int>());
}
- merge_op_blocks->reserve(merge_op_blocks->size() + other_ops.size());
- for (auto block : other_ops) {
- merge_op_blocks->emplace_back(block);
- }
+ merge_op_blocks->insert(merge_op_blocks->end(), other_ops.begin(), other_ops.end());
num_total_data_ops_ = merge_op_blocks->size();
if (header_.num_merge_ops > 0) {
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
index 41c6ef5..120f95b 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
@@ -396,6 +396,17 @@
DM_USER,
};
+ // Add new public entries above this line.
+
+ // Helpers for failure injection.
+ using MergeConsistencyChecker =
+ std::function<MergeFailureCode(const std::string& name, const SnapshotStatus& status)>;
+
+ void set_merge_consistency_checker(MergeConsistencyChecker checker) {
+ merge_consistency_checker_ = checker;
+ }
+ MergeConsistencyChecker merge_consistency_checker() const { return merge_consistency_checker_; }
+
private:
FRIEND_TEST(SnapshotTest, CleanFirstStageMount);
FRIEND_TEST(SnapshotTest, CreateSnapshot);
@@ -410,6 +421,7 @@
FRIEND_TEST(SnapshotTest, NoMergeBeforeReboot);
FRIEND_TEST(SnapshotTest, UpdateBootControlHal);
FRIEND_TEST(SnapshotUpdateTest, AddPartition);
+ FRIEND_TEST(SnapshotUpdateTest, ConsistencyCheckResume);
FRIEND_TEST(SnapshotUpdateTest, DaemonTransition);
FRIEND_TEST(SnapshotUpdateTest, DataWipeAfterRollback);
FRIEND_TEST(SnapshotUpdateTest, DataWipeRollbackInRecovery);
@@ -811,6 +823,7 @@
std::unique_ptr<SnapuserdClient> snapuserd_client_;
std::unique_ptr<LpMetadata> old_partition_metadata_;
std::optional<bool> is_snapshot_userspace_;
+ MergeConsistencyChecker merge_consistency_checker_;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index e6e17bd..f3de2b4 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -87,6 +87,8 @@
static constexpr char kRollbackIndicatorPath[] = "/metadata/ota/rollback-indicator";
static constexpr auto kUpdateStateCheckInterval = 2s;
+MergeFailureCode CheckMergeConsistency(const std::string& name, const SnapshotStatus& status);
+
// Note: IImageManager is an incomplete type in the header, so the default
// destructor doesn't work.
SnapshotManager::~SnapshotManager() {}
@@ -116,7 +118,9 @@
}
SnapshotManager::SnapshotManager(IDeviceInfo* device)
- : dm_(device->GetDeviceMapper()), device_(device), metadata_dir_(device_->GetMetadataDir()) {}
+ : dm_(device->GetDeviceMapper()), device_(device), metadata_dir_(device_->GetMetadataDir()) {
+ merge_consistency_checker_ = android::snapshot::CheckMergeConsistency;
+}
static std::string GetCowName(const std::string& snapshot_name) {
return snapshot_name + "-cow";
@@ -1329,14 +1333,20 @@
const SnapshotStatus& status) {
CHECK(lock);
+ return merge_consistency_checker_(name, status);
+}
+
+MergeFailureCode CheckMergeConsistency(const std::string& name, const SnapshotStatus& status) {
if (!status.compression_enabled()) {
// Do not try to verify old-style COWs yet.
return MergeFailureCode::Ok;
}
+ auto& dm = DeviceMapper::Instance();
+
std::string cow_image_name = GetMappedCowDeviceName(name, status);
std::string cow_image_path;
- if (!dm_.GetDmDevicePathByName(cow_image_name, &cow_image_path)) {
+ if (!dm.GetDmDevicePathByName(cow_image_name, &cow_image_path)) {
LOG(ERROR) << "Failed to get path for cow device: " << cow_image_name;
return MergeFailureCode::GetCowPathConsistencyCheck;
}
@@ -1400,9 +1410,11 @@
}
SnapshotUpdateStatus update_status = ReadSnapshotUpdateStatus(lock);
- CHECK(update_status.state() == UpdateState::Merging);
+ CHECK(update_status.state() == UpdateState::Merging ||
+ update_status.state() == UpdateState::MergeFailed);
CHECK(update_status.merge_phase() == MergePhase::FIRST_PHASE);
+ update_status.set_state(UpdateState::Merging);
update_status.set_merge_phase(MergePhase::SECOND_PHASE);
if (!WriteSnapshotUpdateStatus(lock, update_status)) {
return MergeFailureCode::WriteStatus;
@@ -1455,6 +1467,14 @@
}
RemoveAllUpdateState(lock);
+
+ if (UpdateUsesUserSnapshots(lock) && !device()->IsTestDevice()) {
+ if (snapuserd_client_) {
+ snapuserd_client_->DetachSnapuserd();
+ snapuserd_client_->CloseConnection();
+ snapuserd_client_ = nullptr;
+ }
+ }
}
void SnapshotManager::AcknowledgeMergeFailure(MergeFailureCode failure_code) {
@@ -3188,7 +3208,7 @@
// Terminate stale daemon if any
std::unique_ptr<SnapuserdClient> snapuserd_client =
- SnapuserdClient::Connect(kSnapuserdSocket, 10s);
+ SnapuserdClient::Connect(kSnapuserdSocket, 5s);
if (snapuserd_client) {
snapuserd_client->DetachSnapuserd();
snapuserd_client->CloseConnection();
diff --git a/fs_mgr/libsnapshot/snapshot_test.cpp b/fs_mgr/libsnapshot/snapshot_test.cpp
index 14f2d45..d76558b 100644
--- a/fs_mgr/libsnapshot/snapshot_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_test.cpp
@@ -54,6 +54,8 @@
#include <libsnapshot/mock_snapshot.h>
DEFINE_string(force_config, "", "Force testing mode (dmsnap, vab, vabc) ignoring device config.");
+DEFINE_string(force_iouring_disable, "",
+ "Force testing mode (iouring_disabled) - disable io_uring");
namespace android {
namespace snapshot {
@@ -1394,6 +1396,93 @@
}
}
+// Test that a transient merge consistency check failure can resume properly.
+TEST_F(SnapshotUpdateTest, ConsistencyCheckResume) {
+ if (!ShouldUseCompression()) {
+ // b/179111359
+ GTEST_SKIP() << "Skipping Virtual A/B Compression test";
+ }
+
+ auto old_sys_size = GetSize(sys_);
+ auto old_prd_size = GetSize(prd_);
+
+ // Grow |sys| but shrink |prd|.
+ SetSize(sys_, old_sys_size * 2);
+ sys_->set_estimate_cow_size(8_MiB);
+ SetSize(prd_, old_prd_size / 2);
+ prd_->set_estimate_cow_size(1_MiB);
+
+ AddOperationForPartitions();
+
+ ASSERT_TRUE(sm->BeginUpdate());
+ ASSERT_TRUE(sm->CreateUpdateSnapshots(manifest_));
+ ASSERT_TRUE(WriteSnapshotAndHash("sys_b"));
+ ASSERT_TRUE(WriteSnapshotAndHash("vnd_b"));
+ ASSERT_TRUE(ShiftAllSnapshotBlocks("prd_b", old_prd_size));
+
+ sync();
+
+ // Assert that source partitions aren't affected.
+ for (const auto& name : {"sys_a", "vnd_a", "prd_a"}) {
+ ASSERT_TRUE(IsPartitionUnchanged(name));
+ }
+
+ ASSERT_TRUE(sm->FinishedSnapshotWrites(false));
+
+ // Simulate shutting down the device.
+ ASSERT_TRUE(UnmapAll());
+
+ // After reboot, init does first stage mount.
+ auto init = NewManagerForFirstStageMount("_b");
+ ASSERT_NE(init, nullptr);
+ ASSERT_TRUE(init->NeedSnapshotsInFirstStageMount());
+ ASSERT_TRUE(init->CreateLogicalAndSnapshotPartitions("super", snapshot_timeout_));
+
+ // Check that the target partitions have the same content.
+ for (const auto& name : {"sys_b", "vnd_b", "prd_b"}) {
+ ASSERT_TRUE(IsPartitionUnchanged(name));
+ }
+
+ auto old_checker = init->merge_consistency_checker();
+
+ init->set_merge_consistency_checker(
+ [](const std::string&, const SnapshotStatus&) -> MergeFailureCode {
+ return MergeFailureCode::WrongMergeCountConsistencyCheck;
+ });
+
+ // Initiate the merge and wait for it to be completed.
+ ASSERT_TRUE(init->InitiateMerge());
+ ASSERT_EQ(init->IsSnapuserdRequired(), ShouldUseUserspaceSnapshots());
+ {
+ // Check that the merge phase is FIRST_PHASE until at least one call
+ // to ProcessUpdateState() occurs.
+ ASSERT_TRUE(AcquireLock());
+ auto local_lock = std::move(lock_);
+ auto status = init->ReadSnapshotUpdateStatus(local_lock.get());
+ ASSERT_EQ(status.merge_phase(), MergePhase::FIRST_PHASE);
+ }
+
+ // Merge should have failed.
+ ASSERT_EQ(UpdateState::MergeFailed, init->ProcessUpdateState());
+
+ // Simulate shutting down the device and creating partitions again.
+ ASSERT_TRUE(UnmapAll());
+
+ // Restore the checker.
+ init->set_merge_consistency_checker(std::move(old_checker));
+
+ ASSERT_TRUE(init->CreateLogicalAndSnapshotPartitions("super", snapshot_timeout_));
+
+ // Complete the merge.
+ ASSERT_EQ(UpdateState::MergeCompleted, init->ProcessUpdateState());
+
+ // Check that the target partitions have the same content after the merge.
+ for (const auto& name : {"sys_b", "vnd_b", "prd_b"}) {
+ ASSERT_TRUE(IsPartitionUnchanged(name))
+ << "Content of " << name << " changes after the merge";
+ }
+}
+
// Test that if new system partitions uses empty space in super, that region is not snapshotted.
TEST_F(SnapshotUpdateTest, DirectWriteEmptySpace) {
GTEST_SKIP() << "b/141889746";
@@ -2682,10 +2771,22 @@
}
}
+ if (FLAGS_force_iouring_disable == "iouring_disabled") {
+ if (!android::base::SetProperty("snapuserd.test.io_uring.force_disable", "1")) {
+ return testing::AssertionFailure()
+ << "Failed to disable property: snapuserd.test.io_uring.disabled";
+ }
+ }
+
int ret = RUN_ALL_TESTS();
if (FLAGS_force_config == "dmsnap") {
android::base::SetProperty("snapuserd.test.dm.snapshots", "0");
}
+
+ if (FLAGS_force_iouring_disable == "iouring_disabled") {
+ android::base::SetProperty("snapuserd.test.io_uring.force_disable", "0");
+ }
+
return ret;
}
diff --git a/fs_mgr/libsnapshot/snapuserd/Android.bp b/fs_mgr/libsnapshot/snapuserd/Android.bp
index 84bcb94..bc2bceb 100644
--- a/fs_mgr/libsnapshot/snapuserd/Android.bp
+++ b/fs_mgr/libsnapshot/snapuserd/Android.bp
@@ -86,7 +86,9 @@
"libsnapshot_cow",
"libz",
"libext4_utils",
+ "liburing",
],
+ include_dirs: ["bionic/libc/kernel"],
}
cc_binary {
@@ -182,7 +184,10 @@
"libfs_mgr",
"libdm",
"libext4_utils",
+ "liburing",
+ "libgflags",
],
+ include_dirs: ["bionic/libc/kernel"],
header_libs: [
"libstorage_literals_headers",
"libfiemap_headers",
diff --git a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.cpp b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.cpp
index 95d95cd..5109d82 100644
--- a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.cpp
@@ -16,6 +16,10 @@
#include "snapuserd_core.h"
+#include <sys/utsname.h>
+
+#include <android-base/properties.h>
+#include <android-base/scopeguard.h>
#include <android-base/strings.h>
namespace android {
@@ -288,6 +292,136 @@
return ReadMetadata();
}
+void SnapshotHandler::FinalizeIouring() {
+ io_uring_queue_exit(ring_.get());
+}
+
+bool SnapshotHandler::InitializeIouring(int io_depth) {
+ ring_ = std::make_unique<struct io_uring>();
+
+ int ret = io_uring_queue_init(io_depth, ring_.get(), 0);
+ if (ret) {
+ LOG(ERROR) << "io_uring_queue_init failed with ret: " << ret;
+ return false;
+ }
+
+ LOG(INFO) << "io_uring_queue_init success with io_depth: " << io_depth;
+ return true;
+}
+
+bool SnapshotHandler::ReadBlocksAsync(const std::string& dm_block_device,
+ const std::string& partition_name, size_t size) {
+ // 64k block size with io_depth of 64 is optimal
+ // for a single thread. We just need a single thread
+ // to read all the blocks from all dynamic partitions.
+ size_t io_depth = 64;
+ size_t bs = (64 * 1024);
+
+ if (!InitializeIouring(io_depth)) {
+ return false;
+ }
+
+ LOG(INFO) << "ReadBlockAsync start "
+ << " Block-device: " << dm_block_device << " Partition-name: " << partition_name
+ << " Size: " << size;
+
+ auto scope_guard = android::base::make_scope_guard([this]() -> void { FinalizeIouring(); });
+
+ std::vector<std::unique_ptr<struct iovec>> vecs;
+ using AlignedBuf = std::unique_ptr<void, decltype(free)*>;
+ std::vector<AlignedBuf> alignedBufVector;
+
+ /*
+ * TODO: We need aligned memory for DIRECT-IO. However, if we do
+ * a DIRECT-IO and verify the blocks then we need to inform
+ * update-verifier that block verification has been done and
+ * there is no need to repeat the same. We are not there yet
+ * as we need to see if there are any boot time improvements doing
+ * a DIRECT-IO.
+ *
+ * Also, we could use the same function post-merge for block verification;
+ * again, we can do a DIRECT-IO instead of thrashing page-cache and
+ * hurting other applications.
+ *
+ * For now, we will just create aligned buffers but rely on buffered
+ * I/O until we have perf numbers to justify DIRECT-IO.
+ */
+ for (int i = 0; i < io_depth; i++) {
+ auto iovec = std::make_unique<struct iovec>();
+ vecs.push_back(std::move(iovec));
+
+ struct iovec* iovec_ptr = vecs[i].get();
+
+ if (posix_memalign(&iovec_ptr->iov_base, BLOCK_SZ, bs)) {
+ LOG(ERROR) << "posix_memalign failed";
+ return false;
+ }
+
+ iovec_ptr->iov_len = bs;
+ alignedBufVector.push_back(
+ std::unique_ptr<void, decltype(free)*>(iovec_ptr->iov_base, free));
+ }
+
+ android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(dm_block_device.c_str(), O_RDONLY)));
+ if (fd.get() == -1) {
+ SNAP_PLOG(ERROR) << "File open failed - block-device " << dm_block_device
+ << " partition-name: " << partition_name;
+ return false;
+ }
+
+ loff_t offset = 0;
+ size_t remain = size;
+ size_t read_sz = io_depth * bs;
+
+ while (remain > 0) {
+ size_t to_read = std::min(remain, read_sz);
+ size_t queue_size = to_read / bs;
+
+ for (int i = 0; i < queue_size; i++) {
+ struct io_uring_sqe* sqe = io_uring_get_sqe(ring_.get());
+ if (!sqe) {
+ SNAP_LOG(ERROR) << "io_uring_get_sqe() failed";
+ return false;
+ }
+
+ struct iovec* iovec_ptr = vecs[i].get();
+
+ io_uring_prep_read(sqe, fd.get(), iovec_ptr->iov_base, iovec_ptr->iov_len, offset);
+ sqe->flags |= IOSQE_ASYNC;
+ offset += bs;
+ }
+
+ int ret = io_uring_submit(ring_.get());
+ if (ret != queue_size) {
+ SNAP_LOG(ERROR) << "submit got: " << ret << " wanted: " << queue_size;
+ return false;
+ }
+
+ for (int i = 0; i < queue_size; i++) {
+ struct io_uring_cqe* cqe;
+
+ int ret = io_uring_wait_cqe(ring_.get(), &cqe);
+ if (ret) {
+ SNAP_PLOG(ERROR) << "wait_cqe failed" << ret;
+ return false;
+ }
+
+ if (cqe->res < 0) {
+ SNAP_LOG(ERROR) << "io failed with res: " << cqe->res;
+ return false;
+ }
+ io_uring_cqe_seen(ring_.get(), cqe);
+ }
+
+ remain -= to_read;
+ }
+
+ LOG(INFO) << "ReadBlockAsync complete: "
+ << " Block-device: " << dm_block_device << " Partition-name: " << partition_name
+ << " Size: " << size;
+ return true;
+}
+
void SnapshotHandler::ReadBlocksToCache(const std::string& dm_block_device,
const std::string& partition_name, off_t offset,
size_t size) {
@@ -344,17 +478,22 @@
return;
}
- int num_threads = 2;
- size_t num_blocks = dev_sz >> BLOCK_SHIFT;
- size_t num_blocks_per_thread = num_blocks / num_threads;
- size_t read_sz_per_thread = num_blocks_per_thread << BLOCK_SHIFT;
- off_t offset = 0;
+ if (IsIouringSupported()) {
+ std::async(std::launch::async, &SnapshotHandler::ReadBlocksAsync, this, dm_block_device,
+ partition_name, dev_sz);
+ } else {
+ int num_threads = 2;
+ size_t num_blocks = dev_sz >> BLOCK_SHIFT;
+ size_t num_blocks_per_thread = num_blocks / num_threads;
+ size_t read_sz_per_thread = num_blocks_per_thread << BLOCK_SHIFT;
+ off_t offset = 0;
- for (int i = 0; i < num_threads; i++) {
- std::async(std::launch::async, &SnapshotHandler::ReadBlocksToCache, this, dm_block_device,
- partition_name, offset, read_sz_per_thread);
+ for (int i = 0; i < num_threads; i++) {
+ std::async(std::launch::async, &SnapshotHandler::ReadBlocksToCache, this,
+ dm_block_device, partition_name, offset, read_sz_per_thread);
- offset += read_sz_per_thread;
+ offset += read_sz_per_thread;
+ }
}
}
@@ -513,5 +652,33 @@
return ra_state;
}
+bool SnapshotHandler::IsIouringSupported() {
+ struct utsname uts;
+ unsigned int major, minor;
+
+ if (android::base::GetBoolProperty("snapuserd.test.io_uring.force_disable", false)) {
+ SNAP_LOG(INFO) << "io_uring disabled for testing";
+ return false;
+ }
+
+ if ((uname(&uts) != 0) || (sscanf(uts.release, "%u.%u", &major, &minor) != 2)) {
+ SNAP_LOG(ERROR) << "Could not parse the kernel version from uname. "
+ << " io_uring not supported";
+ return false;
+ }
+
+ // We only support kernels from 5.6 onwards, as the IOSQE_ASYNC flag and
+ // the IORING_OP_READ/WRITE opcodes were introduced only in the 5.6 kernel.
+ if (major >= 5) {
+ if (major == 5 && minor < 6) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ return android::base::GetBoolProperty("ro.virtual_ab.io_uring.enabled", false);
+}
+
} // namespace snapshot
} // namespace android
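The ReadBlocksAsync() and IsIouringSupported() additions above follow the standard liburing flow: io_uring_queue_init() to create the ring, io_uring_get_sqe() plus io_uring_prep_read() to stage a batch of reads, a single io_uring_submit() call to hand the whole batch to the kernel, and io_uring_wait_cqe()/io_uring_cqe_seen() to reap completions. The standalone sketch below shows that pattern against an ordinary file, independent of snapuserd; the path, chunk size, and queue depth are placeholders, and it assumes liburing >= 0.6 (link with -luring).

// Sketch only: batched reads via io_uring - stage, submit once, reap.
#include <fcntl.h>
#include <liburing.h>
#include <unistd.h>
#include <cstdio>
#include <vector>

int main() {
    constexpr int kDepth = 8;            // placeholder queue depth
    constexpr size_t kChunk = 64 * 1024; // placeholder read size

    struct io_uring ring;
    if (io_uring_queue_init(kDepth, &ring, 0) != 0) return 1;

    int fd = open("/tmp/some-block-or-file", O_RDONLY);  // placeholder path
    if (fd < 0) { io_uring_queue_exit(&ring); return 1; }

    std::vector<std::vector<char>> bufs(kDepth, std::vector<char>(kChunk));

    // Stage one read per buffer at consecutive offsets, then submit the batch
    // with a single system call.
    off_t offset = 0;
    for (int i = 0; i < kDepth; i++) {
        struct io_uring_sqe* sqe = io_uring_get_sqe(&ring);
        if (!sqe) break;
        io_uring_prep_read(sqe, fd, bufs[i].data(), kChunk, offset);
        offset += kChunk;
    }
    int submitted = io_uring_submit(&ring);

    // Reap exactly as many completions as were submitted.
    for (int i = 0; i < submitted; i++) {
        struct io_uring_cqe* cqe;
        if (io_uring_wait_cqe(&ring, &cqe) != 0) break;
        if (cqe->res < 0) fprintf(stderr, "read failed: %d\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);
    }

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}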
diff --git a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.h b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.h
index 1953316..b0f2d65 100644
--- a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.h
+++ b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_core.h
@@ -39,6 +39,7 @@
#include <libdm/dm.h>
#include <libsnapshot/cow_reader.h>
#include <libsnapshot/cow_writer.h>
+#include <liburing.h>
#include <snapuserd/snapuserd_buffer.h>
#include <snapuserd/snapuserd_kernel.h>
@@ -113,6 +114,19 @@
bool ReconstructDataFromCow();
void CheckOverlap(const CowOperation* cow_op);
+ bool ReadAheadAsyncIO();
+ bool ReapIoCompletions(int pending_ios_to_complete);
+ bool ReadXorData(size_t block_index, size_t xor_op_index,
+ std::vector<const CowOperation*>& xor_op_vec);
+ void ProcessXorData(size_t& block_xor_index, size_t& xor_index,
+ std::vector<const CowOperation*>& xor_op_vec, void* buffer,
+ loff_t& buffer_offset);
+ void UpdateScratchMetadata();
+
+ bool ReadAheadSyncIO();
+ bool InitializeIouring();
+ void FinalizeIouring();
+
void* read_ahead_buffer_;
void* metadata_buffer_;
@@ -131,7 +145,19 @@
std::unordered_set<uint64_t> dest_blocks_;
std::unordered_set<uint64_t> source_blocks_;
bool overlap_;
+ std::vector<uint64_t> blocks_;
+ int total_blocks_merged_ = 0;
+ std::unique_ptr<uint8_t[]> ra_temp_buffer_;
+ std::unique_ptr<uint8_t[]> ra_temp_meta_buffer_;
BufferSink bufsink_;
+
+ bool read_ahead_async_ = false;
+ // Queue depth of 32 seems optimal. We don't want
+ // to have a huge depth as it may put more memory pressure
+ // on the kernel worker threads given that we use
+ // IOSQE_ASYNC flag.
+ int queue_depth_ = 32;
+ std::unique_ptr<struct io_uring> ring_;
};
class Worker {
@@ -185,6 +211,7 @@
// Merge related ops
bool Merge();
bool MergeOrderedOps(const std::unique_ptr<ICowOpIter>& cowop_iter);
+ bool MergeOrderedOpsAsync(const std::unique_ptr<ICowOpIter>& cowop_iter);
bool MergeReplaceZeroOps(const std::unique_ptr<ICowOpIter>& cowop_iter);
int PrepareMerge(uint64_t* source_offset, int* pending_ops,
const std::unique_ptr<ICowOpIter>& cowop_iter,
@@ -193,6 +220,9 @@
sector_t ChunkToSector(chunk_t chunk) { return chunk << CHUNK_SHIFT; }
chunk_t SectorToChunk(sector_t sector) { return sector >> CHUNK_SHIFT; }
+ bool InitializeIouring();
+ void FinalizeIouring();
+
std::unique_ptr<CowReader> reader_;
BufferSink bufsink_;
XorSink xorsink_;
@@ -208,6 +238,14 @@
unique_fd base_path_merge_fd_;
unique_fd ctrl_fd_;
+ bool merge_async_ = false;
+ // Queue depth of 32 seems optimal. We don't want
+ // to have a huge depth as it may put more memory pressure
+ // on the kernel worker threads given that we use
+ // IOSQE_ASYNC flag.
+ int queue_depth_ = 32;
+ std::unique_ptr<struct io_uring> ring_;
+
std::shared_ptr<SnapshotHandler> snapuserd_;
};
@@ -292,6 +330,8 @@
bool GetRABuffer(std::unique_lock<std::mutex>* lock, uint64_t block, void* buffer);
MERGE_GROUP_STATE ProcessMergingBlock(uint64_t new_block, void* buffer);
+ bool IsIouringSupported();
+
private:
bool ReadMetadata();
sector_t ChunkToSector(chunk_t chunk) { return chunk << CHUNK_SHIFT; }
@@ -304,6 +344,11 @@
void ReadBlocksToCache(const std::string& dm_block_device, const std::string& partition_name,
off_t offset, size_t size);
+ bool InitializeIouring(int io_depth);
+ void FinalizeIouring();
+ bool ReadBlocksAsync(const std::string& dm_block_device, const std::string& partition_name,
+ size_t size);
+
// COW device
std::string cow_device_;
// Source device
@@ -352,6 +397,8 @@
bool attached_ = false;
bool is_socket_present_;
bool scratch_space_ = false;
+
+ std::unique_ptr<struct io_uring> ring_;
};
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_merge.cpp b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_merge.cpp
index fa055b7..d4d4efe 100644
--- a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_merge.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_merge.cpp
@@ -72,16 +72,16 @@
}
bool Worker::MergeReplaceZeroOps(const std::unique_ptr<ICowOpIter>& cowop_iter) {
- // Flush every 2048 ops. Since all ops are independent and there is no
+ // Flush every 8192 ops. Since all ops are independent and there is no
// dependency between COW ops, we will flush the data and the number
- // of ops merged in COW file for every 2048 ops. If there is a crash,
+ // of ops merged in COW file for every 8192 ops. If there is a crash,
// we will end up replaying some of the COW ops which were already merged.
// That is ok.
//
- // Why 2048 ops ? We can probably increase this to bigger value but just
- // need to ensure that merge makes forward progress if there are
- // crashes repeatedly which is highly unlikely.
- int total_ops_merged_per_commit = (PAYLOAD_BUFFER_SZ / BLOCK_SZ) * 8;
+ // Why 8192 ops? Increasing this further may improve merge time by another
+ // 3-4 seconds, but we still need to checkpoint regularly; 8k ops seems optimal.
+ // If there is a crash, the merge should still make forward progress.
+ int total_ops_merged_per_commit = (PAYLOAD_BUFFER_SZ / BLOCK_SZ) * 32;
int num_ops_merged = 0;
while (!cowop_iter->Done()) {
@@ -128,7 +128,7 @@
num_ops_merged += linear_blocks;
- if (num_ops_merged == total_ops_merged_per_commit) {
+ if (num_ops_merged >= total_ops_merged_per_commit) {
// Flush the data
if (fsync(base_path_merge_fd_.get()) < 0) {
SNAP_LOG(ERROR) << "Merge: ReplaceZeroOps: Failed to fsync merged data";
@@ -172,6 +172,173 @@
return true;
}
+bool Worker::MergeOrderedOpsAsync(const std::unique_ptr<ICowOpIter>& cowop_iter) {
+ void* mapped_addr = snapuserd_->GetMappedAddr();
+ void* read_ahead_buffer =
+ static_cast<void*>((char*)mapped_addr + snapuserd_->GetBufferDataOffset());
+ size_t block_index = 0;
+
+ SNAP_LOG(INFO) << "MergeOrderedOpsAsync started....";
+
+ while (!cowop_iter->Done()) {
+ const CowOperation* cow_op = &cowop_iter->Get();
+ if (!IsOrderedOp(*cow_op)) {
+ break;
+ }
+
+ SNAP_LOG(DEBUG) << "Waiting for merge begin...";
+ // Wait for RA thread to notify that the merge window
+ // is ready for merging.
+ if (!snapuserd_->WaitForMergeBegin()) {
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ snapuserd_->SetMergeInProgress(block_index);
+
+ loff_t offset = 0;
+ int num_ops = snapuserd_->GetTotalBlocksToMerge();
+
+ int pending_sqe = queue_depth_;
+ int pending_ios_to_submit = 0;
+ bool flush_required = false;
+
+ SNAP_LOG(DEBUG) << "Merging copy-ops of size: " << num_ops;
+ while (num_ops) {
+ uint64_t source_offset;
+
+ int linear_blocks = PrepareMerge(&source_offset, &num_ops, cowop_iter);
+
+ if (linear_blocks != 0) {
+ size_t io_size = (linear_blocks * BLOCK_SZ);
+
+ // Get an SQE entry from the ring and populate the I/O variables
+ struct io_uring_sqe* sqe = io_uring_get_sqe(ring_.get());
+ if (!sqe) {
+ SNAP_PLOG(ERROR) << "io_uring_get_sqe failed during merge-ordered ops";
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ io_uring_prep_write(sqe, base_path_merge_fd_.get(),
+ (char*)read_ahead_buffer + offset, io_size, source_offset);
+
+ offset += io_size;
+ num_ops -= linear_blocks;
+
+ pending_sqe -= 1;
+ pending_ios_to_submit += 1;
+ sqe->flags |= IOSQE_ASYNC;
+ }
+
+ // Ring is full or no more COW ops to be merged in this batch
+ if (pending_sqe == 0 || num_ops == 0 || (linear_blocks == 0 && pending_ios_to_submit)) {
+ // If this is a last set of COW ops to be merged in this batch, we need
+ // to sync the merged data. We will try to grab an SQE entry
+ // and set the FSYNC command; additionally, make sure that
+ // the fsync is done after all the I/O operations queued
+ // in the ring is completed by setting IOSQE_IO_DRAIN.
+ //
+ // If there is no space in the ring, we will flush it later
+ // by explicitly calling fsync() system call.
+ if (num_ops == 0 || (linear_blocks == 0 && pending_ios_to_submit)) {
+ if (pending_sqe != 0) {
+ struct io_uring_sqe* sqe = io_uring_get_sqe(ring_.get());
+ if (!sqe) {
+ // very unlikely but let's continue and not fail the
+ // merge - we will flush it later
+ SNAP_PLOG(ERROR) << "io_uring_get_sqe failed during merge-ordered ops";
+ flush_required = true;
+ } else {
+ io_uring_prep_fsync(sqe, base_path_merge_fd_.get(), 0);
+ // Drain the queue before fsync
+ io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
+ pending_sqe -= 1;
+ flush_required = false;
+ pending_ios_to_submit += 1;
+ sqe->flags |= IOSQE_ASYNC;
+ }
+ } else {
+ flush_required = true;
+ }
+ }
+
+ // Submit the IO for all the COW ops in a single syscall
+ int ret = io_uring_submit(ring_.get());
+ if (ret != pending_ios_to_submit) {
+ SNAP_PLOG(ERROR)
+ << "io_uring_submit failed for read-ahead: "
+ << " io submit: " << ret << " expected: " << pending_ios_to_submit;
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ int pending_ios_to_complete = pending_ios_to_submit;
+ pending_ios_to_submit = 0;
+
+ // Reap I/O completions
+ while (pending_ios_to_complete) {
+ struct io_uring_cqe* cqe;
+
+ ret = io_uring_wait_cqe(ring_.get(), &cqe);
+ if (ret) {
+ SNAP_LOG(ERROR) << "Read-ahead - io_uring_wait_cqe failed: " << ret;
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ if (cqe->res < 0) {
+ SNAP_LOG(ERROR)
+ << "Read-ahead - io_uring_Wait_cqe failed with res: " << cqe->res;
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ io_uring_cqe_seen(ring_.get(), cqe);
+ pending_ios_to_complete -= 1;
+ }
+
+ pending_sqe = queue_depth_;
+ }
+
+ if (linear_blocks == 0) {
+ break;
+ }
+ }
+
+ // Verify all ops are merged
+ CHECK(num_ops == 0);
+
+ // Flush the data
+ if (flush_required && (fsync(base_path_merge_fd_.get()) < 0)) {
+ SNAP_LOG(ERROR) << " Failed to fsync merged data";
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ // Merge is done and data is on disk. Update the COW Header about
+ // the merge completion
+ if (!snapuserd_->CommitMerge(snapuserd_->GetTotalBlocksToMerge())) {
+ SNAP_LOG(ERROR) << " Failed to commit the merged block in the header";
+ snapuserd_->SetMergeFailed(block_index);
+ return false;
+ }
+
+ SNAP_LOG(DEBUG) << "Block commit of size: " << snapuserd_->GetTotalBlocksToMerge();
+ // Mark the block as merge complete
+ snapuserd_->SetMergeCompleted(block_index);
+
+ // Notify RA thread that the merge thread is ready to merge the next
+ // window
+ snapuserd_->NotifyRAForMergeReady();
+
+ // Get the next block
+ block_index += 1;
+ }
+
+ return true;
+}
+
bool Worker::MergeOrderedOps(const std::unique_ptr<ICowOpIter>& cowop_iter) {
void* mapped_addr = snapuserd_->GetMappedAddr();
void* read_ahead_buffer =
@@ -260,15 +427,23 @@
bool Worker::Merge() {
std::unique_ptr<ICowOpIter> cowop_iter = reader_->GetMergeOpIter();
- // Start with Copy and Xor ops
- if (!MergeOrderedOps(cowop_iter)) {
- SNAP_LOG(ERROR) << "Merge failed for ordered ops";
- snapuserd_->MergeFailed();
- return false;
+ if (merge_async_) {
+ if (!MergeOrderedOpsAsync(cowop_iter)) {
+ SNAP_LOG(ERROR) << "Merge failed for ordered ops";
+ snapuserd_->MergeFailed();
+ return false;
+ }
+ SNAP_LOG(INFO) << "MergeOrderedOpsAsync completed.....";
+ } else {
+ // Start with Copy and Xor ops
+ if (!MergeOrderedOps(cowop_iter)) {
+ SNAP_LOG(ERROR) << "Merge failed for ordered ops";
+ snapuserd_->MergeFailed();
+ return false;
+ }
+ SNAP_LOG(INFO) << "MergeOrderedOps completed.....";
}
- SNAP_LOG(INFO) << "MergeOrderedOps completed...";
-
// Replace and Zero ops
if (!MergeReplaceZeroOps(cowop_iter)) {
SNAP_LOG(ERROR) << "Merge failed for replace/zero ops";
@@ -281,6 +456,31 @@
return true;
}
+bool Worker::InitializeIouring() {
+ if (!snapuserd_->IsIouringSupported()) {
+ return false;
+ }
+
+ ring_ = std::make_unique<struct io_uring>();
+
+ int ret = io_uring_queue_init(queue_depth_, ring_.get(), 0);
+ if (ret) {
+ LOG(ERROR) << "Merge: io_uring_queue_init failed with ret: " << ret;
+ return false;
+ }
+
+ merge_async_ = true;
+
+ LOG(INFO) << "Merge: io_uring initialized with queue depth: " << queue_depth_;
+ return true;
+}
+
+void Worker::FinalizeIouring() {
+ if (merge_async_) {
+ io_uring_queue_exit(ring_.get());
+ }
+}
+
bool Worker::RunMergeThread() {
SNAP_LOG(DEBUG) << "Waiting for merge begin...";
if (!snapuserd_->WaitForMergeBegin()) {
@@ -296,10 +496,13 @@
return false;
}
+ InitializeIouring();
+
if (!Merge()) {
return false;
}
+ FinalizeIouring();
CloseFds();
reader_->CloseCowFd();
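One detail worth calling out in MergeOrderedOpsAsync() above: when a batch of merge writes is complete, it queues an fsync SQE flagged with IOSQE_IO_DRAIN so the flush is ordered after every write already queued in the ring, and it falls back to a plain fsync() only when no SQE slot is left. Below is a minimal, self-contained sketch of that drained-fsync batching; the file path, buffer, and helper name are placeholders rather than snapuserd code, and it assumes liburing >= 0.6.

// Sketch only: queue a batch of writes followed by an fsync marked with
// IOSQE_IO_DRAIN, so the flush runs only after all queued writes complete.
#include <fcntl.h>
#include <liburing.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstring>

static bool write_batch_with_drained_fsync(struct io_uring* ring, int fd,
                                           const char* buf, size_t len, int nwrites) {
    off_t offset = 0;
    int queued = 0;
    for (int i = 0; i < nwrites; i++) {
        struct io_uring_sqe* sqe = io_uring_get_sqe(ring);
        if (!sqe) return false;
        io_uring_prep_write(sqe, fd, buf, len, offset);
        offset += len;
        queued++;
    }
    struct io_uring_sqe* sqe = io_uring_get_sqe(ring);
    if (!sqe) return false;
    io_uring_prep_fsync(sqe, fd, 0);
    io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);  // order the fsync after the writes
    queued++;

    if (io_uring_submit(ring) != queued) return false;

    for (int i = 0; i < queued; i++) {
        struct io_uring_cqe* cqe;
        if (io_uring_wait_cqe(ring, &cqe) != 0) return false;
        bool ok = cqe->res >= 0;
        io_uring_cqe_seen(ring, cqe);
        if (!ok) return false;
    }
    return true;
}

int main() {
    struct io_uring ring;
    if (io_uring_queue_init(8, &ring, 0) != 0) return 1;
    int fd = open("/tmp/drained-fsync-sketch.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
    char block[4096];
    memset(block, 0xab, sizeof(block));
    bool ok = fd >= 0 && write_batch_with_drained_fsync(&ring, fd, block, sizeof(block), 4);
    if (fd >= 0) close(fd);
    io_uring_queue_exit(&ring);
    return ok ? 0 : 1;
}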
diff --git a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_readahead.cpp b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_readahead.cpp
index 9e8ccfb..26c5f19 100644
--- a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_readahead.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_readahead.cpp
@@ -183,25 +183,311 @@
return true;
}
-bool ReadAhead::ReadAheadIOStart() {
- // Check if the data has to be constructed from the COW file.
- // This will be true only once during boot up after a crash
- // during merge.
- if (snapuserd_->ShouldReconstructDataFromCow()) {
- return ReconstructDataFromCow();
- }
+/*
+ * With io_uring, the data flow is slightly different.
+ *
+ * The data flow is as follows:
+ *
+ * 1: Queue the I/O requests to be read from the backing source device.
+ * This is done by retrieving an SQE entry from the ring and populating
+ * it. Note that the I/O is not submitted yet.
+ *
+ * 2: Once the ring is full (aka queue_depth), we submit all the queued
+ * I/O requests with a single system call. This essentially cuts down
+ * "queue_depth" system calls to a single system call.
+ *
+ * 3: Once the I/O is submitted, the user-space thread starts processing
+ * the XOR operations. This happens in parallel while the I/O requests are
+ * serviced by the kernel. This is OK because, for XOR operations, we first
+ * need to retrieve the compressed data from the COW block device. Thus, the
+ * backing source I/O is offloaded to the kernel while user space fetches
+ * the data for the XOR operations in parallel.
+ *
+ * 4: After the XOR data has been read from the COW device, poll the
+ * completion queue for all the submitted I/O. If the I/Os have already
+ * completed, the user-space thread just reads the CQE entries from the ring
+ * without any system call. If none of the I/Os have completed yet, the
+ * user-space thread makes a system call and waits for the I/O completions.
+ *
+ * Flow diagram:
+ * SQ-RING
+ * SQE1 <----------- Fetch SQE1 Entry ---------- |SQE1||SQE2|SQE3|
+ *
+ * SQE1 ------------ Populate SQE1 Entry ------> |SQE1-X||SQE2|SQE3|
+ *
+ * SQE2 <----------- Fetch SQE2 Entry ---------- |SQE1-X||SQE2|SQE3|
+ *
+ * SQE2 ------------ Populate SQE2 Entry ------> |SQE1-X||SQE2-X|SQE3|
+ *
+ * SQE3 <----------- Fetch SQE3 Entry ---------- |SQE1-X||SQE2-X|SQE3|
+ *
+ * SQE3 ------------ Populate SQE3 Entry ------> |SQE1-X||SQE2-X|SQE3-X|
+ *
+ * Submit-IO ---------------------------------> |SQE1-X||SQE2-X|SQE3-X|
+ * | |
+ * | Process I/O entries in kernel
+ * | |
+ * Retrieve XOR |
+ * data from COW |
+ * | |
+ * | |
+ * Fetch CQ completions
+ * | CQ-RING
+ * |CQE1-X||CQE2-X|CQE3-X|
+ * |
+ * CQE1 <------------Fetch CQE1 Entry |CQE1||CQE2-X|CQE3-X|
+ * CQE2 <------------Fetch CQE2 Entry |CQE1||CQE2-|CQE3-X|
+ * CQE3 <------------Fetch CQE3 Entry |CQE1||CQE2-|CQE3-|
+ * |
+ * |
+ * Continue Next set of operations in the RING
+ */
- std::vector<uint64_t> blocks;
-
+bool ReadAhead::ReadAheadAsyncIO() {
int num_ops = (snapuserd_->GetBufferDataSize()) / BLOCK_SZ;
loff_t buffer_offset = 0;
- int total_blocks_merged = 0;
+ total_blocks_merged_ = 0;
overlap_ = false;
dest_blocks_.clear();
source_blocks_.clear();
+ blocks_.clear();
std::vector<const CowOperation*> xor_op_vec;
- auto ra_temp_buffer = std::make_unique<uint8_t[]>(snapuserd_->GetBufferDataSize());
+ int pending_sqe = queue_depth_;
+ int pending_ios_to_submit = 0;
+
+ size_t xor_op_index = 0;
+ size_t block_index = 0;
+
+ loff_t offset = 0;
+
+ bufsink_.ResetBufferOffset();
+
+ // Number of ops to be merged in this window. This is a fixed size
+ // except for the last window wherein the number of ops can be less
+ // than the size of the RA window.
+ while (num_ops) {
+ uint64_t source_offset;
+ struct io_uring_sqe* sqe;
+
+ int linear_blocks = PrepareNextReadAhead(&source_offset, &num_ops, blocks_, xor_op_vec);
+
+ if (linear_blocks != 0) {
+ size_t io_size = (linear_blocks * BLOCK_SZ);
+
+ // Get an SQE entry from the ring and populate the I/O variables
+ sqe = io_uring_get_sqe(ring_.get());
+ if (!sqe) {
+ SNAP_PLOG(ERROR) << "io_uring_get_sqe failed during read-ahead";
+ snapuserd_->ReadAheadIOFailed();
+ return false;
+ }
+
+ io_uring_prep_read(sqe, backing_store_fd_.get(),
+ (char*)ra_temp_buffer_.get() + buffer_offset, io_size,
+ source_offset);
+
+ buffer_offset += io_size;
+ num_ops -= linear_blocks;
+ total_blocks_merged_ += linear_blocks;
+
+ pending_sqe -= 1;
+ pending_ios_to_submit += 1;
+ sqe->flags |= IOSQE_ASYNC;
+ }
+
+ // pending_sqe == 0 : Ring is full
+ //
+ // num_ops == 0 : All the COW ops in this batch are processed - Submit
+ // pending I/O requests in the ring
+ //
+ // linear_blocks == 0 : All the COW ops processing is done. Submit
+ // pending I/O requests in the ring
+ if (pending_sqe == 0 || num_ops == 0 || (linear_blocks == 0 && pending_ios_to_submit)) {
+ // Submit the IO for all the COW ops in a single syscall
+ int ret = io_uring_submit(ring_.get());
+ if (ret != pending_ios_to_submit) {
+ SNAP_PLOG(ERROR) << "io_uring_submit failed for read-ahead: "
+ << " io submit: " << ret << " expected: " << pending_ios_to_submit;
+ snapuserd_->ReadAheadIOFailed();
+ return false;
+ }
+
+ int pending_ios_to_complete = pending_ios_to_submit;
+ pending_ios_to_submit = 0;
+
+ bool xor_processing_required = (xor_op_vec.size() > 0);
+
+ // Read XOR data from COW file in parallel when I/O's are in-flight
+ if (xor_processing_required && !ReadXorData(block_index, xor_op_index, xor_op_vec)) {
+ SNAP_LOG(ERROR) << "ReadXorData failed";
+ snapuserd_->ReadAheadIOFailed();
+ return false;
+ }
+
+ // Fetch I/O completions
+ if (!ReapIoCompletions(pending_ios_to_complete)) {
+ SNAP_LOG(ERROR) << "ReapIoCompletions failed";
+ snapuserd_->ReadAheadIOFailed();
+ return false;
+ }
+
+ // Retrieve XOR'ed data
+ if (xor_processing_required) {
+ ProcessXorData(block_index, xor_op_index, xor_op_vec, ra_temp_buffer_.get(),
+ offset);
+ }
+
+ // All the I/O in the ring is processed.
+ pending_sqe = queue_depth_;
+ }
+
+ if (linear_blocks == 0) {
+ break;
+ }
+ }
+
+ // Done with merging ordered ops
+ if (RAIterDone() && total_blocks_merged_ == 0) {
+ return true;
+ }
+
+ CHECK(blocks_.size() == total_blocks_merged_);
+
+ UpdateScratchMetadata();
+
+ return true;
+}
+
+void ReadAhead::UpdateScratchMetadata() {
+ loff_t metadata_offset = 0;
+
+ struct ScratchMetadata* bm = reinterpret_cast<struct ScratchMetadata*>(
+ (char*)ra_temp_meta_buffer_.get() + metadata_offset);
+
+ bm->new_block = 0;
+ bm->file_offset = 0;
+
+ loff_t file_offset = snapuserd_->GetBufferDataOffset();
+
+ for (size_t block_index = 0; block_index < blocks_.size(); block_index++) {
+ uint64_t new_block = blocks_[block_index];
+ // Track the metadata blocks which are stored in scratch space
+ bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer_.get() +
+ metadata_offset);
+
+ bm->new_block = new_block;
+ bm->file_offset = file_offset;
+
+ metadata_offset += sizeof(struct ScratchMetadata);
+ file_offset += BLOCK_SZ;
+ }
+
+ // This is important - explicitly set the contents to zero. This is used
+ // when re-constructing the data after crash. This indicates end of
+ // reading metadata contents when re-constructing the data
+ bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer_.get() +
+ metadata_offset);
+ bm->new_block = 0;
+ bm->file_offset = 0;
+}
+
+bool ReadAhead::ReapIoCompletions(int pending_ios_to_complete) {
+ // Reap I/O completions
+ while (pending_ios_to_complete) {
+ struct io_uring_cqe* cqe;
+
+ int ret = io_uring_wait_cqe(ring_.get(), &cqe);
+ if (ret) {
+ SNAP_LOG(ERROR) << "Read-ahead - io_uring_wait_cqe failed: " << ret;
+ return false;
+ }
+
+ if (cqe->res < 0) {
+ SNAP_LOG(ERROR) << "Read-ahead - io_uring_Wait_cqe failed with res: " << cqe->res;
+ return false;
+ }
+
+ io_uring_cqe_seen(ring_.get(), cqe);
+ pending_ios_to_complete -= 1;
+ }
+
+ return true;
+}
+
+void ReadAhead::ProcessXorData(size_t& block_xor_index, size_t& xor_index,
+ std::vector<const CowOperation*>& xor_op_vec, void* buffer,
+ loff_t& buffer_offset) {
+ loff_t xor_buf_offset = 0;
+
+ while (block_xor_index < blocks_.size()) {
+ void* bufptr = static_cast<void*>((char*)buffer + buffer_offset);
+ uint64_t new_block = blocks_[block_xor_index];
+
+ if (xor_index < xor_op_vec.size()) {
+ const CowOperation* xor_op = xor_op_vec[xor_index];
+
+ // Check if this block is an XOR op
+ if (xor_op->new_block == new_block) {
+ // Pointer to the data read from base device
+ uint8_t* buffer = reinterpret_cast<uint8_t*>(bufptr);
+ // Get the xor'ed data read from COW device
+ uint8_t* xor_data = reinterpret_cast<uint8_t*>((char*)bufsink_.GetPayloadBufPtr() +
+ xor_buf_offset);
+
+ for (size_t byte_offset = 0; byte_offset < BLOCK_SZ; byte_offset++) {
+ buffer[byte_offset] ^= xor_data[byte_offset];
+ }
+
+ // Move to next XOR op
+ xor_index += 1;
+ xor_buf_offset += BLOCK_SZ;
+ }
+ }
+
+ buffer_offset += BLOCK_SZ;
+ block_xor_index += 1;
+ }
+
+ bufsink_.ResetBufferOffset();
+}
+
+bool ReadAhead::ReadXorData(size_t block_index, size_t xor_op_index,
+ std::vector<const CowOperation*>& xor_op_vec) {
+ // Process the XOR ops in parallel - We will be reading data
+ // from COW file for XOR ops processing.
+ while (block_index < blocks_.size()) {
+ uint64_t new_block = blocks_[block_index];
+
+ if (xor_op_index < xor_op_vec.size()) {
+ const CowOperation* xor_op = xor_op_vec[xor_op_index];
+ if (xor_op->new_block == new_block) {
+ if (!reader_->ReadData(*xor_op, &bufsink_)) {
+ SNAP_LOG(ERROR)
+ << " ReadAhead - XorOp Read failed for block: " << xor_op->new_block;
+ return false;
+ }
+
+ xor_op_index += 1;
+ bufsink_.UpdateBufferOffset(BLOCK_SZ);
+ }
+ }
+ block_index += 1;
+ }
+ return true;
+}
+
+bool ReadAhead::ReadAheadSyncIO() {
+ int num_ops = (snapuserd_->GetBufferDataSize()) / BLOCK_SZ;
+ loff_t buffer_offset = 0;
+ total_blocks_merged_ = 0;
+ overlap_ = false;
+ dest_blocks_.clear();
+ source_blocks_.clear();
+ blocks_.clear();
+ std::vector<const CowOperation*> xor_op_vec;
+
+ bufsink_.ResetBufferOffset();
// Number of ops to be merged in this window. This is a fixed size
// except for the last window wherein the number of ops can be less
@@ -209,7 +495,7 @@
while (num_ops) {
uint64_t source_offset;
- int linear_blocks = PrepareNextReadAhead(&source_offset, &num_ops, blocks, xor_op_vec);
+ int linear_blocks = PrepareNextReadAhead(&source_offset, &num_ops, blocks_, xor_op_vec);
if (linear_blocks == 0) {
// No more blocks to read
SNAP_LOG(DEBUG) << " Read-ahead completed....";
@@ -220,7 +506,7 @@
// Read from the base device consecutive set of blocks in one shot
if (!android::base::ReadFullyAtOffset(backing_store_fd_,
- (char*)ra_temp_buffer.get() + buffer_offset, io_size,
+ (char*)ra_temp_buffer_.get() + buffer_offset, io_size,
source_offset)) {
SNAP_PLOG(ERROR) << "Ordered-op failed. Read from backing store: "
                             << backing_store_device_ << " at block: " << source_offset / BLOCK_SZ
@@ -233,21 +519,19 @@
}
buffer_offset += io_size;
- total_blocks_merged += linear_blocks;
+ total_blocks_merged_ += linear_blocks;
num_ops -= linear_blocks;
}
// Done with merging ordered ops
- if (RAIterDone() && total_blocks_merged == 0) {
+ if (RAIterDone() && total_blocks_merged_ == 0) {
return true;
}
loff_t metadata_offset = 0;
- auto ra_temp_meta_buffer = std::make_unique<uint8_t[]>(snapuserd_->GetBufferMetadataSize());
-
struct ScratchMetadata* bm = reinterpret_cast<struct ScratchMetadata*>(
- (char*)ra_temp_meta_buffer.get() + metadata_offset);
+ (char*)ra_temp_meta_buffer_.get() + metadata_offset);
bm->new_block = 0;
bm->file_offset = 0;
@@ -255,12 +539,15 @@
loff_t file_offset = snapuserd_->GetBufferDataOffset();
loff_t offset = 0;
- CHECK(blocks.size() == total_blocks_merged);
+ CHECK(blocks_.size() == total_blocks_merged_);
size_t xor_index = 0;
- for (size_t block_index = 0; block_index < blocks.size(); block_index++) {
- void* bufptr = static_cast<void*>((char*)ra_temp_buffer.get() + offset);
- uint64_t new_block = blocks[block_index];
+ BufferSink bufsink;
+ bufsink.Initialize(BLOCK_SZ * 2);
+
+ for (size_t block_index = 0; block_index < blocks_.size(); block_index++) {
+ void* bufptr = static_cast<void*>((char*)ra_temp_buffer_.get() + offset);
+ uint64_t new_block = blocks_[block_index];
if (xor_index < xor_op_vec.size()) {
const CowOperation* xor_op = xor_op_vec[xor_index];
@@ -268,17 +555,16 @@
// Check if this block is an XOR op
if (xor_op->new_block == new_block) {
// Read the xor'ed data from COW
- if (!reader_->ReadData(*xor_op, &bufsink_)) {
+ if (!reader_->ReadData(*xor_op, &bufsink)) {
SNAP_LOG(ERROR)
<< " ReadAhead - XorOp Read failed for block: " << xor_op->new_block;
snapuserd_->ReadAheadIOFailed();
return false;
}
-
// Pointer to the data read from base device
uint8_t* buffer = reinterpret_cast<uint8_t*>(bufptr);
// Get the xor'ed data read from COW device
- uint8_t* xor_data = reinterpret_cast<uint8_t*>(bufsink_.GetPayloadBufPtr());
+ uint8_t* xor_data = reinterpret_cast<uint8_t*>(bufsink.GetPayloadBufPtr());
// Retrieve the original data
for (size_t byte_offset = 0; byte_offset < BLOCK_SZ; byte_offset++) {
@@ -292,7 +578,7 @@
offset += BLOCK_SZ;
// Track the metadata blocks which are stored in scratch space
- bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer.get() +
+ bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer_.get() +
metadata_offset);
bm->new_block = new_block;
@@ -308,11 +594,34 @@
    // This is important: explicitly zero out this terminating entry. It is
    // used when re-constructing the data after a crash and marks the end of
    // the metadata contents during reconstruction.
- bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer.get() +
+ bm = reinterpret_cast<struct ScratchMetadata*>((char*)ra_temp_meta_buffer_.get() +
metadata_offset);
bm->new_block = 0;
bm->file_offset = 0;
+ return true;
+}
+
+bool ReadAhead::ReadAheadIOStart() {
+ // Check if the data has to be constructed from the COW file.
+ // This will be true only once during boot up after a crash
+ // during merge.
+ if (snapuserd_->ShouldReconstructDataFromCow()) {
+ return ReconstructDataFromCow();
+ }
+
+ if (read_ahead_async_) {
+ if (!ReadAheadAsyncIO()) {
+ SNAP_LOG(ERROR) << "ReadAheadAsyncIO failed - io_uring processing failure.";
+ return false;
+ }
+ } else {
+ if (!ReadAheadSyncIO()) {
+ SNAP_LOG(ERROR) << "ReadAheadSyncIO failed";
+ return false;
+ }
+ }
+
+    // Wait for the merge to finish for the previous RA window. We shouldn't
+    // touch the scratch space until the merge of the previous RA window is
+    // complete. If there is a crash during this time frame, merge should resume
@@ -322,22 +631,22 @@
}
// Copy the data to scratch space
- memcpy(metadata_buffer_, ra_temp_meta_buffer.get(), snapuserd_->GetBufferMetadataSize());
- memcpy(read_ahead_buffer_, ra_temp_buffer.get(), total_blocks_merged * BLOCK_SZ);
+ memcpy(metadata_buffer_, ra_temp_meta_buffer_.get(), snapuserd_->GetBufferMetadataSize());
+ memcpy(read_ahead_buffer_, ra_temp_buffer_.get(), total_blocks_merged_ * BLOCK_SZ);
- offset = 0;
+ loff_t offset = 0;
std::unordered_map<uint64_t, void*>& read_ahead_buffer_map = snapuserd_->GetReadAheadMap();
read_ahead_buffer_map.clear();
- for (size_t block_index = 0; block_index < blocks.size(); block_index++) {
+ for (size_t block_index = 0; block_index < blocks_.size(); block_index++) {
void* bufptr = static_cast<void*>((char*)read_ahead_buffer_ + offset);
- uint64_t new_block = blocks[block_index];
+ uint64_t new_block = blocks_[block_index];
read_ahead_buffer_map[new_block] = bufptr;
offset += BLOCK_SZ;
}
- snapuserd_->SetMergedBlockCountForNextCommit(total_blocks_merged);
+ snapuserd_->SetMergedBlockCountForNextCommit(total_blocks_merged_);
    // Flush the data only if we have overlapping blocks in the region
// Notify the Merge thread to resume merging this window
@@ -350,6 +659,33 @@
return true;
}
+bool ReadAhead::InitializeIouring() {
+ if (!snapuserd_->IsIouringSupported()) {
+ return false;
+ }
+
+ ring_ = std::make_unique<struct io_uring>();
+
+ int ret = io_uring_queue_init(queue_depth_, ring_.get(), 0);
+ if (ret) {
+ SNAP_LOG(ERROR) << "io_uring_queue_init failed with ret: " << ret;
+ return false;
+ }
+
+ // For xor ops processing
+ bufsink_.Initialize(PAYLOAD_BUFFER_SZ * 2);
+ read_ahead_async_ = true;
+
+ SNAP_LOG(INFO) << "Read-ahead: io_uring initialized with queue depth: " << queue_depth_;
+ return true;
+}
+
+void ReadAhead::FinalizeIouring() {
+ if (read_ahead_async_) {
+ io_uring_queue_exit(ring_.get());
+ }
+}
+
bool ReadAhead::RunThread() {
if (!InitializeFds()) {
return false;
@@ -363,14 +699,18 @@
InitializeRAIter();
+ InitializeIouring();
+
while (!RAIterDone()) {
if (!ReadAheadIOStart()) {
break;
}
}
+ FinalizeIouring();
CloseFds();
reader_->CloseCowFd();
+
SNAP_LOG(INFO) << " ReadAhead thread terminating....";
return true;
}
@@ -434,8 +774,9 @@
metadata_buffer_ =
static_cast<void*>((char*)mapped_addr + snapuserd_->GetBufferMetadataOffset());
read_ahead_buffer_ = static_cast<void*>((char*)mapped_addr + snapuserd_->GetBufferDataOffset());
- // For xor ops
- bufsink_.Initialize(PAYLOAD_BUFFER_SZ);
+
+ ra_temp_buffer_ = std::make_unique<uint8_t[]>(snapuserd_->GetBufferDataSize());
+ ra_temp_meta_buffer_ = std::make_unique<uint8_t[]>(snapuserd_->GetBufferMetadataSize());
}
} // namespace snapshot
diff --git a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_test.cpp b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_test.cpp
index 1c3e04b..d670f1e 100644
--- a/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_test.cpp
+++ b/fs_mgr/libsnapshot/snapuserd/user-space-merge/snapuserd_test.cpp
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <android-base/strings.h>
+#include <gflags/gflags.h>
+
#include <fcntl.h>
#include <linux/fs.h>
#include <linux/memfd.h>
@@ -27,6 +30,7 @@
#include <string_view>
#include <android-base/file.h>
+#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <fs_mgr/file_wait.h>
#include <gtest/gtest.h>
@@ -38,6 +42,8 @@
#include "snapuserd_core.h"
+DEFINE_string(force_config, "", "Force testing mode with iouring disabled");
+
namespace android {
namespace snapshot {
@@ -857,5 +863,23 @@
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
+
+ gflags::ParseCommandLineFlags(&argc, &argv, false);
+
+ android::base::SetProperty("ctl.stop", "snapuserd");
+
+ if (FLAGS_force_config == "iouring_disabled") {
+ if (!android::base::SetProperty("snapuserd.test.io_uring.force_disable", "1")) {
+ return testing::AssertionFailure()
+                   << "Failed to set property: snapuserd.test.io_uring.force_disable";
+ }
+ }
+
+ int ret = RUN_ALL_TESTS();
+
+ if (FLAGS_force_config == "iouring_disabled") {
+ android::base::SetProperty("snapuserd.test.io_uring.force_disable", "0");
+ }
+
+ return ret;
}
diff --git a/healthd/OWNERS b/healthd/OWNERS
index d3f8758..e64c33d 100644
--- a/healthd/OWNERS
+++ b/healthd/OWNERS
@@ -1,2 +1 @@
elsk@google.com
-hridya@google.com
diff --git a/init/builtins.cpp b/init/builtins.cpp
index cc445be..0eb894b 100644
--- a/init/builtins.cpp
+++ b/init/builtins.cpp
@@ -117,7 +117,7 @@
android::base::GetMinimumLogSeverity() > android::base::DEBUG) {}
template <typename T>
- operator android::base::expected<T, ResultError<int>>() {
+ operator android::base::expected<T, ResultError<android::base::Errno>>() {
if (ignore_error_) {
return {};
}
diff --git a/libcutils/fs_config.cpp b/libcutils/fs_config.cpp
index e9497a8..a6835fc 100644
--- a/libcutils/fs_config.cpp
+++ b/libcutils/fs_config.cpp
@@ -211,6 +211,7 @@
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/resize2fs" },
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/snapuserd" },
{ 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/tune2fs" },
+ { 00755, AID_ROOT, AID_ROOT, 0, "first_stage_ramdisk/system/bin/fsck.f2fs" },
// generic defaults
{ 00755, AID_ROOT, AID_ROOT, 0, "bin/*" },
{ 00640, AID_ROOT, AID_SHELL, 0, "fstab.*" },
diff --git a/libqtaguid/Android.bp b/libqtaguid/Android.bp
deleted file mode 100644
index 64db095..0000000
--- a/libqtaguid/Android.bp
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-cc_library_headers {
- name: "libqtaguid_headers",
- vendor_available: false,
- host_supported: false,
- export_include_dirs: ["include"],
- target: {
- linux_bionic: {
- enabled: true,
- },
- },
-}
-
-cc_library {
- name: "libqtaguid",
- vendor_available: false,
- host_supported: false,
- target: {
- android: {
- srcs: [
- "qtaguid.c",
- ],
- sanitize: {
- misc_undefined: ["integer"],
- },
- },
- },
-
- shared_libs: ["liblog"],
- header_libs: [
- "libqtaguid_headers",
- ],
- export_header_lib_headers: ["libqtaguid_headers"],
- local_include_dirs: ["include"],
-
- cflags: [
- "-Werror",
- "-Wall",
- "-Wextra",
- ],
-}
diff --git a/libqtaguid/include/qtaguid/qtaguid.h b/libqtaguid/include/qtaguid/qtaguid.h
deleted file mode 100644
index 72285e5..0000000
--- a/libqtaguid/include/qtaguid/qtaguid.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LEGACY_QTAGUID_H
-#define __LEGACY_QTAGUID_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Set tags (and owning UIDs) for network sockets. The socket must be untagged
- * by calling qtaguid_untagSocket() before closing it, otherwise the qtaguid
- * module will keep a reference to it even after close.
- */
-extern int legacy_tagSocket(int sockfd, int tag, uid_t uid);
-
-/*
- * Untag a network socket before closing.
- */
-extern int legacy_untagSocket(int sockfd);
-
-/*
- * For the given uid, switch counter sets.
- * The kernel only keeps a limited number of sets.
- * 2 for now.
- */
-extern int legacy_setCounterSet(int counterSetNum, uid_t uid);
-
-/*
- * Delete all tag info that relates to the given tag an uid.
- * If the tag is 0, then ALL info about the uid is freeded.
- * The delete data also affects active tagged socketd, which are
- * then untagged.
- * The calling process can only operate on its own tags.
- * Unless it is part of the happy AID_NET_BW_ACCT group.
- * In which case it can clobber everything.
- */
-extern int legacy_deleteTagData(int tag, uid_t uid);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __LEGACY_QTAGUID_H */
diff --git a/libqtaguid/qtaguid.c b/libqtaguid/qtaguid.c
deleted file mode 100644
index cd38bad..0000000
--- a/libqtaguid/qtaguid.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-** Copyright 2011, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-// #define LOG_NDEBUG 0
-
-#define LOG_TAG "qtaguid"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <log/log.h>
-#include <qtaguid/qtaguid.h>
-
-static const char* CTRL_PROCPATH = "/proc/net/xt_qtaguid/ctrl";
-static const int CTRL_MAX_INPUT_LEN = 128;
-
-/*
- * One per proccess.
- * Once the device is open, this process will have its socket tags tracked.
- * And on exit or untimely death, all socket tags will be removed.
- * A process can only open /dev/xt_qtaguid once.
- * It should not close it unless it is really done with all the socket tags.
- * Failure to open it will be visible when socket tagging will be attempted.
- */
-static int resTrackFd = -1;
-pthread_once_t resTrackInitDone = PTHREAD_ONCE_INIT;
-
-/* Only call once per process. */
-void legacy_resTrack(void) {
- resTrackFd = TEMP_FAILURE_RETRY(open("/dev/xt_qtaguid", O_RDONLY | O_CLOEXEC));
-}
-
-/*
- * Returns:
- * 0 on success.
- * -errno on failure.
- */
-static int write_ctrl(const char* cmd) {
- int fd, res, savedErrno;
-
- ALOGV("write_ctrl(%s)", cmd);
-
- fd = TEMP_FAILURE_RETRY(open(CTRL_PROCPATH, O_WRONLY | O_CLOEXEC));
- if (fd < 0) {
- return -errno;
- }
-
- res = TEMP_FAILURE_RETRY(write(fd, cmd, strlen(cmd)));
- if (res < 0) {
- savedErrno = errno;
- } else {
- savedErrno = 0;
- }
- if (res < 0) {
- // ALOGV is enough because all the callers also log failures
- ALOGV("Failed write_ctrl(%s) res=%d errno=%d", cmd, res, savedErrno);
- }
- close(fd);
- return -savedErrno;
-}
-
-int legacy_tagSocket(int sockfd, int tag, uid_t uid) {
- char lineBuf[CTRL_MAX_INPUT_LEN];
- int res;
- uint64_t kTag = ((uint64_t)tag << 32);
-
- pthread_once(&resTrackInitDone, legacy_resTrack);
-
- snprintf(lineBuf, sizeof(lineBuf), "t %d %" PRIu64 " %d", sockfd, kTag, uid);
-
- ALOGV("Tagging socket %d with tag %" PRIx64 "{%u,0} for uid %d", sockfd, kTag, tag, uid);
-
- res = write_ctrl(lineBuf);
- if (res < 0) {
- ALOGI("Tagging socket %d with tag %" PRIx64 "(%d) for uid %d failed errno=%d", sockfd, kTag,
- tag, uid, res);
- }
-
- return res;
-}
-
-int legacy_untagSocket(int sockfd) {
- char lineBuf[CTRL_MAX_INPUT_LEN];
- int res;
-
- ALOGV("Untagging socket %d", sockfd);
-
- snprintf(lineBuf, sizeof(lineBuf), "u %d", sockfd);
- res = write_ctrl(lineBuf);
- if (res < 0) {
- ALOGI("Untagging socket %d failed errno=%d", sockfd, res);
- }
-
- return res;
-}
-
-int legacy_setCounterSet(int counterSetNum, uid_t uid) {
- char lineBuf[CTRL_MAX_INPUT_LEN];
- int res;
-
- ALOGV("Setting counters to set %d for uid %d", counterSetNum, uid);
-
- snprintf(lineBuf, sizeof(lineBuf), "s %d %d", counterSetNum, uid);
- res = write_ctrl(lineBuf);
- return res;
-}
-
-int legacy_deleteTagData(int tag, uid_t uid) {
- char lineBuf[CTRL_MAX_INPUT_LEN];
- int cnt = 0, res = 0;
- uint64_t kTag = (uint64_t)tag << 32;
-
- ALOGV("Deleting tag data with tag %" PRIx64 "{%d,0} for uid %d", kTag, tag, uid);
-
- pthread_once(&resTrackInitDone, legacy_resTrack);
-
- snprintf(lineBuf, sizeof(lineBuf), "d %" PRIu64 " %d", kTag, uid);
- res = write_ctrl(lineBuf);
- if (res < 0) {
- ALOGI("Deleting tag data with tag %" PRIx64 "/%d for uid %d failed with cnt=%d errno=%d",
- kTag, tag, uid, cnt, errno);
- }
-
- return res;
-}
diff --git a/libutils/Android.bp b/libutils/Android.bp
index bda9d6b..234638b 100644
--- a/libutils/Android.bp
+++ b/libutils/Android.bp
@@ -28,16 +28,18 @@
min_sdk_version: "apex_inherit",
header_libs: [
- "liblog_headers",
- "libsystem_headers",
+ "libbase_headers",
"libcutils_headers",
+ "liblog_headers",
"libprocessgroup_headers",
+ "libsystem_headers",
],
export_header_lib_headers: [
- "liblog_headers",
- "libsystem_headers",
+ "libbase_headers",
"libcutils_headers",
+ "liblog_headers",
"libprocessgroup_headers",
+ "libsystem_headers",
],
export_include_dirs: ["include"],
@@ -299,13 +301,14 @@
srcs: [
"BitSet_test.cpp",
+ "Errors_test.cpp",
"FileMap_test.cpp",
"LruCache_test.cpp",
"Mutex_test.cpp",
"SharedBuffer_test.cpp",
"Singleton_test.cpp",
- "String8_test.cpp",
"String16_test.cpp",
+ "String8_test.cpp",
"StrongPointer_test.cpp",
"Timers_test.cpp",
"Unicode_test.cpp",
@@ -364,6 +367,7 @@
"-Wall",
"-Werror",
],
+ header_libs: ["libutils_headers"],
}
cc_test_library {
@@ -376,6 +380,7 @@
"-Werror",
],
shared_libs: ["libutils_test_singleton1"],
+ header_libs: ["libutils_headers"],
}
cc_benchmark {
diff --git a/libutils/Errors_test.cpp b/libutils/Errors_test.cpp
new file mode 100644
index 0000000..873c994
--- /dev/null
+++ b/libutils/Errors_test.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "utils/ErrorsMacros.h"
+
+#include <android-base/result.h>
+
+#include <gtest/gtest.h>
+
+using namespace android;
+
+using android::base::Error;
+using android::base::Result;
+
+status_t success_or_fail(bool success) {
+ if (success)
+ return OK;
+ else
+ return PERMISSION_DENIED;
+}
+
+TEST(errors, unwrap_or_return) {
+ auto f = [](bool success, int* val) -> status_t {
+ OR_RETURN(success_or_fail(success));
+ *val = 10;
+ return OK;
+ };
+
+ int val;
+ status_t s = f(true, &val);
+ EXPECT_EQ(OK, s);
+ EXPECT_EQ(10, val);
+
+ val = 0; // reset
+ status_t q = f(false, &val);
+ EXPECT_EQ(PERMISSION_DENIED, q);
+ EXPECT_EQ(0, val);
+}
+
+TEST(errors, unwrap_or_return_result) {
+ auto f = [](bool success) -> Result<std::string, StatusT> {
+ OR_RETURN(success_or_fail(success));
+ return "hello";
+ };
+
+ auto r = f(true);
+ EXPECT_TRUE(r.ok());
+ EXPECT_EQ("hello", *r);
+
+ auto s = f(false);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(PERMISSION_DENIED, s.error().code());
+ EXPECT_EQ("PERMISSION_DENIED", s.error().message());
+}
+
+TEST(errors, unwrap_or_return_result_int) {
+ auto f = [](bool success) -> Result<int, StatusT> {
+ OR_RETURN(success_or_fail(success));
+ return 10;
+ };
+
+ auto r = f(true);
+ EXPECT_TRUE(r.ok());
+ EXPECT_EQ(10, *r);
+
+ auto s = f(false);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(PERMISSION_DENIED, s.error().code());
+ EXPECT_EQ("PERMISSION_DENIED", s.error().message());
+}
+
+TEST(errors, unwrap_or_fatal) {
+ OR_FATAL(success_or_fail(true));
+
+ EXPECT_DEATH(OR_FATAL(success_or_fail(false)), "PERMISSION_DENIED");
+}
+
+TEST(errors, result_in_status) {
+ auto f = [](bool success) -> Result<std::string, StatusT> {
+ if (success)
+ return "OK";
+ else
+ return Error<StatusT>(PERMISSION_DENIED) << "custom error message";
+ };
+
+ auto g = [&](bool success) -> status_t {
+ std::string val = OR_RETURN(f(success));
+ EXPECT_EQ("OK", val);
+ return OK;
+ };
+
+ status_t a = g(true);
+ EXPECT_EQ(OK, a);
+
+ status_t b = g(false);
+ EXPECT_EQ(PERMISSION_DENIED, b);
+}
diff --git a/libutils/Unicode_test.cpp b/libutils/Unicode_test.cpp
index b92eef8..8b994d9 100644
--- a/libutils/Unicode_test.cpp
+++ b/libutils/Unicode_test.cpp
@@ -100,7 +100,7 @@
0xF0, 0x90, 0x80, 0x80, // U+10000, 2 UTF-16 character
};
- char16_t output[1 + 1 + 1 + 2 + 1]; // Room for NULL
+ char16_t output[1 + 1 + 1 + 2 + 1]; // Room for null
utf8_to_utf16(str, sizeof(str), output, sizeof(output) / sizeof(output[0]));
@@ -114,8 +114,7 @@
<< "should be first half of surrogate U+10000";
EXPECT_EQ(0xDC00, output[4])
<< "should be second half of surrogate U+10000";
- EXPECT_EQ(NULL, output[5])
- << "should be NULL terminated";
+ EXPECT_EQ(0, output[5]) << "should be null terminated";
}
TEST_F(UnicodeTest, strstr16EmptyTarget) {
diff --git a/libutils/include/utils/ErrorsMacros.h b/libutils/include/utils/ErrorsMacros.h
new file mode 100644
index 0000000..048c538
--- /dev/null
+++ b/libutils/include/utils/ErrorsMacros.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Errors.h"
+
+// Ideally this file (ErrorsMacros.h) would be part of utils/Errors.h. However, that is infeasible
+// because many users pick up utils/Errors.h via the implicit include path `system/core/include`
+// [1]. Since such users are not guaranteed to declare a dependency on libbase_headers, the
+// following headers from libbase_headers would not be found for them.
+// [1] build/soong/cc/config/global.go#commonGlobalIncludes
+#include <android-base/errors.h>
+#include <android-base/result.h>
+
+#include <assert.h>
+
+namespace android {
+
+// StatusT is a wrapper class for status_t. Use this type instead of status_t when instantiating
+// Result<T, E> and Error<E> template classes. This is required to distinguish status_t from
+// other integer-based error code types like errno, and also to provide utility functions like
+// print().
+struct StatusT {
+ StatusT() : val_(OK) {}
+ StatusT(status_t s) : val_(s) {}
+ const status_t& value() const { return val_; }
+ operator status_t() const { return val_; }
+ std::string print() const { return statusToString(val_); }
+
+ status_t val_;
+};
+
+namespace base {
+
+// Specialization of android::base::OkOrFail<V> for V = status_t. This enables the OR_RETURN and
+// OR_FATAL macros to be used with statements that yield a value of status_t. See
+// android-base/errors.h for the detailed contract.
+template <>
+struct OkOrFail<status_t> {
+    // Tests if status_t is a success value or not.
+ static bool IsOk(const status_t& s) { return s == OK; }
+
+ // Unwrapping status_t in the success case is just asserting that it is actually a success.
+ // We don't return OK because it would be redundant.
+ static void Unwrap([[maybe_unused]] status_t&& s) { assert(IsOk(s)); }
+
+ // Consumes status_t when it's a fail value
+ static OkOrFail<status_t> Fail(status_t&& s) {
+ assert(!IsOk(s));
+ return OkOrFail<status_t>{s};
+ }
+ status_t val_;
+
+ // And converts back into status_t. This is used when OR_RETURN is used in a function whose
+ // return type is status_t.
+ operator status_t() && { return val_; }
+
+ // Or converts into Result<T, StatusT>. This is used when OR_RETURN is used in a function whose
+ // return type is Result<T, StatusT>.
+ template <typename T, typename = std::enable_if_t<!std::is_same_v<T, status_t>>>
+ operator Result<T, StatusT>() && {
+ return Error<StatusT>(std::move(val_));
+ }
+
+ operator Result<int, StatusT>() && { return Error<StatusT>(std::move(val_)); }
+
+ // String representation of the error value.
+ static std::string ErrorMessage(const status_t& s) { return statusToString(s); }
+};
+
+} // namespace base
+} // namespace android
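For orientation, the OkOrFail<status_t> specialization above is what lets OR_RETURN accept a bare status_t expression: on failure it converts either back into status_t or into Error<StatusT>, depending on the enclosing function's return type. A small usage sketch mirroring the cases exercised by Errors_test.cpp in this change follows; the function names are made up.

#include <string>

#include <android-base/result.h>
#include <utils/Errors.h>
#include <utils/ErrorsMacros.h>

using android::OK;
using android::PERMISSION_DENIED;
using android::status_t;
using android::StatusT;
using android::base::Result;

static status_t CheckAccess(bool allowed) {
    return allowed ? OK : PERMISSION_DENIED;
}

// In a status_t-returning function, OR_RETURN propagates the failing status_t as-is.
status_t DoWork(bool allowed) {
    OR_RETURN(CheckAccess(allowed));
    return OK;
}

// In a Result-returning function, the same failure converts into Error<StatusT>,
// so callers see both the code and its string form via the error object.
Result<std::string, StatusT> DoWorkWithResult(bool allowed) {
    OR_RETURN(CheckAccess(allowed));
    return "done";
}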
diff --git a/rootdir/Android.mk b/rootdir/Android.mk
index ce2ec0e..2ed9eec 100644
--- a/rootdir/Android.mk
+++ b/rootdir/Android.mk
@@ -75,9 +75,14 @@
EXPORT_GLOBAL_GCOV_OPTIONS := export GCOV_PREFIX /data/misc/trace
endif
-EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS :=
ifeq ($(CLANG_COVERAGE),true)
- EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS := export LLVM_PROFILE_FILE /data/misc/trace/clang-%20m.profraw
+ ifeq ($(BIONIC_COVERAGE),false)
+ # http://b/210012154 Disable continuous coverage if instrumentation is on
+ # for bionic/libc
+ EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS := export LLVM_PROFILE_FILE /data/misc/trace/clang%c-%20m.profraw
+ else
+ EXPORT_GLOBAL_CLANG_COVERAGE_OPTIONS := export LLVM_PROFILE_FILE /data/misc/trace/clang-%20m.profraw
+ endif
endif
# Put it here instead of in init.rc module definition,
diff --git a/rootdir/avb/Android.bp b/rootdir/avb/Android.bp
deleted file mode 100644
index cfc59a7..0000000
--- a/rootdir/avb/Android.bp
+++ /dev/null
@@ -1,31 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-filegroup {
- name: "q-gsi_avbpubkey",
- srcs: [
- "q-gsi.avbpubkey",
- ],
-}
-
-filegroup {
- name: "r-gsi_avbpubkey",
- srcs: [
- "r-gsi.avbpubkey",
- ],
-}
-
-filegroup {
- name: "s-gsi_avbpubkey",
- srcs: [
- "s-gsi.avbpubkey",
- ],
-}
-
-filegroup {
- name: "qcar-gsi_avbpubkey",
- srcs: [
- "qcar-gsi.avbpubkey",
- ],
-}
diff --git a/rootdir/avb/Android.mk b/rootdir/avb/Android.mk
index 647cfa2..8cf3172 100644
--- a/rootdir/avb/Android.mk
+++ b/rootdir/avb/Android.mk
@@ -15,19 +15,6 @@
endif
#######################################
-# q-gsi.avbpubkey
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := q-gsi.avbpubkey
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_MODULE_CLASS := ETC
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-LOCAL_MODULE_PATH := $(my_gsi_avb_keys_path)
-
-include $(BUILD_PREBUILT)
-
-#######################################
# q-developer-gsi.avbpubkey
include $(CLEAR_VARS)
@@ -41,19 +28,6 @@
include $(BUILD_PREBUILT)
#######################################
-# r-gsi.avbpubkey
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := r-gsi.avbpubkey
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_MODULE_CLASS := ETC
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-LOCAL_MODULE_PATH := $(my_gsi_avb_keys_path)
-
-include $(BUILD_PREBUILT)
-
-#######################################
# r-developer-gsi.avbpubkey
include $(CLEAR_VARS)
@@ -67,19 +41,6 @@
include $(BUILD_PREBUILT)
#######################################
-# s-gsi.avbpubkey
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := s-gsi.avbpubkey
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_MODULE_CLASS := ETC
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-LOCAL_MODULE_PATH := $(my_gsi_avb_keys_path)
-
-include $(BUILD_PREBUILT)
-
-#######################################
# s-developer-gsi.avbpubkey
include $(CLEAR_VARS)
@@ -92,17 +53,4 @@
include $(BUILD_PREBUILT)
-#######################################
-# qcar-gsi.avbpubkey
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := qcar-gsi.avbpubkey
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_MODULE_CLASS := ETC
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-LOCAL_MODULE_PATH := $(my_gsi_avb_keys_path)
-
-include $(BUILD_PREBUILT)
-
my_gsi_avb_keys_path :=
diff --git a/rootdir/avb/q-gsi.avbpubkey b/rootdir/avb/q-gsi.avbpubkey
deleted file mode 100644
index 5ed7543..0000000
--- a/rootdir/avb/q-gsi.avbpubkey
+++ /dev/null
Binary files differ
diff --git a/rootdir/avb/qcar-gsi.avbpubkey b/rootdir/avb/qcar-gsi.avbpubkey
deleted file mode 100644
index ce56646..0000000
--- a/rootdir/avb/qcar-gsi.avbpubkey
+++ /dev/null
Binary files differ
diff --git a/rootdir/avb/r-gsi.avbpubkey b/rootdir/avb/r-gsi.avbpubkey
deleted file mode 100644
index 2609b30..0000000
--- a/rootdir/avb/r-gsi.avbpubkey
+++ /dev/null
Binary files differ
diff --git a/rootdir/avb/s-gsi.avbpubkey b/rootdir/avb/s-gsi.avbpubkey
deleted file mode 100644
index 9065fb8..0000000
--- a/rootdir/avb/s-gsi.avbpubkey
+++ /dev/null
Binary files differ