Merge changes from topic "incremental-workflow" into main
* changes:
Apply-update: Script to apply updates to device
create_snapshot: Derive source hash from device
snapshotctl: Verify data blocks
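
Taken together, these changes implement a host-driven incremental update flow: snapshotctl dumps the dm-verity Merkle-tree hashes of the partitions currently on the device, create_snapshot uses those hashes as the source side when diffing against freshly built images, and apply-update.sh drives the whole sequence. A minimal sketch of the intended developer loop, assuming a configured build environment (envsetup/lunch) with $OUT populated and a checkout rooted above system/core:

    # Build the host tools touched by this topic (apply-update.sh also builds
    # create_snapshot on demand if it is not already in PATH).
    m create_snapshot apply_update

    # Apply an incremental update; static partitions must already be current
    # on both slots when --skip-static-partitions is used.
    ./system/core/fs_mgr/libsnapshot/scripts/apply-update.sh --skip-static-partitions
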
diff --git a/fs_mgr/libsnapshot/Android.bp b/fs_mgr/libsnapshot/Android.bp
index 966696b..b5b976a 100644
--- a/fs_mgr/libsnapshot/Android.bp
+++ b/fs_mgr/libsnapshot/Android.bp
@@ -374,11 +374,15 @@
srcs: [
"snapshotctl.cpp",
"scratch_super.cpp",
+ "android/snapshot/snapshot.proto",
],
static_libs: [
"libbrotli",
"libfstab",
"libz",
+ "libavb",
+ "libfs_avb",
+ "libcrypto_static",
"update_metadata-protos",
],
shared_libs: [
@@ -488,7 +492,10 @@
host_supported: true,
device_supported: false,
- srcs: ["libsnapshot_cow/create_cow.cpp"],
+ srcs: [
+ "libsnapshot_cow/create_cow.cpp",
+ "android/snapshot/snapshot.proto",
+ ],
cflags: [
"-Wall",
@@ -498,14 +505,21 @@
static_libs: [
"liblog",
"libbase",
+ "libfstab",
"libext4_utils",
"libsnapshot_cow",
"libcrypto",
"libbrotli",
"libz",
+ "libdm",
"liblz4",
"libzstd",
"libgflags",
+ "libavb",
+ "libext2_uuid",
+ "libfs_avb",
+ "libcrypto",
+ "libprotobuf-cpp-lite",
],
shared_libs: [
],
diff --git a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
index 5fb71a3..6f31251 100644
--- a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
+++ b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
@@ -283,3 +283,14 @@
// Size of v3 operation buffer. Needs to be determined during writer initialization
uint64 estimated_op_count_max = 14;
}
+
+message VerityHash {
+ // Partition name
+ string partition_name = 1;
+
+ // Salt used for verity hashes
+ string salt = 2;
+
+ // SHA256 hash of each block in the image
+ repeated bytes block_hash = 3;
+}
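
The new VerityHash message is the interchange format between device and host: snapshotctl writes one <partition>.pb per dynamic partition containing the partition name, the verity salt (hex encoded), and the per-block hash values, and create_snapshot reads it back with ParseFromIstream. A quick way to eyeball a pulled file on the host is protoc's text decoder; this is a sketch that assumes the proto package is android.snapshot (matching the android/snapshot/ path and the C++ namespace) and a checkout under system/core:

    # Decode a pulled per-partition hash file into text form for inspection.
    protoc -I system/core/fs_mgr/libsnapshot \
        --decode=android.snapshot.VerityHash \
        system/core/fs_mgr/libsnapshot/android/snapshot/snapshot.proto \
        < "$OUT/verity-hash/system.pb" | head
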
diff --git a/fs_mgr/libsnapshot/libsnapshot_cow/create_cow.cpp b/fs_mgr/libsnapshot/libsnapshot_cow/create_cow.cpp
index 5497b72..fd4e7da 100644
--- a/fs_mgr/libsnapshot/libsnapshot_cow/create_cow.cpp
+++ b/fs_mgr/libsnapshot/libsnapshot_cow/create_cow.cpp
@@ -8,6 +8,7 @@
#include <condition_variable>
#include <cstring>
+#include <fstream>
#include <future>
#include <iostream>
#include <limits>
@@ -17,26 +18,27 @@
#include <unordered_map>
#include <vector>
-#include <android-base/file.h>
-#include <android-base/logging.h>
-#include <android-base/stringprintf.h>
-#include <android-base/unique_fd.h>
-#include <ext4_utils/ext4_utils.h>
-#include <storage_literals/storage_literals.h>
-
#include <android-base/chrono_utils.h>
+#include <android-base/file.h>
+#include <android-base/hex.h>
+#include <android-base/logging.h>
#include <android-base/scopeguard.h>
+#include <android-base/stringprintf.h>
#include <android-base/strings.h>
-
+#include <android-base/unique_fd.h>
+#include <android/snapshot/snapshot.pb.h>
+#include <ext4_utils/ext4_utils.h>
+#include <fs_avb/fs_avb_util.h>
#include <gflags/gflags.h>
#include <libsnapshot/cow_writer.h>
-
#include <openssl/sha.h>
+#include <storage_literals/storage_literals.h>
DEFINE_string(source, "", "Source partition image");
DEFINE_string(target, "", "Target partition image");
DEFINE_string(compression, "lz4",
"Compression algorithm. Default is set to lz4. Available options: lz4, zstd, gz");
+DEFINE_bool(merkel_tree, false, "If true, the source image hashes are obtained from the verity Merkle tree");
namespace android {
namespace snapshot {
@@ -51,7 +53,8 @@
class CreateSnapshot {
public:
CreateSnapshot(const std::string& src_file, const std::string& target_file,
- const std::string& patch_file, const std::string& compression);
+ const std::string& patch_file, const std::string& compression,
+ const bool& merkel_tree);
bool CreateSnapshotPatch();
private:
@@ -108,6 +111,14 @@
bool WriteOrderedSnapshots();
bool WriteNonOrderedSnapshots();
bool VerifyMergeOrder();
+
+ bool CalculateDigest(const void* buffer, size_t size, const void* salt, uint32_t salt_length,
+ uint8_t* digest);
+ bool ParseSourceMerkelTree();
+
+ bool use_merkel_tree_ = false;
+ std::vector<uint8_t> target_salt_;
+ std::vector<uint8_t> source_salt_;
};
void CreateSnapshotLogger(android::base::LogId, android::base::LogSeverity severity, const char*,
@@ -120,8 +131,12 @@
}
CreateSnapshot::CreateSnapshot(const std::string& src_file, const std::string& target_file,
- const std::string& patch_file, const std::string& compression)
- : src_file_(src_file), target_file_(target_file), patch_file_(patch_file) {
+ const std::string& patch_file, const std::string& compression,
+ const bool& merkel_tree)
+ : src_file_(src_file),
+ target_file_(target_file),
+ patch_file_(patch_file),
+ use_merkel_tree_(merkel_tree) {
if (!compression.empty()) {
compression_ = compression;
}
@@ -156,7 +171,76 @@
if (!PrepareParse(src_file_, false)) {
return false;
}
- return ParsePartition();
+
+ if (use_merkel_tree_) {
+ return ParseSourceMerkelTree();
+ } else {
+ return ParsePartition();
+ }
+}
+
+bool CreateSnapshot::CalculateDigest(const void* buffer, size_t size, const void* salt,
+ uint32_t salt_length, uint8_t* digest) {
+ SHA256_CTX ctx;
+ if (SHA256_Init(&ctx) != 1) {
+ return false;
+ }
+ if (SHA256_Update(&ctx, salt, salt_length) != 1) {
+ return false;
+ }
+ if (SHA256_Update(&ctx, buffer, size) != 1) {
+ return false;
+ }
+ if (SHA256_Final(digest, &ctx) != 1) {
+ return false;
+ }
+ return true;
+}
+
+bool CreateSnapshot::ParseSourceMerkelTree() {
+ std::string fname = android::base::Basename(target_file_.c_str());
+ std::string partitionName = fname.substr(0, fname.find(".img"));
+
+ auto vbmeta = android::fs_mgr::LoadAndVerifyVbmetaByPath(
+ target_file_, partitionName, "", true, false, false, nullptr, nullptr, nullptr);
+ if (vbmeta == nullptr) {
+ LOG(ERROR) << "LoadAndVerifyVbmetaByPath failed for partition: " << partitionName;
+ return false;
+ }
+ auto descriptor = android::fs_mgr::GetHashtreeDescriptor(partitionName, std::move(*vbmeta));
+ if (descriptor == nullptr) {
+ LOG(ERROR) << "GetHashtreeDescriptor failed for partition: " << partitionName;
+ return false;
+ }
+
+ std::fstream input(src_file_, std::ios::in | std::ios::binary);
+ VerityHash hash;
+ if (!hash.ParseFromIstream(&input)) {
+ LOG(ERROR) << "Failed to parse message.";
+ return false;
+ }
+
+ std::string source_salt = hash.salt();
+ source_salt.erase(std::remove(source_salt.begin(), source_salt.end(), '\0'), source_salt.end());
+ if (!android::base::HexToBytes(source_salt, &source_salt_)) {
+ LOG(ERROR) << "HexToBytes conversion failed for source salt: " << source_salt;
+ return false;
+ }
+
+ std::string target_salt = descriptor->salt;
+ if (!android::base::HexToBytes(target_salt, &target_salt_)) {
+ LOG(ERROR) << "HexToBytes conversion failed for target salt: " << target_salt;
+ return false;
+ }
+
+ std::vector<uint8_t> digest(32, 0);
+ for (int i = 0; i < hash.block_hash_size(); i++) {
+ CalculateDigest(hash.block_hash(i).data(), hash.block_hash(i).size(), target_salt_.data(),
+ target_salt_.size(), digest.data());
+ source_block_hash_[ToHexString(digest.data(), 32)] = i;
+ }
+
+ return true;
}
/*
@@ -386,10 +470,21 @@
while (num_blocks) {
const void* bufptr = (char*)buffer.get() + buffer_offset;
uint64_t blkindex = foffset / BLOCK_SZ;
+ std::string hash;
- uint8_t checksum[32];
- SHA256(bufptr, BLOCK_SZ, checksum);
- std::string hash = ToHexString(checksum, sizeof(checksum));
+ if (create_snapshot_patch_ && use_merkel_tree_) {
+ std::vector<uint8_t> digest(32, 0);
+ CalculateDigest(bufptr, BLOCK_SZ, target_salt_.data(), target_salt_.size(),
+ digest.data());
+ std::vector<uint8_t> final_digest(32, 0);
+ CalculateDigest(digest.data(), digest.size(), source_salt_.data(),
+ source_salt_.size(), final_digest.data());
+ hash = ToHexString(final_digest.data(), final_digest.size());
+ } else {
+ uint8_t checksum[32];
+ SHA256(bufptr, BLOCK_SZ, checksum);
+ hash = ToHexString(checksum, sizeof(checksum));
+ }
if (create_snapshot_patch_) {
PrepareMergeBlock(bufptr, blkindex, hash);
@@ -497,7 +592,7 @@
auto parts = android::base::Split(fname, ".");
std::string snapshotfile = parts[0] + ".patch";
android::snapshot::CreateSnapshot snapshot(FLAGS_source, FLAGS_target, snapshotfile,
- FLAGS_compression);
+ FLAGS_compression, FLAGS_merkel_tree);
if (!snapshot.CreateSnapshotPatch()) {
LOG(ERROR) << "Snapshot creation failed";
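
With --merkel_tree, the --source argument is no longer a source partition image but the VerityHash .pb dumped from the device; the source-side comparison keys are derived from its verity leaf digests instead of being computed by re-hashing a source image. The patch is written as <target name>.patch in the current directory, and --compression defaults to lz4. A manual per-partition invocation, mirroring what apply-update.sh runs for every pulled *.pb (sketch; paths follow the script's defaults):

    # Diff the device's dumped hashes against the freshly built image.
    create_snapshot \
        --source "$OUT/verity-hash/system.pb" \
        --target "$OUT/system.img" \
        --merkel_tree
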
diff --git a/fs_mgr/libsnapshot/scripts/Android.bp b/fs_mgr/libsnapshot/scripts/Android.bp
index 829f5bc..b99da93 100644
--- a/fs_mgr/libsnapshot/scripts/Android.bp
+++ b/fs_mgr/libsnapshot/scripts/Android.bp
@@ -29,3 +29,8 @@
"snapshot_proto_python",
],
}
+
+sh_binary_host {
+ name: "apply_update",
+ src: "apply-update.sh",
+}
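
Registering the script as an sh_binary_host makes it buildable and installable like any other host tool. A sketch of building and running the installed copy, assuming the conventional Soong host install location under $ANDROID_HOST_OUT (the script can also be run directly from the source tree):

    m apply_update
    "$ANDROID_HOST_OUT/bin/apply_update" --help
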
diff --git a/fs_mgr/libsnapshot/scripts/apply-update.sh b/fs_mgr/libsnapshot/scripts/apply-update.sh
index 90b0119..0b10721 100755
--- a/fs_mgr/libsnapshot/scripts/apply-update.sh
+++ b/fs_mgr/libsnapshot/scripts/apply-update.sh
@@ -1,77 +1,220 @@
#!/bin/bash
-# This is a debug script to quicky test end-to-end flow
-# of snapshot updates without going through update-engine.
+# Copyright 2024 Google Inc. All rights reserved.
#
-# Usage:
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# To update both dynamic and static partitions:
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# ./system/core/fs_mgr/libsnapshot/apply_update.sh [--update-static-partitions] [--wipe]
-#
-# --update-static-partitions: This will update bootloader and static A/B
-# partitions
-# --wipe: Allows data wipe as part of update flow
-#
-# To update dynamic partitions only (this should be used when static
-# partitions are present in both the slots):
-#
-# ./system/core/fs_mgr/libsnapshot/apply_update.sh
-#
-#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-rm -f $OUT/*.patch
+# apply-update.sh: Script to apply an incremental update to the device
-# Compare images and create snapshot patches. Currently, this
-# just compares two identical images in $OUT. In general, any source
-# and target images could be passed to create snapshot patches. However,
-# care must be taken to ensure source images are already present on the device.
-#
-# create_snapshot is a host side binary. Build it with `m create_snapshot`
-create_snapshot --source=$OUT/system.img --target=$OUT/system.img &
-create_snapshot --source=$OUT/product.img --target=$OUT/product.img &
-create_snapshot --source=$OUT/vendor.img --target=$OUT/vendor.img &
-create_snapshot --source=$OUT/system_ext.img --target=$OUT/system_ext.img &
-create_snapshot --source=$OUT/vendor_dlkm.img --target=$OUT/vendor_dlkm.img &
-create_snapshot --source=$OUT/system_dlkm.img --target=$OUT/system_dlkm.img &
+# Ensure the OUT environment variable is set
+if [ -z "$OUT" ]; then
+ echo "Error: OUT environment variable not set." >&2
+ exit 1
+fi
-echo "Waiting for snapshot patch creation"
-wait $(jobs -p)
-echo "Snapshot patch creation completed"
+DEVICE_PATH="/data/verity-hash"
+HOST_PATH="$OUT/verity-hash"
-mv *.patch $OUT/
+# Path to the log file on the host
+log_file="$HOST_PATH/snapshot.log"
+
+# Function to log messages to both console and log file
+log_message() {
+ message="$1"
+ echo "$message" # Print to stdout
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $message" >> "$log_file" # Append to log file with timestamp
+}
+
+# Function to check for create_snapshot and build if needed
+ensure_create_snapshot() {
+ if ! command -v create_snapshot &> /dev/null; then
+ log_message "create_snapshot not found. Building..."
+ m create_snapshot
+ if [[ $? -ne 0 ]]; then
+ log_message "Error: Failed to build create_snapshot."
+ exit 1
+ fi
+ fi
+}
+
+ensure_create_snapshot
+
+# Function to flash static partitions
+flash_static_partitions() {
+ local wipe_flag="$1"
+
+ fastboot flash bootloader "$OUT"/bootloader.img
+ fastboot reboot bootloader
+ sleep 1
+ fastboot flash radio "$OUT"/radio.img
+ fastboot reboot bootloader
+ sleep 1
+ fastboot flashall --exclude-dynamic-partitions --disable-super-optimization --skip-reboot
+
+ if (( wipe_flag )); then
+ log_message "Wiping device..."
+ fastboot -w
+ fi
+ fastboot reboot
+}
+
+# Function to display the help message
+show_help() {
+ cat << EOF
+Usage: $0 [OPTIONS]
+
+This script updates an Android device with incremental flashing, optionally wiping data and flashing static partitions.
+
+Options:
+ --skip-static-partitions Skip flashing static partitions (bootloader, radio, boot, vbmeta, dtbo and other static A/B partitions).
+ * Requires manual update of static partitions on both A/B slots
+ *before* using this flag.
+ * Speeds up the update process and development iteration.
+ * Ideal for development focused on the Android platform (AOSP,
+ git_main).
+ * Safe usage: First update static partitions on both slots, then
+ use this flag for faster development iterations.
+ Ex:
+ 1: Run this on both slots - it updates the kernel and other static partitions:
+ fastboot flashall --exclude-dynamic-partitions --disable-super-optimization --skip-reboot
+
+ 2: Update the bootloader on both slots:
+ fastboot flash bootloader $OUT/bootloader.img --slot=all
+
+ 3: Update the radio on both slots:
+ fastboot flash radio $OUT/radio.img --slot=all
+ After that, this flag can safely be used for faster update iterations.
+
+ --wipe Wipe user data during the update.
+ --help Display this help message.
+
+Environment Variables:
+ OUT Path to the directory containing build output.
+ This is required for the script to function correctly.
+
+Examples:
+ (Typical development workflow: make changes anywhere in the platform, then build with 'm' to create the images.)
+
+ Update the device:
+ $0
+
+ Update the device, but skip flashing static partitions (see above for the usage):
+ $0 --skip-static-partitions
+
+ Update the device and wipe user data:
+ $0 --wipe
+
+ Display this help message:
+ $0 --help
+EOF
+}
+
+skip_static_partitions=0
+wipe_flag=0
+help_flag=0
+
+# Parse arguments
+for arg in "$@"; do
+ case "$arg" in
+ --skip-static-partitions)
+ skip_static_partitions=1
+ ;;
+ --wipe)
+ wipe_flag=1
+ ;;
+ --help)
+ help_flag=1
+ ;;
+ *)
+ echo "Unknown argument: $arg" >&2
+ help_flag=1
+ ;;
+ esac
+done
+
+# Check if help flag is set
+if (( help_flag )); then
+ show_help
+ exit 0
+fi
+
+rm -rf $HOST_PATH
adb root
adb wait-for-device
-adb shell mkdir -p /data/update/
-adb push $OUT/*.patch /data/update/
-if [[ "$2" == "--wipe" ]]; then
- adb shell snapshotctl apply-update /data/update/ -w
+adb shell rm -rf $DEVICE_PATH
+adb shell mkdir -p $DEVICE_PATH
+
+echo "Extracting device source hash from dynamic partitions"
+adb shell snapshotctl dump-verity-hash $DEVICE_PATH
+adb pull -q $DEVICE_PATH $OUT/
+
+log_message "Entering directory:"
+
+# Navigate to the verity-hash directory
+cd "$HOST_PATH" || { log_message "Error: Could not navigate to $HOST_PATH"; exit 1; }
+
+pwd
+
+# Iterate over all .pb files using a for loop
+for pb_file in *.pb; do
+ # Extract the base filename without the .pb extension
+ base_filename="${pb_file%.*}"
+
+ # Construct the source and target file names
+ source_file="$pb_file"
+ target_file="$OUT/$base_filename.img"
+
+ # Construct the create_snapshot command using an array
+ snapshot_args=(
+ "create_snapshot"
+ "--source" "$source_file"
+ "--target" "$target_file"
+ "--merkel_tree"
+ )
+
+ # Log the command about to be executed
+ log_message "Running: ${snapshot_args[*]}"
+
+ "${snapshot_args[@]}" >> "$log_file" 2>&1 &
+done
+
+log_message "Waiting for snapshot patch creation"
+
+# Wait for all background processes to complete
+wait $(jobs -p)
+
+log_message "Snapshot patches created successfully"
+
+adb push -q $HOST_PATH/*.patch $DEVICE_PATH
+
+log_message "Applying update"
+
+if (( wipe_flag )); then
+ adb shell snapshotctl apply-update $DEVICE_PATH -w
else
- adb shell snapshotctl apply-update /data/update/
+ adb shell snapshotctl apply-update $DEVICE_PATH
fi
-# Check if the --update-static-partitions option is provided.
-# For quick developer workflow, there is no need to repeatedly
-# apply static partitions.
-if [[ "$1" == "--update-static-partitions" ]]; then
- adb reboot bootloader
- sleep 5
- if [[ "$2" == "--wipe" ]]; then
- fastboot -w
- fi
- fastboot flash bootloader $OUT/bootloader.img
- sleep 1
- fastboot reboot bootloader
- sleep 1
- fastboot flash radio $OUT/radio.img
- sleep 1
- fastboot reboot bootloader
- sleep 1
- fastboot flashall --exclude-dynamic-partitions --disable-super-optimization
+if (( skip_static_partitions )); then
+ log_message "Rebooting device - Skipping flashing static partitions"
+ adb reboot
else
- adb reboot
+ log_message "Rebooting device to bootloader"
+ adb reboot bootloader
+ log_message "Waiting to enter fastboot bootloader"
+ flash_static_partitions "$wipe_flag"
fi
-echo "Update completed"
+log_message "Update completed"
diff --git a/fs_mgr/libsnapshot/snapshotctl.cpp b/fs_mgr/libsnapshot/snapshotctl.cpp
index 46de991..e1a3310 100644
--- a/fs_mgr/libsnapshot/snapshotctl.cpp
+++ b/fs_mgr/libsnapshot/snapshotctl.cpp
@@ -30,12 +30,15 @@
#include <android-base/unique_fd.h>
#include <android-base/chrono_utils.h>
+#include <android-base/hex.h>
#include <android-base/parseint.h>
#include <android-base/properties.h>
#include <android-base/scopeguard.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
+#include <android/snapshot/snapshot.pb.h>
+#include <fs_avb/fs_avb_util.h>
#include <fs_mgr.h>
#include <fs_mgr_dm_linear.h>
#include <fstab/fstab.h>
@@ -44,9 +47,13 @@
#include <libsnapshot/snapshot.h>
#include <storage_literals/storage_literals.h>
+#include <openssl/sha.h>
+
#include "partition_cow_creator.h"
#include "scratch_super.h"
+#include "utility.h"
+
#ifdef SNAPSHOTCTL_USERDEBUG_OR_ENG
#include <BootControlClient.h>
#endif
@@ -89,7 +96,12 @@
" apply-update\n"
" Apply the incremental OTA update wherein the snapshots are\n"
" directly written to COW block device. This will bypass update-engine\n"
- " and the device will be ready to boot from the target build.\n";
+ " and the device will be ready to boot from the target build.\n"
+ " dump-verity-hash <directory where verity Merkle tree hashes are stored> "
+"[-verify]\n"
+ " Dump the verity Merkle tree hashes at the specified path\n"
+ " -verify: Verify the dynamic partition blocks by comparing them with the verity "
+"Merkle tree\n";
return EX_USAGE;
}
@@ -631,6 +643,252 @@
return true;
}
+static bool GetBlockHashFromMerkelTree(android::base::borrowed_fd image_fd, uint64_t image_size,
+ uint32_t data_block_size, uint32_t hash_block_size,
+ uint64_t tree_offset,
+ std::vector<std::string>& out_block_hash) {
+ uint32_t padded_digest_size = 32;
+ if (image_size % data_block_size != 0) {
+ LOG(ERROR) << "Image_size: " << image_size
+ << " not a multiple of data block size: " << data_block_size;
+ return false;
+ }
+
+ // vector of level-size and offset
+ std::vector<std::pair<uint64_t, uint64_t>> levels;
+ uint64_t data_block_count = image_size / data_block_size;
+ uint32_t digests_per_block = hash_block_size / padded_digest_size;
+ uint32_t level_block_count = data_block_count;
+ while (level_block_count > 1) {
+ uint32_t next_level_block_count =
+ (level_block_count + digests_per_block - 1) / digests_per_block;
+ levels.emplace_back(std::make_pair(next_level_block_count * hash_block_size, 0));
+ level_block_count = next_level_block_count;
+ }
+ // root digest
+ levels.emplace_back(std::make_pair(0, 0));
+ // initialize offset
+ for (auto level = std::prev(levels.end()); level != levels.begin(); level--) {
+ std::prev(level)->second = level->second + level->first;
+ }
+
+ // We just want level 0
+ auto level = levels.begin();
+ std::string hash_block(hash_block_size, '\0');
+ uint64_t block_offset = tree_offset + level->second;
+ uint64_t t_read_blocks = 0;
+ uint64_t blockidx = 0;
+ uint64_t num_hash_blocks = level->first / hash_block_size;
+ while ((t_read_blocks < num_hash_blocks) && (blockidx < data_block_count)) {
+ if (!android::base::ReadFullyAtOffset(image_fd, hash_block.data(), hash_block.size(),
+ block_offset)) {
+ LOG(ERROR) << "Failed to read tree block at offset: " << block_offset;
+ return false;
+ }
+
+ for (uint32_t offset = 0; offset < hash_block.size(); offset += padded_digest_size) {
+ std::string single_hash = hash_block.substr(offset, padded_digest_size);
+ out_block_hash.emplace_back(single_hash);
+
+ blockidx += 1;
+ if (blockidx >= data_block_count) {
+ break;
+ }
+ }
+
+ block_offset += hash_block_size;
+ t_read_blocks += 1;
+ }
+ return true;
+}
+
+static bool CalculateDigest(const void* buffer, size_t size, const void* salt, uint32_t salt_length,
+ uint8_t* digest) {
+ SHA256_CTX ctx;
+ if (SHA256_Init(&ctx) != 1) {
+ return false;
+ }
+ if (SHA256_Update(&ctx, salt, salt_length) != 1) {
+ return false;
+ }
+ if (SHA256_Update(&ctx, buffer, size) != 1) {
+ return false;
+ }
+ if (SHA256_Final(digest, &ctx) != 1) {
+ return false;
+ }
+ return true;
+}
+
+bool verify_data_blocks(android::base::borrowed_fd fd, const std::vector<std::string>& block_hash,
+ std::unique_ptr<android::fs_mgr::FsAvbHashtreeDescriptor>& descriptor,
+ const std::vector<uint8_t>& salt) {
+ uint64_t data_block_count = descriptor->image_size / descriptor->data_block_size;
+ uint64_t foffset = 0;
+ uint64_t blk = 0;
+
+ std::string hash_block(descriptor->hash_block_size, '\0');
+ while (blk < data_block_count) {
+ if (!android::base::ReadFullyAtOffset(fd, hash_block.data(), descriptor->hash_block_size,
+ foffset)) {
+ LOG(ERROR) << "Failed to read from offset: " << foffset;
+ return false;
+ }
+
+ std::string digest(32, '\0');
+ CalculateDigest(hash_block.data(), descriptor->hash_block_size, salt.data(), salt.size(),
+ reinterpret_cast<uint8_t*>(digest.data()));
+ if (digest != block_hash[blk]) {
+ LOG(ERROR) << "Hash mismatch for block: " << blk << " Expected: " << block_hash[blk]
+ << " Received: " << digest;
+ return false;
+ }
+
+ foffset += descriptor->hash_block_size;
+ blk += 1;
+ }
+
+ return true;
+}
+
+bool DumpVerityHash(int argc, char** argv) {
+ android::base::InitLogging(argv, &android::base::KernelLogger);
+
+ if (::getuid() != 0) {
+ LOG(ERROR) << "Not running as root. Try \"adb root\" first.";
+ return false;
+ }
+
+ if (argc < 3) {
+ std::cerr
+ << " dump-verity-hash <directory location where verity hash is saved> [-verify]\n";
+ return false;
+ }
+
+ bool verification_required = false;
+ std::string hash_file_path = argv[2];
+ if (argc == 4) {
+ if (argv[3] == "-verify"s) {
+ verification_required = true;
+ }
+ }
+
+ auto& dm = android::dm::DeviceMapper::Instance();
+ auto dm_block_devices = dm.FindDmPartitions();
+ if (dm_block_devices.empty()) {
+ LOG(ERROR) << "No dm-enabled block device is found.";
+ return false;
+ }
+
+ android::fs_mgr::Fstab fstab;
+ if (!ReadDefaultFstab(&fstab)) {
+ LOG(ERROR) << "Failed to read fstab";
+ return false;
+ }
+
+ for (const auto& pair : dm_block_devices) {
+ std::string partition_name = pair.first;
+ android::fs_mgr::FstabEntry* fstab_entry =
+ GetEntryForMountPoint(&fstab, "/" + partition_name);
+ auto vbmeta = LoadAndVerifyVbmeta(*fstab_entry, "", nullptr, nullptr, nullptr);
+ if (vbmeta == nullptr) {
+ LOG(ERROR) << "LoadAndVerifyVbmetaByPath failed for partition: " << partition_name;
+ return false;
+ }
+
+ auto descriptor =
+ android::fs_mgr::GetHashtreeDescriptor(partition_name, std::move(*vbmeta));
+ if (descriptor == nullptr) {
+ LOG(ERROR) << "GetHashtreeDescriptor failed for partition: " << partition_name;
+ return false;
+ }
+
+ std::string device_path = fstab_entry->blk_device;
+ if (!dm.GetDmDevicePathByName(fstab_entry->blk_device, &device_path)) {
+ LOG(ERROR) << "Failed to resolve logical device path for: " << fstab_entry->blk_device;
+ return false;
+ }
+
+ android::base::unique_fd fd(open(device_path.c_str(), O_RDONLY));
+ if (fd < 0) {
+ LOG(ERROR) << "Failed to open file: " << device_path;
+ return false;
+ }
+ std::vector<std::string> block_hash;
+ if (!GetBlockHashFromMerkelTree(fd, descriptor->image_size, descriptor->data_block_size,
+ descriptor->hash_block_size, descriptor->tree_offset,
+ block_hash)) {
+ LOG(ERROR) << "GetBlockHashFromMerkelTree failed";
+ return false;
+ }
+
+ uint64_t dev_sz = lseek(fd, 0, SEEK_END);
+ uint64_t fec_size = dev_sz - descriptor->image_size;
+ if (fec_size % descriptor->data_block_size != 0) {
+ LOG(ERROR) << "fec_size: " << fec_size
+ << " isn't multiple of: " << descriptor->data_block_size;
+ return false;
+ }
+
+ std::vector<uint8_t> salt;
+ const std::string& salt_str = descriptor->salt;
+ bool ok = android::base::HexToBytes(salt_str, &salt);
+ if (!ok) {
+ LOG(ERROR) << "HexToBytes conversion failed";
+ return false;
+ }
+ uint64_t file_offset = descriptor->image_size;
+ std::vector<uint8_t> hash_block(descriptor->hash_block_size, 0);
+ while (file_offset < dev_sz) {
+ if (!android::base::ReadFullyAtOffset(fd, hash_block.data(),
+ descriptor->hash_block_size, file_offset)) {
+ LOG(ERROR) << "Failed to read tree block at offset: " << file_offset;
+ return false;
+ }
+ std::string digest(32, '\0');
+ CalculateDigest(hash_block.data(), descriptor->hash_block_size, salt.data(),
+ salt.size(), reinterpret_cast<uint8_t*>(digest.data()));
+ block_hash.push_back(digest);
+ file_offset += descriptor->hash_block_size;
+ fec_size -= descriptor->hash_block_size;
+ }
+
+ if (fec_size != 0) {
+ LOG(ERROR) << "Checksum calculation pending: " << fec_size;
+ return false;
+ }
+
+ if (verification_required) {
+ if (!verify_data_blocks(fd, block_hash, descriptor, salt)) {
+ LOG(ERROR) << "verify_data_blocks failed";
+ return false;
+ }
+ }
+
+ VerityHash verity_hash;
+ verity_hash.set_partition_name(partition_name);
+ verity_hash.set_salt(salt_str);
+ for (auto hash : block_hash) {
+ verity_hash.add_block_hash(hash.data(), hash.size());
+ }
+ std::string hash_file = hash_file_path + "/" + partition_name + ".pb";
+ std::string content;
+ if (!verity_hash.SerializeToString(&content)) {
+ LOG(ERROR) << "Unable to serialize verity_hash";
+ return false;
+ }
+ if (!WriteStringToFileAtomic(content, hash_file)) {
+ PLOG(ERROR) << "Unable to write VerityHash to " << hash_file;
+ return false;
+ }
+
+ LOG(INFO) << partition_name
+ << ": GetBlockHashFromMerkelTree success. Num Blocks: " << block_hash.size();
+ }
+ return true;
+}
+
bool MapPrecreatedSnapshots(int argc, char** argv) {
android::base::InitLogging(argv, &android::base::KernelLogger);
@@ -827,6 +1085,7 @@
{"unmap-snapshots", UnMapPrecreatedSnapshots},
{"delete-snapshots", DeletePrecreatedSnapshots},
{"revert-snapshots", RemovePrecreatedSnapshots},
+ {"dump-verity-hash", DumpVerityHash},
#endif
{"unmap", UnmapCmdHandler},
// clang-format on
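
The new dump-verity-hash command walks every dm-verity-enabled dynamic partition, reads the leaf level of its hash tree (plus salted digests of the blocks that follow the filesystem image), and serializes the result as <partition>.pb under the given directory; with -verify it also re-reads each data block and checks its salted SHA-256 against the tree before anything is written. A sketch of running it standalone for debugging, mirroring what apply-update.sh does:

    adb root && adb wait-for-device
    adb shell mkdir -p /data/verity-hash
    adb shell snapshotctl dump-verity-hash /data/verity-hash -verify
    adb pull /data/verity-hash "$OUT/"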