[automerger skipped] [automerged blank] update OTA script in tm-mainline-prod to the latest version. 2p: 8720cc66fe am: 93e063328a -s ours
am skip reason: Merged-In Iaa317a3a4b8addbca8ea987aee9953c78fa1a679 with SHA-1 c890e48d81 is already in history
Original change: https://googleplex-android-review.googlesource.com/c/platform/system/update_engine/+/23200961
Change-Id: I8ea1b0e2169587713c6985eefa2dd33f8ebd5897
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index 5628109..6134885 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -464,8 +464,19 @@
return true;
}
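+// Byte-wise comparison helpers between a raw hash blob and the expected hash
+// bytes passed in as a string_view; used by the metadata hash check below.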
+bool operator==(const std::vector<unsigned char>& a, std::string_view b) {
+ if (a.size() != b.size()) {
+ return false;
+ }
+ return memcmp(a.data(), b.data(), a.size()) == 0;
+}
+bool operator!=(const std::vector<unsigned char>& a, std::string_view b) {
+ return !(a == b);
+}
+
bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
const std::string& metadata_filename,
+ std::string_view expected_metadata_hash,
DeltaArchiveManifest* manifest,
brillo::ErrorPtr* error) {
FileDescriptorPtr fd(new EintrSafeFileDescriptor);
@@ -508,6 +519,21 @@
"Failed to read metadata and signature from " + metadata_filename);
}
fd->Close();
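+ // When the caller provides an expected metadata hash, recompute the hash of
+ // the metadata that was just read and reject the payload on a mismatch.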
+ if (!expected_metadata_hash.empty()) {
+ brillo::Blob metadata_hash;
+ TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
+ metadata.data(), payload_metadata.GetMetadataSize(), &metadata_hash));
+ if (metadata_hash != expected_metadata_hash) {
+ return LogAndSetError(error,
+ FROM_HERE,
+ "Metadata hash mismatch. Expected hash: " +
+ HexEncode(expected_metadata_hash) +
+ " actual hash: " + HexEncode(metadata_hash));
+ } else {
+ LOG(INFO) << "Payload metadata hash check passed : "
+ << HexEncode(metadata_hash);
+ }
+ }
auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath(
constants::kUpdateCertificatesPath);
@@ -1097,14 +1123,20 @@
const std::string& metadata_filename,
const vector<string>& key_value_pair_headers,
brillo::ErrorPtr* error) {
- DeltaArchiveManifest manifest;
- if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) {
- return 0;
- }
std::map<string, string> headers;
if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
return 0;
}
+ DeltaArchiveManifest manifest;
+ brillo::Blob metadata_hash;
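+ // The metadata hash header is optional; if it is missing or not valid
+ // base64, clear the blob so the hash check is skipped downstream.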
+ if (!brillo::data_encoding::Base64Decode(
+ headers[kPayloadPropertyMetadataHash], &metadata_hash)) {
+ metadata_hash.clear();
+ }
+ if (!VerifyPayloadParseManifest(
+ metadata_filename, ToStringView(metadata_hash), &manifest, error)) {
+ return 0;
+ }
std::vector<ApexInfo> apex_infos(manifest.apex_info().begin(),
manifest.apex_info().end());
diff --git a/aosp/update_attempter_android.h b/aosp/update_attempter_android.h
index c2226b2..bbffbe9 100644
--- a/aosp/update_attempter_android.h
+++ b/aosp/update_attempter_android.h
@@ -221,8 +221,14 @@
// Helper of public VerifyPayloadApplicable. Return the parsed manifest in
// |manifest|.
static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+ std::string_view metadata_hash,
DeltaArchiveManifest* manifest,
brillo::ErrorPtr* error);
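+ // Convenience overload for callers that do not pass an expected metadata
+ // hash; the hash check is skipped.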
+ static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+ DeltaArchiveManifest* manifest,
+ brillo::ErrorPtr* error) {
+ return VerifyPayloadParseManifest(metadata_filename, "", manifest, error);
+ }
// Enqueue and run a CleanupPreviousUpdateAction.
void ScheduleCleanupPreviousUpdate();
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 8ee1436..152da4d 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -677,6 +677,10 @@
size_t max_threads = GetMaxThreads();
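+ // A nonzero max_threads in the payload config (set via --max_threads)
+ // overrides the auto-detected thread count.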
+ if (config.max_threads > 0) {
+ max_threads = config.max_threads;
+ }
+
// Sort the files in descending order based on number of new blocks to make
// sure we start the largest ones first.
if (file_delta_processors.size() > max_threads) {
diff --git a/payload_generator/erofs_filesystem.cc b/payload_generator/erofs_filesystem.cc
index bf10d8c..c2c3979 100644
--- a/payload_generator/erofs_filesystem.cc
+++ b/payload_generator/erofs_filesystem.cc
@@ -28,7 +28,6 @@
#include "erofs_iterate.h"
#include "lz4diff/lz4diff.pb.h"
#include "lz4diff/lz4patch.h"
-#include "lz4diff/lz4diff.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
@@ -78,7 +77,8 @@
static void FillExtentInfo(FilesystemInterface::File* p_file,
std::string_view image_filename,
- struct erofs_inode* inode) {
+ struct erofs_inode* inode,
+ size_t* const unaligned_bytes) {
auto& file = *p_file;
struct erofs_map_blocks block {};
@@ -88,9 +88,11 @@
auto& compressed_blocks = file.compressed_file_info.blocks;
auto last_pa = block.m_pa;
auto last_plen = 0;
- LOG(INFO) << file.name << ", isize: " << inode->i_size;
while (block.m_la < inode->i_size) {
auto error = ErofsMapBlocks(inode, &block, EROFS_GET_BLOCKS_FIEMAP);
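+ // Advance block.m_la to the next logical extent whenever this loop
+ // iteration ends, regardless of which path exits it.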
+ DEFER {
+ block.m_la += block.m_llen;
+ };
if (error) {
LOG(FATAL) << "Failed to map blocks for " << file.name << " in "
<< image_filename;
@@ -105,9 +107,10 @@
<< "` has unaligned blocks: at physical byte offset: "
<< block.m_pa << ", "
<< " length: " << block.m_plen
- << ", logical offset: " << block.m_la;
+ << ", logical offset: " << block.m_la << ", remaining data: "
+ << inode->i_size - (block.m_la + block.m_llen);
}
- break;
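+ // Instead of skipping the rest of the file, account for its unaligned
+ // bytes and keep going; the total is reported after the image is scanned.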
+ (*unaligned_bytes) += block.m_plen;
}
// Certain uncompressed blocks have physical size > logical size. Usually
// the physical block contains a bunch of trailing zeros. Include these
@@ -140,11 +143,11 @@
CompressedBlock(block.m_la, block.m_plen, block.m_llen));
}
}
-
- block.m_la += block.m_llen;
}
- file.extents.push_back(ExtentForRange(
- last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
+ if (last_plen != 0) {
+ file.extents.push_back(ExtentForRange(
+ last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
+ }
return;
}
@@ -203,6 +206,7 @@
bool ErofsFilesystem::GetFiles(const std::string& filename,
std::vector<File>* files,
const CompressionAlgorithm& algo) {
+ size_t unaligned_bytes = 0;
erofs_iterate_root_dir(&sbi, [&](struct erofs_iterate_dir_context* p_info) {
const auto& info = *p_info;
if (info.ctx.de_ftype != EROFS_FT_REG_FILE) {
@@ -225,14 +229,10 @@
LOG(FATAL) << "Failed to get occupied size for " << filename;
return err;
}
- // If data is packed inline, likely this node is stored on block unalighed
- // addresses. OTA doesn't work for non-block aligned files. All blocks not
- // reported by |GetFiles| will be updated in 1 operation. Ignore inline
- // files for now.
- // TODO(b/206729162) Support un-aligned files.
- if (inode.datalayout == EROFS_INODE_FLAT_INLINE) {
- return 0;
- }
+ // For EROFS_INODE_FLAT_INLINE, most blocks are stored at aligned
+ // addresses, except the last block, which is stored right after the
+ // inode. These nodes have a small amount of unaligned data, which
+ // is fine.
File file;
file.name = info.path;
@@ -242,7 +242,7 @@
file.file_stat.st_size = uncompressed_size;
file.file_stat.st_ino = inode.nid;
- FillExtentInfo(&file, filename, &inode);
+ FillExtentInfo(&file, filename, &inode, &unaligned_bytes);
file.compressed_file_info.algo = algo;
files->emplace_back(std::move(file));
@@ -252,6 +252,11 @@
for (auto& file : *files) {
NormalizeExtents(&file.extents);
}
+ LOG(INFO) << "EROFS image " << filename << " has " << unaligned_bytes
+ << " unaligned bytes, which is "
+ << static_cast<float>(unaligned_bytes) / utils::FileSize(filename) *
+ 100.0f
+ << "% of partition data";
return true;
}
diff --git a/payload_generator/erofs_filesystem_unittest.cc b/payload_generator/erofs_filesystem_unittest.cc
index e6a8929..58686c3 100644
--- a/payload_generator/erofs_filesystem_unittest.cc
+++ b/payload_generator/erofs_filesystem_unittest.cc
@@ -102,11 +102,11 @@
"/dir1/dir2/file4",
"/dir1/file0",
"/dir1/file2",
+ "/etc/update_engine.conf",
"/file1",
// Files < 4K are stored inline, and therefore ignored, as they are often
// not stored on a block boundary.
- // "/generate_test_erofs_images.sh"
- };
+ "/generate_test_erofs_images.sh"};
ASSERT_EQ(filenames, expected_filenames);
const auto delta_generator = files[0];
ASSERT_GT(delta_generator.compressed_file_info.blocks.size(), 0UL);
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 5a901d3..6616ee1 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -455,6 +455,11 @@
"Compression parameter passed to mkfs.erofs's -z option. "
"Example: lz4 lz4hc,9");
+DEFINE_int64(max_threads,
+ 0,
+ "The maximum number of threads allowed for generating "
+ "ota.");
+
void RoundDownPartitions(const ImageConfig& config) {
for (const auto& part : config.partitions) {
if (part.path.empty()) {
@@ -764,6 +769,8 @@
payload_config.security_patch_level = FLAGS_security_patch_level;
+ payload_config.max_threads = FLAGS_max_threads;
+
if (!FLAGS_partition_timestamps.empty()) {
CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
&payload_config));
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 387cc3a..a9926d1 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -21,6 +21,7 @@
#include <map>
#include <utility>
+#include <android-base/parseint.h>
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <brillo/strings/string_utils.h>
@@ -212,7 +213,14 @@
compression_method = "gz";
}
metadata->set_vabc_compression_param(compression_method);
- metadata->set_cow_version(android::snapshot::kCowVersionManifest);
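+ // Prefer the virtual_ab_cow_version value from the info file when present;
+ // otherwise fall back to the default manifest COW version.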
+ std::string cow_version;
+ if (!store.GetString("virtual_ab_cow_version", &cow_version)) {
+ metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+ } else {
+ uint32_t cow_version_num{};
+ android::base::ParseUint(cow_version, &cow_version_num);
+ metadata->set_cow_version(cow_version_num);
+ }
}
dynamic_partition_metadata = std::move(metadata);
return true;
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index fc56f56..225237a 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -263,6 +263,8 @@
std::string security_patch_level;
+ uint32_t max_threads = 0;
+
std::vector<bsdiff::CompressorType> compressors{
bsdiff::CompressorType::kBZ2, bsdiff::CompressorType::kBrotli};
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 6652b38..083bfc2 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -222,6 +222,8 @@
"Compression parameter passed to mkfs.erofs's -z option."
DEFINE_string security_patch_level "" \
"Optional: security patch level of this OTA"
+ DEFINE_string max_threads "" \
+ "Optional: specifies max_threads used to generate OTA"
fi
if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -761,6 +763,11 @@
--disable_vabc="${FLAGS_disable_vabc}" )
fi
+ if [[ -n "${FLAGS_max_threads}" ]]; then
+ GENERATOR_ARGS+=(
+ --max_threads="${FLAGS_max_threads}" )
+ fi
+
# minor version is set only for delta or partial payload.
if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
diff --git a/scripts/update_device.py b/scripts/update_device.py
index f94774b..8b9fbe9 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -517,10 +517,12 @@
metadata_path = "/data/ota_package/metadata"
if args.allocate_only:
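+ # Read the payload headers (which include the metadata hash) from the OTA
+ # package so update_engine can verify the metadata during allocation.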
+ with zipfile.ZipFile(args.otafile, "r") as zfp:
+ headers = zfp.read("payload_properties.txt").decode()
if PushMetadata(dut, args.otafile, metadata_path):
dut.adb([
"shell", "update_engine_client", "--allocate",
- "--metadata={}".format(metadata_path)])
+ "--metadata={} --headers='{}'".format(metadata_path, headers)])
# Return 0, as we are executing ADB commands here, no work needed after
# this point
return 0