Merge "create filegroup for update_device scripts." into udc-dev am: 5a7aab655f

Original change: https://googleplex-android-review.googlesource.com/c/platform/system/update_engine/+/23249943

Change-Id: I4d027de36bcb25e7125cf8afd9bc4a34ba583979
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index 5628109..6134885 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -464,8 +464,19 @@
   return true;
 }
 
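+// Comparison helpers so a brillo::Blob (std::vector<unsigned char>) can be
+// checked against a std::string_view byte-for-byte without copying.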
+bool operator==(const std::vector<unsigned char>& a, std::string_view b) {
+  if (a.size() != b.size()) {
+    return false;
+  }
+  return memcmp(a.data(), b.data(), a.size()) == 0;
+}
+bool operator!=(const std::vector<unsigned char>& a, std::string_view b) {
+  return !(a == b);
+}
+
 bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
     const std::string& metadata_filename,
+    std::string_view expected_metadata_hash,
     DeltaArchiveManifest* manifest,
     brillo::ErrorPtr* error) {
   FileDescriptorPtr fd(new EintrSafeFileDescriptor);
@@ -508,6 +519,21 @@
         "Failed to read metadata and signature from " + metadata_filename);
   }
   fd->Close();
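+  // An empty |expected_metadata_hash| means the caller did not request
+  // verification, so the check below is skipped entirely.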
+  if (!expected_metadata_hash.empty()) {
+    brillo::Blob metadata_hash;
+    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
+        metadata.data(), payload_metadata.GetMetadataSize(), &metadata_hash));
+    if (metadata_hash != expected_metadata_hash) {
+      return LogAndSetError(error,
+                            FROM_HERE,
+                            "Metadata hash mismatch. Expected hash: " +
+                                HexEncode(expected_metadata_hash) +
+                                " actual hash: " + HexEncode(metadata_hash));
+    } else {
+      LOG(INFO) << "Payload metadata hash check passed: "
+                << HexEncode(metadata_hash);
+    }
+  }
 
   auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath(
       constants::kUpdateCertificatesPath);
@@ -1097,14 +1123,20 @@
     const std::string& metadata_filename,
     const vector<string>& key_value_pair_headers,
     brillo::ErrorPtr* error) {
-  DeltaArchiveManifest manifest;
-  if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) {
-    return 0;
-  }
   std::map<string, string> headers;
   if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
     return 0;
   }
+  DeltaArchiveManifest manifest;
+  brillo::Blob metadata_hash;
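+  // The metadata-hash header is optional; if it is absent or not valid
+  // Base64, clear the blob so VerifyPayloadParseManifest skips the check.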
+  if (!brillo::data_encoding::Base64Decode(
+          headers[kPayloadPropertyMetadataHash], &metadata_hash)) {
+    metadata_hash.clear();
+  }
+  if (!VerifyPayloadParseManifest(
+          metadata_filename, ToStringView(metadata_hash), &manifest, error)) {
+    return 0;
+  }
 
   std::vector<ApexInfo> apex_infos(manifest.apex_info().begin(),
                                    manifest.apex_info().end());
diff --git a/aosp/update_attempter_android.h b/aosp/update_attempter_android.h
index c2226b2..bbffbe9 100644
--- a/aosp/update_attempter_android.h
+++ b/aosp/update_attempter_android.h
@@ -221,8 +221,14 @@
   // Helper of public VerifyPayloadApplicable. Return the parsed manifest in
   // |manifest|.
   static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         std::string_view metadata_hash,
                                          DeltaArchiveManifest* manifest,
                                          brillo::ErrorPtr* error);
+  static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         DeltaArchiveManifest* manifest,
+                                         brillo::ErrorPtr* error) {
+    return VerifyPayloadParseManifest(metadata_filename, "", manifest, error);
+  }
 
   // Enqueue and run a CleanupPreviousUpdateAction.
   void ScheduleCleanupPreviousUpdate();
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index dd7b2e8..36bfcc8 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -90,7 +90,7 @@
   bool valid_;
 
   // The hash state used by OpenSSL
-  SHA256_CTX ctx_;
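+  // Value-initialized, presumably to avoid reads of uninitialized state
+  // before Init() is called.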
+  SHA256_CTX ctx_{};
   DISALLOW_COPY_AND_ASSIGN(HashCalculator);
 };
 
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
index 3fee196..56edc24 100644
--- a/payload_consumer/fec_file_descriptor.cc
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -34,7 +34,7 @@
     return false;
   }
 
-  fec_status status;
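+  // Zero-initialized defensively, likely to satisfy uninitialized-read
+  // checks.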
+  fec_status status{};
   if (!fh_.get_status(status)) {
     LOG(ERROR) << "Couldn't load ECC status";
     fh_.close();
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
index d7d8bea..2ec05f5 100644
--- a/payload_consumer/partition_writer.cc
+++ b/payload_consumer/partition_writer.cc
@@ -32,18 +32,14 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
-#include "update_engine/common/terminator.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/cached_file_descriptor.h"
-#include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/install_operation_executor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/mount_history.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_consumer/xz_extent_writer.h"
 #include "update_engine/payload_generator/extent_utils.h"
 
 namespace chromeos_update_engine {
@@ -232,22 +228,23 @@
   // decide if the operation should be skipped.
   const PartitionUpdate& partition = partition_update_;
 
-  InstallOperation buf;
-  const bool should_optimize = dynamic_control_->OptimizeOperation(
-      partition.partition_name(), operation, &buf);
-  const InstallOperation& optimized = should_optimize ? buf : operation;
-
   // Invoke ChooseSourceFD with the original operation, so that it can
   // properly verify source hashes. The optimized operation might contain a
   // smaller set of extents, or be completely empty.
   auto source_fd = ChooseSourceFD(operation, error);
-  if (source_fd == nullptr) {
-    LOG(ERROR) << "Unrecoverable source hash mismatch found on partition "
-               << partition.partition_name()
-               << " extents: " << ExtentsToString(operation.src_extents());
+  if (*error != ErrorCode::kSuccess || source_fd == nullptr) {
+    LOG(WARNING) << "Source hash mismatch detected for extents "
+                 << operation.src_extents() << " on partition "
+                 << partition.partition_name() << " @ " << source_path_;
+
     return false;
   }
 
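+  // Defer the OptimizeOperation lookup until after source verification; on
+  // a hash mismatch we return early and the lookup would be wasted work.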
+  InstallOperation buf;
+  const bool should_optimize = dynamic_control_->OptimizeOperation(
+      partition.partition_name(), operation, &buf);
+  const InstallOperation& optimized = should_optimize ? buf : operation;
+
   auto writer = CreateBaseExtentWriter();
   return install_op_executor_.ExecuteSourceCopyOperation(
       optimized, std::move(writer), source_fd);
@@ -340,8 +337,9 @@
 
     // Log remount history if this device is an ext4 partition.
     LogMountHistory(source_fd);
-
-    *error = ErrorCode::kDownloadStateInitializationError;
+    if (error) {
+      *error = ErrorCode::kDownloadStateInitializationError;
+    }
     return false;
   }
   return true;
diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc
index 4910594..32324b6 100644
--- a/payload_consumer/partition_writer_unittest.cc
+++ b/payload_consumer/partition_writer_unittest.cc
@@ -210,8 +210,7 @@
   op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
   ErrorCode error = ErrorCode::kSuccess;
-  ASSERT_EQ(writer_.verified_source_fd_.source_ecc_fd_,
-            writer_.ChooseSourceFD(op, &error));
+  ASSERT_NE(writer_.ChooseSourceFD(op, &error), nullptr);
   ASSERT_EQ(ErrorCode::kSuccess, error);
   // Verify that the fake_fec was actually used.
   ASSERT_EQ(1U, fake_fec->GetReadOps().size());
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 17b7d50..b00ff70 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -95,7 +95,7 @@
     }
     copy_blocks_.AddExtent(cow_op.dst_extent());
   }
-  LOG(INFO) << "Partition `" << partition_update.partition_name() << " has "
+  LOG(INFO) << "Partition `" << partition_update.partition_name() << "` has "
             << copy_blocks_.blocks() << " copy blocks";
 }
 
diff --git a/payload_consumer/verified_source_fd.cc b/payload_consumer/verified_source_fd.cc
index 002bd07..f35b6a9 100644
--- a/payload_consumer/verified_source_fd.cc
+++ b/payload_consumer/verified_source_fd.cc
@@ -26,11 +26,17 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
-#include "update_engine/payload_consumer/mount_history.h"
 #include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/update_metadata.pb.h"
+#if USE_FEC
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#endif
 
 namespace chromeos_update_engine {
 using std::string;
@@ -45,7 +51,7 @@
     return false;
 
 #if USE_FEC
-  FileDescriptorPtr fd(new FecFileDescriptor());
+  auto fd = std::make_shared<FecFileDescriptor>();
   if (!fd->Open(source_path_.c_str(), O_RDONLY, 0)) {
     PLOG(ERROR) << "Unable to open ECC source partition " << source_path_;
     source_ecc_open_failure_ = true;
@@ -60,12 +66,25 @@
   return !source_ecc_open_failure_;
 }
 
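+// Persist ECC-recovered data back to the raw source partition so that
+// subsequent reads of these extents succeed without FEC decoding.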
+bool VerifiedSourceFd::WriteBackCorrectedSourceBlocks(
+    const std::vector<unsigned char>& source_data,
+    const google::protobuf::RepeatedPtrField<Extent>& extents) {
+  auto fd = std::make_shared<EintrSafeFileDescriptor>();
+  TEST_AND_RETURN_FALSE_ERRNO(fd->Open(source_path_.c_str(), O_RDWR));
+  DirectExtentWriter writer(fd);
+  TEST_AND_RETURN_FALSE(writer.Init(extents, block_size_));
+  return writer.Write(source_data.data(), source_data.size());
+}
+
 FileDescriptorPtr VerifiedSourceFd::ChooseSourceFD(
     const InstallOperation& operation, ErrorCode* error) {
   if (source_fd_ == nullptr) {
     LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
     return nullptr;
   }
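+  // Default to success; the fallback paths below overwrite this with a more
+  // specific code when the raw device cannot be used as-is.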
+  if (error) {
+    *error = ErrorCode::kSuccess;
+  }
   if (!operation.has_src_sha256_hash()) {
     // When the operation doesn't include a source hash, we attempt the error
     // corrected device first since we can't verify the block in the raw device
@@ -74,6 +93,9 @@
     if (OpenCurrentECCPartition() &&
         fd_utils::ReadAndHashExtents(
             source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      if (error) {
+        *error = ErrorCode::kDownloadOperationHashMissingError;
+      }
       return source_ecc_fd_;
     }
     return source_fd_;
@@ -87,6 +109,9 @@
       source_hash == expected_source_hash) {
     return source_fd_;
   }
+  if (error) {
+    *error = ErrorCode::kDownloadOperationHashMismatch;
+  }
   // We fall back to use the error corrected device if the hash of the raw
   // device doesn't match or there was an error reading the source partition.
   if (!OpenCurrentECCPartition()) {
@@ -103,11 +128,23 @@
                << base::HexEncode(expected_source_hash.data(),
                                   expected_source_hash.size());
 
-  if (fd_utils::ReadAndHashExtents(
-          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
-      PartitionWriter::ValidateSourceHash(
+  std::vector<unsigned char> source_data;
+  if (!utils::ReadExtents(
+          source_ecc_fd_, operation.src_extents(), &source_data, block_size_)) {
+    return nullptr;
+  }
+  if (!HashCalculator::RawHashOfData(source_data, &source_hash)) {
+    return nullptr;
+  }
+  if (PartitionWriter::ValidateSourceHash(
           source_hash, operation, source_ecc_fd_, error)) {
     source_ecc_recovered_failures_++;
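+    // If the corrected bytes were written back successfully, the raw device
+    // is consistent again and can be returned with a success code; otherwise
+    // fall back to reading through the ECC device.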
+    if (WriteBackCorrectedSourceBlocks(source_data, operation.src_extents())) {
+      if (error) {
+        *error = ErrorCode::kSuccess;
+      }
+      return source_fd_;
+    }
     return source_ecc_fd_;
   }
   return nullptr;
diff --git a/payload_consumer/verified_source_fd.h b/payload_consumer/verified_source_fd.h
index f7d0620..6d859b9 100644
--- a/payload_consumer/verified_source_fd.h
+++ b/payload_consumer/verified_source_fd.h
@@ -39,6 +39,9 @@
   [[nodiscard]] bool Open();
 
  private:
+  bool WriteBackCorrectedSourceBlocks(
+      const std::vector<unsigned char>& source_data,
+      const google::protobuf::RepeatedPtrField<Extent>& extents);
   bool OpenCurrentECCPartition();
   const size_t block_size_;
   const std::string source_path_;
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 8ee1436..152da4d 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -677,6 +677,10 @@
 
   size_t max_threads = GetMaxThreads();
 
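+  // An explicit --max_threads setting overrides the auto-detected limit.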
+  if (config.max_threads > 0) {
+    max_threads = config.max_threads;
+  }
+
   // Sort the files in descending order based on number of new blocks to make
   // sure we start the largest ones first.
   if (file_delta_processors.size() > max_threads) {
diff --git a/payload_generator/erofs_filesystem.cc b/payload_generator/erofs_filesystem.cc
index bf10d8c..c2c3979 100644
--- a/payload_generator/erofs_filesystem.cc
+++ b/payload_generator/erofs_filesystem.cc
@@ -28,7 +28,6 @@
 #include "erofs_iterate.h"
 #include "lz4diff/lz4diff.pb.h"
 #include "lz4diff/lz4patch.h"
-#include "lz4diff/lz4diff.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
@@ -78,7 +77,8 @@
 
 static void FillExtentInfo(FilesystemInterface::File* p_file,
                            std::string_view image_filename,
-                           struct erofs_inode* inode) {
+                           struct erofs_inode* inode,
+                           size_t* const unaligned_bytes) {
   auto& file = *p_file;
 
   struct erofs_map_blocks block {};
@@ -88,9 +88,11 @@
   auto& compressed_blocks = file.compressed_file_info.blocks;
   auto last_pa = block.m_pa;
   auto last_plen = 0;
-  LOG(INFO) << file.name << ", isize: " << inode->i_size;
   while (block.m_la < inode->i_size) {
     auto error = ErofsMapBlocks(inode, &block, EROFS_GET_BLOCKS_FIEMAP);
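+    // Advance to the next logical block when this iteration ends, no matter
+    // which path exits the loop body.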
+    DEFER {
+      block.m_la += block.m_llen;
+    };
     if (error) {
       LOG(FATAL) << "Failed to map blocks for " << file.name << " in "
                  << image_filename;
@@ -105,9 +107,10 @@
                    << "` has unaligned blocks: at physical byte offset: "
                    << block.m_pa << ", "
                    << " length: " << block.m_plen
-                   << ", logical offset: " << block.m_la;
+                   << ", logical offset: " << block.m_la << ", remaining data: "
+                   << inode->i_size - (block.m_la + block.m_llen);
       }
-      break;
+      (*unaligned_bytes) += block.m_plen;
     }
     // Certain uncompressed blocks have physical size > logical size. Usually
     // the physical block contains a bunch of trailing zeros. Include these
@@ -140,11 +143,11 @@
             CompressedBlock(block.m_la, block.m_plen, block.m_llen));
       }
     }
-
-    block.m_la += block.m_llen;
   }
-  file.extents.push_back(ExtentForRange(
-      last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
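+  // last_plen stays 0 when no block was mapped; avoid emitting an empty
+  // trailing extent in that case.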
+  if (last_plen != 0) {
+    file.extents.push_back(ExtentForRange(
+        last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
+  }
   return;
 }
 
@@ -203,6 +206,7 @@
 bool ErofsFilesystem::GetFiles(const std::string& filename,
                                std::vector<File>* files,
                                const CompressionAlgorithm& algo) {
+  size_t unaligned_bytes = 0;
   erofs_iterate_root_dir(&sbi, [&](struct erofs_iterate_dir_context* p_info) {
     const auto& info = *p_info;
     if (info.ctx.de_ftype != EROFS_FT_REG_FILE) {
@@ -225,14 +229,10 @@
       LOG(FATAL) << "Failed to get occupied size for " << filename;
       return err;
     }
-    // If data is packed inline, likely this node is stored on block unalighed
-    // addresses. OTA doesn't work for non-block aligned files. All blocks not
-    // reported by |GetFiles| will be updated in 1 operation. Ignore inline
-    // files for now.
-    // TODO(b/206729162) Support un-aligned files.
-    if (inode.datalayout == EROFS_INODE_FLAT_INLINE) {
-      return 0;
-    }
+    // For EROFS_INODE_FLAT_INLINE, most blocks are stored at aligned
+    // addresses, except the last block, which is stored right after the
+    // inode. Such nodes have a small amount of unaligned data, which is
+    // fine.
 
     File file;
     file.name = info.path;
@@ -242,7 +242,7 @@
 
     file.file_stat.st_size = uncompressed_size;
     file.file_stat.st_ino = inode.nid;
-    FillExtentInfo(&file, filename, &inode);
+    FillExtentInfo(&file, filename, &inode, &unaligned_bytes);
     file.compressed_file_info.algo = algo;
 
     files->emplace_back(std::move(file));
@@ -252,6 +252,11 @@
   for (auto& file : *files) {
     NormalizeExtents(&file.extents);
   }
+  LOG(INFO) << "EROFS image " << filename << " has " << unaligned_bytes
+            << " unaligned bytes, which is "
+            << static_cast<float>(unaligned_bytes) / utils::FileSize(filename) *
+                   100.0f
+            << "% of partition data";
   return true;
 }
 
diff --git a/payload_generator/erofs_filesystem_unittest.cc b/payload_generator/erofs_filesystem_unittest.cc
index e6a8929..58686c3 100644
--- a/payload_generator/erofs_filesystem_unittest.cc
+++ b/payload_generator/erofs_filesystem_unittest.cc
@@ -102,11 +102,11 @@
       "/dir1/dir2/file4",
       "/dir1/file0",
       "/dir1/file2",
+      "/etc/update_engine.conf",
       "/file1",
      // Files < 4K are stored inline and are often not stored on a block
      // boundary; they are now reported as well.
-      // "/generate_test_erofs_images.sh"
-  };
+      "/generate_test_erofs_images.sh"};
   ASSERT_EQ(filenames, expected_filenames);
   const auto delta_generator = files[0];
   ASSERT_GT(delta_generator.compressed_file_info.blocks.size(), 0UL);
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 5a901d3..6616ee1 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -455,6 +455,11 @@
               "Compression parameter passed to mkfs.erofs's -z option. "
               "Example: lz4 lz4hc,9");
 
+DEFINE_int64(max_threads,
+             0,
+             "The maximum number of threads allowed for generating "
+             "the OTA.");
+
 void RoundDownPartitions(const ImageConfig& config) {
   for (const auto& part : config.partitions) {
     if (part.path.empty()) {
@@ -764,6 +769,8 @@
 
   payload_config.security_patch_level = FLAGS_security_patch_level;
 
+  payload_config.max_threads = FLAGS_max_threads;
+
   if (!FLAGS_partition_timestamps.empty()) {
     CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
                                       &payload_config));
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 387cc3a..a9926d1 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -21,6 +21,7 @@
 #include <map>
 #include <utility>
 
+#include <android-base/parseint.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <brillo/strings/string_utils.h>
@@ -212,7 +213,14 @@
       compression_method = "gz";
     }
     metadata->set_vabc_compression_param(compression_method);
-    metadata->set_cow_version(android::snapshot::kCowVersionManifest);
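+    // Let the product config pin an explicit COW format version via
+    // virtual_ab_cow_version; otherwise keep the manifest default.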
+    std::string cow_version;
+    if (!store.GetString("virtual_ab_cow_version", &cow_version)) {
+      metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+    } else {
+      uint32_t cow_version_num{};
+      android::base::ParseUint(cow_version, &cow_version_num);
+      metadata->set_cow_version(cow_version_num);
+    }
   }
   dynamic_partition_metadata = std::move(metadata);
   return true;
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index fc56f56..225237a 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -263,6 +263,8 @@
 
   std::string security_patch_level;
 
+  uint32_t max_threads = 0;
+
   std::vector<bsdiff::CompressorType> compressors{
       bsdiff::CompressorType::kBZ2, bsdiff::CompressorType::kBrotli};
 
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 6652b38..083bfc2 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -222,6 +222,8 @@
     "Compression parameter passed to mkfs.erofs's -z option."
   DEFINE_string security_patch_level "" \
     "Optional: security patch level of this OTA"
+  DEFINE_string max_threads "" \
+    "Optional: specifies max_threads used to generate OTA"
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -761,6 +763,11 @@
       --disable_vabc="${FLAGS_disable_vabc}" )
   fi
 
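+  # Forward --max_threads to delta_generator only when explicitly set.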
+  if [[ -n "${FLAGS_max_threads}" ]]; then
+    GENERATOR_ARGS+=(
+      --max_threads="${FLAGS_max_threads}" )
+  fi
+
   # minor version is set only for delta or partial payload.
   if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
     GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
diff --git a/scripts/update_device.py b/scripts/update_device.py
index f94774b..8b9fbe9 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -517,10 +517,12 @@
 
   metadata_path = "/data/ota_package/metadata"
   if args.allocate_only:
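+    # Read the payload headers (including the metadata hash) from the OTA
+    # package so update_engine can verify the metadata during allocation.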
+    with zipfile.ZipFile(args.otafile, "r") as zfp:
+      headers = zfp.read("payload_properties.txt").decode()
     if PushMetadata(dut, args.otafile, metadata_path):
       dut.adb([
           "shell", "update_engine_client", "--allocate",
-          "--metadata={}".format(metadata_path)])
+          "--metadata={} --headers='{}'".format(metadata_path, headers)])
     # Return 0; we only execute ADB commands here, so no further work is
     # needed after this point.
     return 0