create a python binary for update_device am: f5dd1b9006 am: a8f1476bea

Original change: https://googleplex-android-review.googlesource.com/c/platform/system/update_engine/+/23372276

Change-Id: If07a9d3612d1bc96a424f38f6f9cb24d3d000a8a
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/Android.bp b/Android.bp
index c321de8..86add6a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1187,6 +1187,7 @@
         "aosp/update_attempter_android_unittest.cc",
         "common/utils_unittest.cc",
         "download_action_android_unittest.cc",
+        "payload_consumer/block_extent_writer_unittest.cc",
         "payload_consumer/bzip_extent_writer_unittest.cc",
         "payload_consumer/cached_file_descriptor_unittest.cc",
         "payload_consumer/cow_writer_file_descriptor_unittest.cc",
diff --git a/aosp/update_attempter_android.cc b/aosp/update_attempter_android.cc
index 5628109..6134885 100644
--- a/aosp/update_attempter_android.cc
+++ b/aosp/update_attempter_android.cc
@@ -464,8 +464,19 @@
   return true;
 }
 
+bool operator==(const std::vector<unsigned char>& a, std::string_view b) {
+  if (a.size() != b.size()) {
+    return false;
+  }
+  return memcmp(a.data(), b.data(), a.size()) == 0;
+}
+bool operator!=(const std::vector<unsigned char>& a, std::string_view b) {
+  return !(a == b);
+}
+
 bool UpdateAttempterAndroid::VerifyPayloadParseManifest(
     const std::string& metadata_filename,
+    std::string_view expected_metadata_hash,
     DeltaArchiveManifest* manifest,
     brillo::ErrorPtr* error) {
   FileDescriptorPtr fd(new EintrSafeFileDescriptor);
@@ -508,6 +519,21 @@
         "Failed to read metadata and signature from " + metadata_filename);
   }
   fd->Close();
+  if (!expected_metadata_hash.empty()) {
+    brillo::Blob metadata_hash;
+    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfBytes(
+        metadata.data(), payload_metadata.GetMetadataSize(), &metadata_hash));
+    if (metadata_hash != expected_metadata_hash) {
+      return LogAndSetError(error,
+                            FROM_HERE,
+                            "Metadata hash mismatch. Expected hash: " +
+                                HexEncode(expected_metadata_hash) +
+                                " actual hash: " + HexEncode(metadata_hash));
+    } else {
+      LOG(INFO) << "Payload metadata hash check passed : "
+                << HexEncode(metadata_hash);
+    }
+  }
 
   auto payload_verifier = PayloadVerifier::CreateInstanceFromZipPath(
       constants::kUpdateCertificatesPath);
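
The check above hashes the first GetMetadataSize() bytes of the payload and compares them against the caller-supplied digest. A minimal standalone sketch of that comparison, using OpenSSL's SHA256 directly rather than the project's HashCalculator (file handling and names here are illustrative, not part of this change):

    // Minimal sketch: verify that the leading |metadata_size| bytes of a
    // payload match an expected SHA-256 digest. Assumes OpenSSL; illustrative.
    #include <openssl/sha.h>

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <fstream>
    #include <string>
    #include <vector>

    bool MetadataHashMatches(const std::string& payload_path,
                             size_t metadata_size,
                             const std::vector<uint8_t>& expected_hash) {
      std::ifstream in(payload_path, std::ios::binary);
      std::vector<uint8_t> metadata(metadata_size);
      if (!in.read(reinterpret_cast<char*>(metadata.data()), metadata.size())) {
        return false;  // File is shorter than the declared metadata size.
      }
      uint8_t actual[SHA256_DIGEST_LENGTH];
      SHA256(metadata.data(), metadata.size(), actual);
      return expected_hash.size() == SHA256_DIGEST_LENGTH &&
             memcmp(actual, expected_hash.data(), SHA256_DIGEST_LENGTH) == 0;
    }
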
@@ -1097,14 +1123,20 @@
     const std::string& metadata_filename,
     const vector<string>& key_value_pair_headers,
     brillo::ErrorPtr* error) {
-  DeltaArchiveManifest manifest;
-  if (!VerifyPayloadParseManifest(metadata_filename, &manifest, error)) {
-    return 0;
-  }
   std::map<string, string> headers;
   if (!ParseKeyValuePairHeaders(key_value_pair_headers, &headers, error)) {
     return 0;
   }
+  DeltaArchiveManifest manifest;
+  brillo::Blob metadata_hash;
+  if (!brillo::data_encoding::Base64Decode(
+          headers[kPayloadPropertyMetadataHash], &metadata_hash)) {
+    metadata_hash.clear();
+  }
+  if (!VerifyPayloadParseManifest(
+          metadata_filename, ToStringView(metadata_hash), &manifest, error)) {
+    return 0;
+  }
 
   std::vector<ApexInfo> apex_infos(manifest.apex_info().begin(),
                                    manifest.apex_info().end());
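
On this path the expected digest arrives Base64-encoded in the kPayloadPropertyMetadataHash header; if decoding fails, the hash check is simply skipped rather than failing the call. For reference, a hedged sketch of how such a header value could be produced on the host side with plain OpenSSL (the METADATA_HASH key name is an assumption about the header format, not something this change defines):

    // Sketch: build a "METADATA_HASH=<base64(sha256)>" property from raw
    // metadata bytes. Assumes OpenSSL; the key name is illustrative.
    #include <openssl/evp.h>
    #include <openssl/sha.h>

    #include <cstdint>
    #include <string>
    #include <vector>

    std::string MetadataHashHeader(const std::vector<uint8_t>& metadata_bytes) {
      uint8_t digest[SHA256_DIGEST_LENGTH];
      SHA256(metadata_bytes.data(), metadata_bytes.size(), digest);
      // Base64 output is 4 * ceil(n / 3) characters plus a trailing NUL.
      unsigned char encoded[4 * ((SHA256_DIGEST_LENGTH + 2) / 3) + 1];
      const int len = EVP_EncodeBlock(encoded, digest, SHA256_DIGEST_LENGTH);
      return "METADATA_HASH=" + std::string(reinterpret_cast<char*>(encoded), len);
    }
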
diff --git a/aosp/update_attempter_android.h b/aosp/update_attempter_android.h
index c2226b2..bbffbe9 100644
--- a/aosp/update_attempter_android.h
+++ b/aosp/update_attempter_android.h
@@ -221,8 +221,14 @@
   // Helper of public VerifyPayloadApplicable. Return the parsed manifest in
   // |manifest|.
   static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         std::string_view metadata_hash,
                                          DeltaArchiveManifest* manifest,
                                          brillo::ErrorPtr* error);
+  static bool VerifyPayloadParseManifest(const std::string& metadata_filename,
+                                         DeltaArchiveManifest* manifest,
+                                         brillo::ErrorPtr* error) {
+    return VerifyPayloadParseManifest(metadata_filename, "", manifest, error);
+  }
 
   // Enqueue and run a CleanupPreviousUpdateAction.
   void ScheduleCleanupPreviousUpdate();
diff --git a/common/hash_calculator.h b/common/hash_calculator.h
index dd7b2e8..36bfcc8 100644
--- a/common/hash_calculator.h
+++ b/common/hash_calculator.h
@@ -90,7 +90,7 @@
   bool valid_;
 
   // The hash state used by OpenSSL
-  SHA256_CTX ctx_;
+  SHA256_CTX ctx_{};
   DISALLOW_COPY_AND_ASSIGN(HashCalculator);
 };
 
diff --git a/common/utils.cc b/common/utils.cc
index 4c1365a..9723c8f 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -57,7 +57,9 @@
 
 #include "update_engine/common/constants.h"
 #include "update_engine/common/subprocess.h"
+#ifdef __ANDROID__
 #include "update_engine/common/platform_constants.h"
+#endif
 #include "update_engine/payload_consumer/file_descriptor.h"
 
 using base::Time;
diff --git a/common/utils.h b/common/utils.h
index 0087794..6c6337f 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -31,6 +31,8 @@
 #include <vector>
 
 #include <android-base/strings.h>
+#include <android-base/mapped_file.h>
+#include <android-base/scopeguard.h>
 #include <base/files/file_path.h>
 #include <base/posix/eintr_wrapper.h>
 #include <base/strings/string_number_conversions.h>
@@ -38,9 +40,6 @@
 #include <brillo/key_value_store.h>
 #include <brillo/secure_blob.h>
 
-#include "android-base/mapped_file.h"
-#include "android-base/scopeguard.h"
-#include "google/protobuf/repeated_field.h"
 #include "update_engine/common/action.h"
 #include "update_engine/common/action_processor.h"
 #include "update_engine/common/constants.h"
diff --git a/payload_consumer/block_extent_writer.cc b/payload_consumer/block_extent_writer.cc
index 055b485..456c491 100644
--- a/payload_consumer/block_extent_writer.cc
+++ b/payload_consumer/block_extent_writer.cc
@@ -16,9 +16,13 @@
 
 #include "update_engine/payload_consumer/block_extent_writer.h"
 
-#include <algorithm>
-#include <cstdint>
+#include <stdint.h>
 
+#include <algorithm>
+
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/update_metadata.pb.h"
 
 namespace chromeos_update_engine {
@@ -35,56 +39,65 @@
   return true;
 }
 
-size_t BlockExtentWriter::ConsumeWithBuffer(const uint8_t* data, size_t count) {
-  CHECK_LT(cur_extent_idx_, static_cast<size_t>(extents_.size()));
+bool BlockExtentWriter::WriteExtent(const void* bytes, const size_t count) {
   const auto& cur_extent = extents_[cur_extent_idx_];
-  const auto cur_extent_size = cur_extent.num_blocks() * block_size_;
+  const auto write_extent =
+      ExtentForRange(cur_extent.start_block() + offset_in_extent_ / block_size_,
+                     count / kBlockSize);
+  offset_in_extent_ += count;
+  if (offset_in_extent_ == cur_extent.num_blocks() * block_size_) {
+    NextExtent();
+  }
+  return WriteExtent(bytes, write_extent, block_size_);
+}
 
-  if (buffer_.empty() && count >= cur_extent_size) {
-    if (!WriteExtent(data, cur_extent, block_size_)) {
+size_t BlockExtentWriter::ConsumeWithBuffer(const uint8_t* const data,
+                                            const size_t count) {
+  if (cur_extent_idx_ >= static_cast<size_t>(extents_.size())) {
+    if (count > 0) {
+      LOG(ERROR) << "Exhausted all blocks, but still have " << count
+                 << " bytes pending for write";
+    }
+    return 0;
+  }
+  const auto& cur_extent = extents_[cur_extent_idx_];
+  const auto cur_extent_size =
+      static_cast<size_t>(cur_extent.num_blocks() * block_size_);
+
+  const auto write_size =
+      std::min(cur_extent_size - offset_in_extent_, BUFFER_SIZE);
+  if (buffer_.empty() && count >= write_size) {
+    if (!WriteExtent(data, write_size)) {
       LOG(ERROR) << "WriteExtent(" << cur_extent.start_block() << ", "
-                 << static_cast<const void*>(data) << ", " << cur_extent_size
+                 << static_cast<const void*>(data) << ", " << write_size
                  << ") failed.";
       // return value is expected to be greater than 0. Return 0 to signal error
       // condition
       return 0;
     }
-    if (!NextExtent()) {
-      if (count != cur_extent_size) {
-        LOG(ERROR) << "Exhausted all blocks, but still have "
-                   << count - cur_extent_size << " bytes left";
-        return 0;
-      }
-    }
-    return cur_extent_size;
+    return write_size;
   }
-  if (buffer_.size() >= cur_extent_size) {
+  if (buffer_.size() >= write_size) {
     LOG(ERROR)
-        << "Data left in buffer should never be >= cur_extent_size, otherwise "
+        << "Data left in buffer should never be >= write_size, otherwise "
            "we should have send that data to CowWriter. Buffer size: "
-        << buffer_.size() << " current extent size: " << cur_extent_size;
+        << buffer_.size() << " write_size: " << write_size;
   }
   const size_t bytes_to_copy =
-      std::min<size_t>(count, cur_extent_size - buffer_.size());
+      std::min<size_t>(count, write_size - buffer_.size());
   TEST_GT(bytes_to_copy, 0U);
 
   buffer_.insert(buffer_.end(), data, data + bytes_to_copy);
-  TEST_LE(buffer_.size(), cur_extent_size);
+  TEST_LE(buffer_.size(), write_size);
 
-  if (buffer_.size() == cur_extent_size) {
-    if (!WriteExtent(buffer_.data(), cur_extent, block_size_)) {
+  if (buffer_.size() == write_size) {
+    if (!WriteExtent(buffer_.data(), write_size)) {
       LOG(ERROR) << "WriteExtent(" << buffer_.data() << ", "
                  << cur_extent.start_block() << ", " << cur_extent.num_blocks()
                  << ") failed.";
       return 0;
     }
     buffer_.clear();
-    if (!NextExtent()) {
-      if (count != bytes_to_copy) {
-        LOG(ERROR) << "Exhausted all blocks, but still have "
-                   << count - bytes_to_copy << " bytes left";
-      }
-    }
   }
   return bytes_to_copy;
 }
@@ -111,6 +124,7 @@
 
 bool BlockExtentWriter::NextExtent() {
   cur_extent_idx_++;
+  offset_in_extent_ = 0;
   return cur_extent_idx_ < static_cast<size_t>(extents_.size());
 }
 }  // namespace chromeos_update_engine
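
With offset_in_extent_, ConsumeWithBuffer no longer writes an entire extent in one call; each write is capped at BUFFER_SIZE and a large extent is emitted as a sequence of block-aligned sub-extents. A standalone sketch of that splitting, with plain structs standing in for the protobuf Extent (illustrative, not the class itself):

    // Sketch: compute the block-aligned writes BlockExtentWriter would issue
    // for one extent, given the 1 MiB buffer cap.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct SimpleExtent {
      uint64_t start_block;
      uint64_t num_blocks;
    };

    std::vector<SimpleExtent> PlanWrites(const SimpleExtent& extent,
                                         uint64_t block_size,
                                         uint64_t buffer_size) {
      std::vector<SimpleExtent> writes;
      const uint64_t blocks_per_buffer = buffer_size / block_size;
      uint64_t written = 0;
      while (written < extent.num_blocks) {
        const uint64_t chunk =
            std::min(blocks_per_buffer, extent.num_blocks - written);
        writes.push_back({extent.start_block + written, chunk});
        written += chunk;
      }
      return writes;
    }

    int main() {
      // A 512-block extent, 4 KiB blocks, 1 MiB buffer -> two 256-block writes.
      for (const auto& w : PlanWrites({10, 512}, 4096, 1024 * 1024)) {
        std::printf("write %llu blocks at block %llu\n",
                    static_cast<unsigned long long>(w.num_blocks),
                    static_cast<unsigned long long>(w.start_block));
      }
      return 0;
    }
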
diff --git a/payload_consumer/block_extent_writer.h b/payload_consumer/block_extent_writer.h
index 902e3e1..eeae36d 100644
--- a/payload_consumer/block_extent_writer.h
+++ b/payload_consumer/block_extent_writer.h
@@ -28,6 +28,7 @@
 // Cache data up to the size of one extent before writing.
 class BlockExtentWriter : public chromeos_update_engine::ExtentWriter {
  public:
+  static constexpr size_t BUFFER_SIZE = 1024 * 1024;
   BlockExtentWriter() = default;
   ~BlockExtentWriter() = default;
   // Returns true on success.
@@ -44,15 +45,18 @@
   size_t BlockSize() const { return block_size_; }
 
  private:
+  bool WriteExtent(const void* bytes, size_t count);
   bool NextExtent();
-  [[nodiscard]] size_t ConsumeWithBuffer(const uint8_t* bytes, size_t count);
+  [[nodiscard]] size_t ConsumeWithBuffer(const uint8_t* const bytes,
+                                         const size_t count);
   // It's a non-owning pointer, because PartitionWriter owns the CowWriter. This
   // allows us to use a single instance of CowWriter for all operations applied
   // to the same partition.
   google::protobuf::RepeatedPtrField<Extent> extents_;
-  size_t cur_extent_idx_;
+  size_t cur_extent_idx_{};
   std::vector<uint8_t> buffer_;
-  size_t block_size_;
+  size_t block_size_{};
+  size_t offset_in_extent_{};
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/block_extent_writer_unittest.cc b/payload_consumer/block_extent_writer_unittest.cc
new file mode 100644
index 0000000..558e46b
--- /dev/null
+++ b/payload_consumer/block_extent_writer_unittest.cc
@@ -0,0 +1,184 @@
+//
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_consumer/block_extent_writer.h"
+
+#include <fcntl.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "update_engine/common/test_utils.h"
+#include "update_engine/payload_generator/delta_diff_generator.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/common/utils.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+
+using std::min;
+using std::string;
+using std::vector;
+using testing::_;
+using testing::Return;
+
+namespace chromeos_update_engine {
+
+class BlockExtentWriterTest : public ::testing::Test {
+ protected:
+  void SetUp() override {}
+  void TearDown() override {}
+};
+
+class MockBlockExtentWriter : public BlockExtentWriter {
+ public:
+  MOCK_METHOD(bool,
+              WriteExtent,
+              (const void*, const Extent&, size_t),
+              (override));
+};
+
+TEST_F(BlockExtentWriterTest, LongExtentTest) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  *extents.Add() = ExtentForRange(0, 1);
+  *extents.Add() = ExtentForRange(2, 1);
+  *extents.Add() = ExtentForRange(4, 1);
+  // A single large extent which doesn't fit in 1 buffer
+  static constexpr auto BLOCKS_PER_BUFFER =
+      BlockExtentWriter::BUFFER_SIZE / kBlockSize;
+  *extents.Add() = ExtentForRange(10, BLOCKS_PER_BUFFER * 2);
+  MockBlockExtentWriter writer;
+  ASSERT_TRUE(writer.Init(extents, kBlockSize));
+  std::string buffer;
+  buffer.resize(BlockExtentWriter::BUFFER_SIZE * 2);
+  ON_CALL(writer, WriteExtent(_, _, _)).WillByDefault(Return(true));
+  EXPECT_CALL(writer,
+              WriteExtent(buffer.data(), ExtentForRange(0, 1), kBlockSize));
+  EXPECT_CALL(writer,
+              WriteExtent(static_cast<void*>(buffer.data() + kBlockSize),
+                          ExtentForRange(2, 1),
+                          kBlockSize));
+  EXPECT_CALL(writer,
+              WriteExtent(static_cast<void*>(buffer.data() + kBlockSize * 2),
+                          ExtentForRange(4, 1),
+                          kBlockSize));
+  // The last extent should be split up into multiple chunks, each of one
+  // BUFFER_SIZE
+  EXPECT_CALL(writer,
+              WriteExtent(static_cast<void*>(buffer.data()),
+                          ExtentForRange(10, BLOCKS_PER_BUFFER),
+                          kBlockSize));
+  EXPECT_CALL(
+      writer,
+      WriteExtent(
+          static_cast<void*>(buffer.data() + BlockExtentWriter::BUFFER_SIZE),
+          ExtentForRange(10 + BLOCKS_PER_BUFFER, BLOCKS_PER_BUFFER),
+          kBlockSize));
+  ASSERT_TRUE(writer.Write(buffer.data(), kBlockSize * 3));
+  ASSERT_TRUE(writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE * 2));
+}
+
+TEST_F(BlockExtentWriterTest, LongExtentMultiCall) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  static constexpr auto BLOCKS_PER_BUFFER =
+      BlockExtentWriter::BUFFER_SIZE / kBlockSize;
+  *extents.Add() = ExtentForRange(10, BLOCKS_PER_BUFFER * 5);
+  MockBlockExtentWriter writer;
+  ASSERT_TRUE(writer.Init(extents, kBlockSize));
+  std::string buffer;
+  buffer.resize(BlockExtentWriter::BUFFER_SIZE * 2);
+  ON_CALL(writer, WriteExtent(_, _, _)).WillByDefault(Return(true));
+  // The long extent should be split up into multiple chunks, each of one
+  // BUFFER_SIZE
+  EXPECT_CALL(writer,
+              WriteExtent(static_cast<void*>(buffer.data()),
+                          ExtentForRange(10, BLOCKS_PER_BUFFER),
+                          kBlockSize));
+  EXPECT_CALL(
+      writer,
+      WriteExtent(static_cast<void*>(buffer.data()),
+                  ExtentForRange(10 + BLOCKS_PER_BUFFER, BLOCKS_PER_BUFFER),
+                  kBlockSize));
+  EXPECT_CALL(
+      writer,
+      WriteExtent(static_cast<void*>(buffer.data()),
+                  ExtentForRange(10 + BLOCKS_PER_BUFFER * 2, BLOCKS_PER_BUFFER),
+                  kBlockSize));
+  ASSERT_TRUE(writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE));
+  ASSERT_TRUE(writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE));
+  ASSERT_TRUE(writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE));
+}
+
+void FillArbitraryData(std::string* buffer) {
+  for (size_t i = 0; i < buffer->size(); i++) {
+    (*buffer)[i] = i;
+  }
+}
+
+TEST_F(BlockExtentWriterTest, SingleBufferMultiCall) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  static constexpr auto BLOCKS_PER_BUFFER =
+      BlockExtentWriter::BUFFER_SIZE / kBlockSize;
+  *extents.Add() = ExtentForRange(10, BLOCKS_PER_BUFFER);
+  MockBlockExtentWriter writer;
+  ASSERT_TRUE(writer.Init(extents, kBlockSize));
+  std::string buffer;
+  buffer.resize(BlockExtentWriter::BUFFER_SIZE);
+  FillArbitraryData(&buffer);
+
+  ON_CALL(writer, WriteExtent(_, _, _)).WillByDefault(Return(true));
+  EXPECT_CALL(writer,
+              WriteExtent(_, ExtentForRange(10, BLOCKS_PER_BUFFER), kBlockSize))
+      .WillOnce([&buffer](const void* data, const Extent& extent, size_t) {
+        return memcmp(data, buffer.data(), buffer.size()) == 0;
+      });
+
+  ASSERT_TRUE(
+      writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE - kBlockSize));
+  ASSERT_TRUE(writer.Write(
+      buffer.data() + BlockExtentWriter::BUFFER_SIZE - kBlockSize, kBlockSize));
+}
+
+TEST_F(BlockExtentWriterTest, MultiBufferMultiCall) {
+  google::protobuf::RepeatedPtrField<Extent> extents;
+  static constexpr auto BLOCKS_PER_BUFFER =
+      BlockExtentWriter::BUFFER_SIZE / kBlockSize;
+  *extents.Add() = ExtentForRange(10, BLOCKS_PER_BUFFER + 1);
+  MockBlockExtentWriter writer;
+  ASSERT_TRUE(writer.Init(extents, kBlockSize));
+  std::string buffer;
+  buffer.resize(BlockExtentWriter::BUFFER_SIZE);
+  FillArbitraryData(&buffer);
+
+  ON_CALL(writer, WriteExtent(_, _, _)).WillByDefault(Return(true));
+  EXPECT_CALL(writer,
+              WriteExtent(_, ExtentForRange(10, BLOCKS_PER_BUFFER), kBlockSize))
+      .WillOnce([&buffer](const void* data, const Extent& extent, size_t) {
+        return memcmp(data, buffer.data(), extent.num_blocks() * kBlockSize) ==
+               0;
+      });
+  EXPECT_CALL(
+      writer,
+      WriteExtent(_, ExtentForRange(10 + BLOCKS_PER_BUFFER, 1), kBlockSize));
+
+  ASSERT_TRUE(writer.Write(buffer.data(), BlockExtentWriter::BUFFER_SIZE));
+  ASSERT_TRUE(writer.Write(buffer.data(), kBlockSize));
+}
+
+}  // namespace chromeos_update_engine
diff --git a/payload_consumer/bzip_extent_writer.cc b/payload_consumer/bzip_extent_writer.cc
index 9491964..bee9fcd 100644
--- a/payload_consumer/bzip_extent_writer.cc
+++ b/payload_consumer/bzip_extent_writer.cc
@@ -14,6 +14,7 @@
 // limitations under the License.
 //
 
+#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/bzip_extent_writer.h"
 
 using google::protobuf::RepeatedPtrField;
diff --git a/payload_consumer/bzip_extent_writer.h b/payload_consumer/bzip_extent_writer.h
index 38c041a..4926411 100644
--- a/payload_consumer/bzip_extent_writer.h
+++ b/payload_consumer/bzip_extent_writer.h
@@ -23,7 +23,6 @@
 
 #include <brillo/secure_blob.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/extent_writer.h"
 
 // BzipExtentWriter is a concrete ExtentWriter subclass that bzip-decompresses
@@ -46,7 +45,7 @@
 
  private:
   std::unique_ptr<ExtentWriter> next_;  // The underlying ExtentWriter.
-  bz_stream stream_;                    // the libbz2 stream
+  bz_stream stream_{};                  // the libbz2 stream
   brillo::Blob input_buffer_;
 };
 
diff --git a/payload_consumer/extent_writer.h b/payload_consumer/extent_writer.h
index 8b1b532..5a4e3ee 100644
--- a/payload_consumer/extent_writer.h
+++ b/payload_consumer/extent_writer.h
@@ -23,7 +23,6 @@
 #include <base/logging.h>
 #include <brillo/secure_blob.h>
 
-#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/update_metadata.pb.h"
 
diff --git a/payload_consumer/fec_file_descriptor.cc b/payload_consumer/fec_file_descriptor.cc
index 3fee196..56edc24 100644
--- a/payload_consumer/fec_file_descriptor.cc
+++ b/payload_consumer/fec_file_descriptor.cc
@@ -34,7 +34,7 @@
     return false;
   }
 
-  fec_status status;
+  fec_status status{};
   if (!fh_.get_status(status)) {
     LOG(ERROR) << "Couldn't load ECC status";
     fh_.close();
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
index d7d8bea..2ec05f5 100644
--- a/payload_consumer/partition_writer.cc
+++ b/payload_consumer/partition_writer.cc
@@ -32,18 +32,14 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
-#include "update_engine/common/terminator.h"
+#include "update_engine/common/error_code.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/bzip_extent_writer.h"
 #include "update_engine/payload_consumer/cached_file_descriptor.h"
-#include "update_engine/payload_consumer/extent_reader.h"
 #include "update_engine/payload_consumer/extent_writer.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
 #include "update_engine/payload_consumer/install_operation_executor.h"
 #include "update_engine/payload_consumer/install_plan.h"
 #include "update_engine/payload_consumer/mount_history.h"
-#include "update_engine/payload_consumer/payload_constants.h"
-#include "update_engine/payload_consumer/xz_extent_writer.h"
 #include "update_engine/payload_generator/extent_utils.h"
 
 namespace chromeos_update_engine {
@@ -232,22 +228,23 @@
   // decide if the operation should be skipped.
   const PartitionUpdate& partition = partition_update_;
 
-  InstallOperation buf;
-  const bool should_optimize = dynamic_control_->OptimizeOperation(
-      partition.partition_name(), operation, &buf);
-  const InstallOperation& optimized = should_optimize ? buf : operation;
-
   // Invoke ChooseSourceFD with original operation, so that it can properly
   // verify source hashes. Optimized operation might contain a smaller set of
   // extents, or be completely empty.
   auto source_fd = ChooseSourceFD(operation, error);
-  if (source_fd == nullptr) {
-    LOG(ERROR) << "Unrecoverable source hash mismatch found on partition "
-               << partition.partition_name()
-               << " extents: " << ExtentsToString(operation.src_extents());
+  if (*error != ErrorCode::kSuccess || source_fd == nullptr) {
+    LOG(WARNING) << "Source hash mismatch detected for extents "
+                 << operation.src_extents() << " on partition "
+                 << partition.partition_name() << " @ " << source_path_;
+
     return false;
   }
 
+  InstallOperation buf;
+  const bool should_optimize = dynamic_control_->OptimizeOperation(
+      partition.partition_name(), operation, &buf);
+  const InstallOperation& optimized = should_optimize ? buf : operation;
+
   auto writer = CreateBaseExtentWriter();
   return install_op_executor_.ExecuteSourceCopyOperation(
       optimized, std::move(writer), source_fd);
@@ -340,8 +337,9 @@
 
     // Log remount history if this device is an ext4 partition.
     LogMountHistory(source_fd);
-
-    *error = ErrorCode::kDownloadStateInitializationError;
+    if (error) {
+      *error = ErrorCode::kDownloadStateInitializationError;
+    }
     return false;
   }
   return true;
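
Both hunks in this file now treat the ErrorCode out-parameter as optional and rely on ChooseSourceFD to report why a source descriptor was rejected. A minimal sketch of that out-parameter convention (set success on entry, overwrite only on failure, tolerate a null pointer); names here are illustrative:

    // Sketch: an ErrorCode out-parameter that the caller may pass as nullptr.
    #include <cstdio>

    enum class ErrorCode { kSuccess, kDownloadOperationHashMismatch };

    bool VerifySomething(bool hash_matches, ErrorCode* error) {
      if (error) {
        *error = ErrorCode::kSuccess;
      }
      if (!hash_matches) {
        if (error) {
          *error = ErrorCode::kDownloadOperationHashMismatch;
        }
        return false;
      }
      return true;
    }

    int main() {
      ErrorCode error{};
      std::printf("ok=%d\n", VerifySomething(true, &error));
      std::printf("ok=%d\n", VerifySomething(false, nullptr));  // null is fine
      return 0;
    }
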
diff --git a/payload_consumer/partition_writer_unittest.cc b/payload_consumer/partition_writer_unittest.cc
index 4910594..32324b6 100644
--- a/payload_consumer/partition_writer_unittest.cc
+++ b/payload_consumer/partition_writer_unittest.cc
@@ -210,8 +210,7 @@
   op.set_src_sha256_hash(src_hash.data(), src_hash.size());
 
   ErrorCode error = ErrorCode::kSuccess;
-  ASSERT_EQ(writer_.verified_source_fd_.source_ecc_fd_,
-            writer_.ChooseSourceFD(op, &error));
+  ASSERT_NE(writer_.ChooseSourceFD(op, &error), nullptr);
   ASSERT_EQ(ErrorCode::kSuccess, error);
   // Verify that the fake_fec was actually used.
   ASSERT_EQ(1U, fake_fec->GetReadOps().size());
diff --git a/payload_consumer/vabc_partition_writer.cc b/payload_consumer/vabc_partition_writer.cc
index 17b7d50..b00ff70 100644
--- a/payload_consumer/vabc_partition_writer.cc
+++ b/payload_consumer/vabc_partition_writer.cc
@@ -95,7 +95,7 @@
     }
     copy_blocks_.AddExtent(cow_op.dst_extent());
   }
-  LOG(INFO) << "Partition `" << partition_update.partition_name() << " has "
+  LOG(INFO) << "Partition `" << partition_update.partition_name() << "` has "
             << copy_blocks_.blocks() << " copy blocks";
 }
 
diff --git a/payload_consumer/verified_source_fd.cc b/payload_consumer/verified_source_fd.cc
index 002bd07..f35b6a9 100644
--- a/payload_consumer/verified_source_fd.cc
+++ b/payload_consumer/verified_source_fd.cc
@@ -26,11 +26,17 @@
 #include <base/strings/string_util.h>
 #include <base/strings/stringprintf.h>
 
+#include "update_engine/common/error_code.h"
+#include "update_engine/common/hash_calculator.h"
 #include "update_engine/common/utils.h"
-#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#include "update_engine/payload_consumer/extent_writer.h"
+#include "update_engine/payload_consumer/file_descriptor.h"
 #include "update_engine/payload_consumer/file_descriptor_utils.h"
-#include "update_engine/payload_consumer/mount_history.h"
 #include "update_engine/payload_consumer/partition_writer.h"
+#include "update_engine/update_metadata.pb.h"
+#if USE_FEC
+#include "update_engine/payload_consumer/fec_file_descriptor.h"
+#endif
 
 namespace chromeos_update_engine {
 using std::string;
@@ -45,7 +51,7 @@
     return false;
 
 #if USE_FEC
-  FileDescriptorPtr fd(new FecFileDescriptor());
+  auto fd = std::make_shared<FecFileDescriptor>();
   if (!fd->Open(source_path_.c_str(), O_RDONLY, 0)) {
     PLOG(ERROR) << "Unable to open ECC source partition " << source_path_;
     source_ecc_open_failure_ = true;
@@ -60,12 +66,25 @@
   return !source_ecc_open_failure_;
 }
 
+bool VerifiedSourceFd::WriteBackCorrectedSourceBlocks(
+    const std::vector<unsigned char>& source_data,
+    const google::protobuf::RepeatedPtrField<Extent>& extents) {
+  auto fd = std::make_shared<EintrSafeFileDescriptor>();
+  TEST_AND_RETURN_FALSE_ERRNO(fd->Open(source_path_.c_str(), O_RDWR));
+  DirectExtentWriter writer(fd);
+  TEST_AND_RETURN_FALSE(writer.Init(extents, block_size_));
+  return writer.Write(source_data.data(), source_data.size());
+}
+
 FileDescriptorPtr VerifiedSourceFd::ChooseSourceFD(
     const InstallOperation& operation, ErrorCode* error) {
   if (source_fd_ == nullptr) {
     LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
     return nullptr;
   }
+  if (error) {
+    *error = ErrorCode::kSuccess;
+  }
   if (!operation.has_src_sha256_hash()) {
     // When the operation doesn't include a source hash, we attempt the error
     // corrected device first since we can't verify the block in the raw device
@@ -74,6 +93,9 @@
     if (OpenCurrentECCPartition() &&
         fd_utils::ReadAndHashExtents(
             source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
+      if (error) {
+        *error = ErrorCode::kDownloadOperationHashMissingError;
+      }
       return source_ecc_fd_;
     }
     return source_fd_;
@@ -87,6 +109,9 @@
       source_hash == expected_source_hash) {
     return source_fd_;
   }
+  if (error) {
+    *error = ErrorCode::kDownloadOperationHashMismatch;
+  }
   // We fall back to use the error corrected device if the hash of the raw
   // device doesn't match or there was an error reading the source partition.
   if (!OpenCurrentECCPartition()) {
@@ -103,11 +128,23 @@
                << base::HexEncode(expected_source_hash.data(),
                                   expected_source_hash.size());
 
-  if (fd_utils::ReadAndHashExtents(
-          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
-      PartitionWriter::ValidateSourceHash(
+  std::vector<unsigned char> source_data;
+  if (!utils::ReadExtents(
+          source_ecc_fd_, operation.src_extents(), &source_data, block_size_)) {
+    return nullptr;
+  }
+  if (!HashCalculator::RawHashOfData(source_data, &source_hash)) {
+    return nullptr;
+  }
+  if (PartitionWriter::ValidateSourceHash(
           source_hash, operation, source_ecc_fd_, error)) {
     source_ecc_recovered_failures_++;
+    if (WriteBackCorrectedSourceBlocks(source_data, operation.src_extents())) {
+      if (error) {
+        *error = ErrorCode::kSuccess;
+      }
+      return source_fd_;
+    }
     return source_ecc_fd_;
   }
   return nullptr;
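
WriteBackCorrectedSourceBlocks reopens the raw source partition read-write and rewrites the affected extents with data that already passed hash validation through the ECC device, so later reads of source_fd_ see corrected bytes. A standalone sketch of the same write-back using plain pwrite() instead of DirectExtentWriter (illustrative, not the project API):

    // Sketch: write a contiguous data buffer back onto a list of block
    // extents with pwrite(). Extents are (start_block, num_blocks) pairs.
    #include <fcntl.h>
    #include <unistd.h>

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    bool WriteBackExtents(const char* partition_path,
                          const std::vector<std::pair<uint64_t, uint64_t>>& extents,
                          const std::vector<uint8_t>& data,
                          size_t block_size) {
      const int fd = open(partition_path, O_RDWR);
      if (fd < 0) {
        return false;
      }
      size_t data_offset = 0;
      bool ok = true;
      for (const auto& [start_block, num_blocks] : extents) {
        const size_t length = num_blocks * block_size;
        if (data_offset + length > data.size() ||
            pwrite(fd, data.data() + data_offset, length,
                   static_cast<off_t>(start_block * block_size)) !=
                static_cast<ssize_t>(length)) {
          ok = false;
          break;
        }
        data_offset += length;
      }
      close(fd);
      return ok;
    }
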
diff --git a/payload_consumer/verified_source_fd.h b/payload_consumer/verified_source_fd.h
index f7d0620..6d859b9 100644
--- a/payload_consumer/verified_source_fd.h
+++ b/payload_consumer/verified_source_fd.h
@@ -39,6 +39,9 @@
   [[nodiscard]] bool Open();
 
  private:
+  bool WriteBackCorrectedSourceBlocks(
+      const std::vector<unsigned char>& source_data,
+      const google::protobuf::RepeatedPtrField<Extent>& extents);
   bool OpenCurrentECCPartition();
   const size_t block_size_;
   const std::string source_path_;
diff --git a/payload_consumer/xz_extent_writer.cc b/payload_consumer/xz_extent_writer.cc
index 361e635..5c92d05 100644
--- a/payload_consumer/xz_extent_writer.cc
+++ b/payload_consumer/xz_extent_writer.cc
@@ -14,6 +14,7 @@
 // limitations under the License.
 //
 
+#include "update_engine/common/utils.h"
 #include "update_engine/payload_consumer/xz_extent_writer.h"
 
 using google::protobuf::RepeatedPtrField;
@@ -75,7 +76,7 @@
     count = input_buffer_.size();
   }
 
-  xz_buf request;
+  xz_buf request{};
   request.in = input;
   request.in_pos = 0;
   request.in_size = count;
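
Several raw C structs in this change (SHA256_CTX ctx_, bz_stream stream_, fec_status status, xz_buf request, and struct stat later on) gain a {} initializer. A minimal standalone illustration of what value-initialization buys for such members (types here are stand-ins):

    // Sketch: what "{}" buys for a raw C struct member. Without it the member
    // is default-initialized and its fields are indeterminate until the
    // library fills them in.
    #include <cstdio>

    struct LegacyCtx {  // stand-in for SHA256_CTX, bz_stream, xz_buf, ...
      unsigned long state[8];
      unsigned int count;
    };

    class Hasher {
     public:
      unsigned int count() const { return ctx_.count; }

     private:
      LegacyCtx ctx_{};  // value-initialized: every field starts at zero
    };

    int main() {
      Hasher h;
      std::printf("%u\n", h.count());  // deterministically prints 0
      return 0;
    }
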
diff --git a/payload_generator/boot_img_filesystem.cc b/payload_generator/boot_img_filesystem.cc
index cad267e..3e324f1 100644
--- a/payload_generator/boot_img_filesystem.cc
+++ b/payload_generator/boot_img_filesystem.cc
@@ -16,10 +16,15 @@
 
 #include "update_engine/payload_generator/boot_img_filesystem.h"
 
+#include <array>
+
+#include <android-base/unique_fd.h>
+#include <android-base/file.h>
 #include <base/logging.h>
 #include <bootimg.h>
 #include <brillo/secure_blob.h>
 #include <puffin/utils.h>
+#include <sys/stat.h>
 
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
@@ -33,11 +38,26 @@
 
 unique_ptr<BootImgFilesystem> BootImgFilesystem::CreateFromFile(
     const string& filename) {
-  if (filename.empty())
+  if (filename.empty()) {
     return nullptr;
-
-  if (brillo::Blob header_magic;
-      !utils::ReadFileChunk(filename, 0, BOOT_MAGIC_SIZE, &header_magic) ||
+  }
+  android::base::unique_fd fd(open(filename.c_str(), O_RDONLY));
+  if (!fd.ok()) {
+    PLOG(ERROR) << "Failed to open " << filename;
+    return nullptr;
+  }
+  struct stat st {};
+  if (fstat(fd.get(), &st) < 0) {
+    return nullptr;
+  }
+  if (static_cast<size_t>(st.st_size) < sizeof(boot_img_hdr_v0)) {
+    LOG(INFO) << "Image " << filename
+              << " is too small to be a boot image. file size: " << st.st_size;
+    return nullptr;
+  }
+  std::array<char, BOOT_MAGIC_SIZE> header_magic{};
+  if (!android::base::ReadFullyAtOffset(
+          fd, header_magic.data(), BOOT_MAGIC_SIZE, 0) ||
       memcmp(header_magic.data(), BOOT_MAGIC, BOOT_MAGIC_SIZE) != 0) {
     return nullptr;
   }
@@ -48,11 +68,11 @@
   // See details in system/tools/mkbootimg/include/bootimg/bootimg.h
   constexpr size_t header_version_offset =
       BOOT_MAGIC_SIZE + 8 * sizeof(uint32_t);
-  brillo::Blob header_version_blob;
-  if (!utils::ReadFileChunk(filename,
-                            header_version_offset,
-                            sizeof(uint32_t),
-                            &header_version_blob)) {
+  std::array<char, sizeof(uint32_t)> header_version_blob{};
+  if (!android::base::ReadFullyAtOffset(fd,
+                                        header_version_blob.data(),
+                                        sizeof(uint32_t),
+                                        header_version_offset)) {
     return nullptr;
   }
   uint32_t header_version =
@@ -66,8 +86,8 @@
   // Read the bytes of boot image header based on the header version.
   size_t header_size =
       header_version == 3 ? sizeof(boot_img_hdr_v3) : sizeof(boot_img_hdr_v0);
-  brillo::Blob header_blob;
-  if (!utils::ReadFileChunk(filename, 0, header_size, &header_blob)) {
+  brillo::Blob header_blob(header_size);
+  if (!ReadFullyAtOffset(fd, header_blob.data(), header_size, 0)) {
     return nullptr;
   }
 
@@ -120,7 +140,8 @@
   file.extents = {ExtentForBytes(kBlockSize, offset, size)};
 
   brillo::Blob data;
-  if (utils::ReadFileChunk(filename_, offset, size, &data)) {
+  if (utils::ReadFileChunk(filename_, offset, size, &data) &&
+      data.size() == size) {
     constexpr size_t kGZipHeaderSize = 10;
     // Check GZip header magic.
     if (data.size() > kGZipHeaderSize && data[0] == 0x1F && data[1] == 0x8B) {
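
CreateFromFile now opens the image once, rejects files smaller than sizeof(boot_img_hdr_v0), and reads the magic and header with android::base::ReadFullyAtOffset instead of repeated ReadFileChunk calls. A standalone sketch of the same stat-then-read-header pattern with plain POSIX calls (BOOT_MAGIC is the 8-byte string "ANDROID!"; the helper name is illustrative):

    // Sketch: reject files that are too small for the header, then check the
    // 8-byte boot image magic ("ANDROID!") with plain POSIX calls.
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #include <cstddef>
    #include <cstring>

    bool LooksLikeBootImage(const char* path, size_t min_header_size) {
      const int fd = open(path, O_RDONLY);
      if (fd < 0) {
        return false;
      }
      struct stat st {};
      bool ok = fstat(fd, &st) == 0 &&
                static_cast<size_t>(st.st_size) >= min_header_size;
      char magic[8] = {};
      ok = ok && pread(fd, magic, sizeof(magic), 0) ==
                     static_cast<ssize_t>(sizeof(magic)) &&
           memcmp(magic, "ANDROID!", sizeof(magic)) == 0;
      close(fd);
      return ok;
    }
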
diff --git a/payload_generator/erofs_filesystem.cc b/payload_generator/erofs_filesystem.cc
index bf10d8c..508c9a1 100644
--- a/payload_generator/erofs_filesystem.cc
+++ b/payload_generator/erofs_filesystem.cc
@@ -16,19 +16,22 @@
 
 #include "update_engine/payload_generator/erofs_filesystem.h"
 
+#include <endian.h>
+#include <fcntl.h>
 #include <time.h>
 
+#include <array>
 #include <string>
 #include <mutex>
 
-#include <erofs/internal.h>
+#include <android-base/unique_fd.h>
 #include <erofs/dir.h>
 #include <erofs/io.h>
+#include <erofs_fs.h>
 
 #include "erofs_iterate.h"
 #include "lz4diff/lz4diff.pb.h"
 #include "lz4diff/lz4patch.h"
-#include "lz4diff/lz4diff.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
@@ -78,7 +81,8 @@
 
 static void FillExtentInfo(FilesystemInterface::File* p_file,
                            std::string_view image_filename,
-                           struct erofs_inode* inode) {
+                           struct erofs_inode* inode,
+                           size_t* const unaligned_bytes) {
   auto& file = *p_file;
 
   struct erofs_map_blocks block {};
@@ -88,9 +92,11 @@
   auto& compressed_blocks = file.compressed_file_info.blocks;
   auto last_pa = block.m_pa;
   auto last_plen = 0;
-  LOG(INFO) << file.name << ", isize: " << inode->i_size;
   while (block.m_la < inode->i_size) {
     auto error = ErofsMapBlocks(inode, &block, EROFS_GET_BLOCKS_FIEMAP);
+    DEFER {
+      block.m_la += block.m_llen;
+    };
     if (error) {
       LOG(FATAL) << "Failed to map blocks for " << file.name << " in "
                  << image_filename;
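
Advancing block.m_la inside DEFER guarantees the cursor moves forward on every iteration, including the new unaligned-block path that previously hit a break. DEFER is the project's scope-guard macro from common/utils.h; a minimal standalone equivalent (not the actual macro) looks roughly like this:

    // Sketch: a minimal scope guard comparable in spirit to DEFER. The real
    // macro lives in update_engine's common/utils.h; this is illustrative.
    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeGuard {
     public:
      explicit ScopeGuard(F f) : f_(std::move(f)) {}
      ~ScopeGuard() { f_(); }

     private:
      F f_;
    };

    #define CONCAT_(a, b) a##b
    #define CONCAT(a, b) CONCAT_(a, b)
    #define MY_DEFER(code) ScopeGuard CONCAT(guard_, __LINE__)([&]() { code; })

    int main() {
      int offset = 0;
      for (int i = 0; i < 3; i++) {
        MY_DEFER(offset += 10);  // runs at the end of every iteration
        if (i == 1) {
          continue;  // the deferred increment still executes
        }
        std::printf("iteration %d, offset %d\n", i, offset);
      }
      std::printf("final offset %d\n", offset);  // 30
      return 0;
    }
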
@@ -105,9 +111,10 @@
                    << "` has unaligned blocks: at physical byte offset: "
                    << block.m_pa << ", "
                    << " length: " << block.m_plen
-                   << ", logical offset: " << block.m_la;
+                   << ", logical offset: " << block.m_la << ", remaining data: "
+                   << inode->i_size - (block.m_la + block.m_llen);
       }
-      break;
+      (*unaligned_bytes) += block.m_plen;
     }
     // Certain uncompressed blocks have physical size > logical size. Usually
     // the physical block contains a bunch of trailing zeros. Include these
@@ -140,20 +147,32 @@
             CompressedBlock(block.m_la, block.m_plen, block.m_llen));
       }
     }
-
-    block.m_la += block.m_llen;
   }
-  file.extents.push_back(ExtentForRange(
-      last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
+  if (last_plen != 0) {
+    file.extents.push_back(ExtentForRange(
+        last_pa / kBlockSize, utils::DivRoundUp(last_plen, kBlockSize)));
+  }
   return;
 }
 
+bool IsErofsImage(const char* path) {
+  android::base::unique_fd fd(open(path, O_RDONLY));
+  uint32_t buf{};
+  if (pread(fd.get(), &buf, 4, EROFS_SUPER_OFFSET) < 0) {
+    return false;
+  }
+  return le32toh(buf) == EROFS_SUPER_MAGIC_V1;
+}
+
 }  // namespace
 
 static_assert(kBlockSize == EROFS_BLKSIZ);
 
 std::unique_ptr<ErofsFilesystem> ErofsFilesystem::CreateFromFile(
     const std::string& filename, const CompressionAlgorithm& algo) {
+  if (!IsErofsImage(filename.c_str())) {
+    return {};
+  }
   // erofs-utils makes heavy use of global variables. Hence its functions aren't
   // thread safe. For example, it stores a global int holding file descriptors
   // to the opened EROFS image. It doesn't even support opening more than 1
@@ -203,6 +222,7 @@
 bool ErofsFilesystem::GetFiles(const std::string& filename,
                                std::vector<File>* files,
                                const CompressionAlgorithm& algo) {
+  size_t unaligned_bytes = 0;
   erofs_iterate_root_dir(&sbi, [&](struct erofs_iterate_dir_context* p_info) {
     const auto& info = *p_info;
     if (info.ctx.de_ftype != EROFS_FT_REG_FILE) {
@@ -225,14 +245,10 @@
       LOG(FATAL) << "Failed to get occupied size for " << filename;
       return err;
     }
-    // If data is packed inline, likely this node is stored on block unalighed
-    // addresses. OTA doesn't work for non-block aligned files. All blocks not
-    // reported by |GetFiles| will be updated in 1 operation. Ignore inline
-    // files for now.
-    // TODO(b/206729162) Support un-aligned files.
-    if (inode.datalayout == EROFS_INODE_FLAT_INLINE) {
-      return 0;
-    }
+    // For EROFS_INODE_FLAT_INLINE, most blocks are stored at aligned
+    // addresses, except the last block, which is stored right after the
+    // inode. These nodes have a small amount of unaligned data, which
+    // is fine.
 
     File file;
     file.name = info.path;
@@ -242,7 +258,7 @@
 
     file.file_stat.st_size = uncompressed_size;
     file.file_stat.st_ino = inode.nid;
-    FillExtentInfo(&file, filename, &inode);
+    FillExtentInfo(&file, filename, &inode, &unaligned_bytes);
     file.compressed_file_info.algo = algo;
 
     files->emplace_back(std::move(file));
@@ -252,6 +268,11 @@
   for (auto& file : *files) {
     NormalizeExtents(&file.extents);
   }
+  LOG(INFO) << "EROFS image " << filename << " has " << unaligned_bytes
+            << " unaligned bytes, which is "
+            << static_cast<float>(unaligned_bytes) / utils::FileSize(filename) *
+                   100.0f
+            << "% of partition data";
   return true;
 }
 
diff --git a/payload_generator/erofs_filesystem_unittest.cc b/payload_generator/erofs_filesystem_unittest.cc
index e6a8929..58686c3 100644
--- a/payload_generator/erofs_filesystem_unittest.cc
+++ b/payload_generator/erofs_filesystem_unittest.cc
@@ -102,11 +102,11 @@
       "/dir1/dir2/file4",
       "/dir1/file0",
       "/dir1/file2",
+      "/etc/update_engine.conf",
       "/file1",
       // Files < 4K are stored inline, and therefore ignored, as they are often
       // not stored on a block boundary.
-      // "/generate_test_erofs_images.sh"
-  };
+      "/generate_test_erofs_images.sh"};
   ASSERT_EQ(filenames, expected_filenames);
   const auto delta_generator = files[0];
   ASSERT_GT(delta_generator.compressed_file_info.blocks.size(), 0UL);
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 387cc3a..a9926d1 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -21,6 +21,7 @@
 #include <map>
 #include <utility>
 
+#include <android-base/parseint.h>
 #include <base/logging.h>
 #include <base/strings/string_number_conversions.h>
 #include <brillo/strings/string_utils.h>
@@ -212,7 +213,14 @@
       compression_method = "gz";
     }
     metadata->set_vabc_compression_param(compression_method);
-    metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+    std::string cow_version;
+    if (!store.GetString("virtual_ab_cow_version", &cow_version)) {
+      metadata->set_cow_version(android::snapshot::kCowVersionManifest);
+    } else {
+      uint32_t cow_version_num{};
+      android::base::ParseUint(cow_version, &cow_version_num);
+      metadata->set_cow_version(cow_version_num);
+    }
   }
   dynamic_partition_metadata = std::move(metadata);
   return true;
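
The new branch reads virtual_ab_cow_version and parses it with android::base::ParseUint, falling back to kCowVersionManifest only when the key is absent. Since ParseUint returns false for malformed input, a stricter variant could reuse the fallback in that case as well; a short sketch under that assumption (helper name illustrative):

    // Sketch: parse an optional decimal version, falling back to a default
    // when the key is missing or the value does not parse.
    #include <android-base/parseint.h>

    #include <cstdint>
    #include <string>

    uint32_t CowVersionOrDefault(const std::string& value,
                                 uint32_t default_version) {
      uint32_t parsed = 0;
      if (value.empty() || !android::base::ParseUint(value, &parsed)) {
        return default_version;
      }
      return parsed;
    }
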
diff --git a/scripts/update_device.py b/scripts/update_device.py
index f94774b..8b9fbe9 100755
--- a/scripts/update_device.py
+++ b/scripts/update_device.py
@@ -517,10 +517,12 @@
 
   metadata_path = "/data/ota_package/metadata"
   if args.allocate_only:
+    with zipfile.ZipFile(args.otafile, "r") as zfp:
+      headers = zfp.read("payload_properties.txt").decode()
     if PushMetadata(dut, args.otafile, metadata_path):
       dut.adb([
           "shell", "update_engine_client", "--allocate",
-          "--metadata={}".format(metadata_path)])
+          "--metadata={} --headers='{}'".format(metadata_path, headers)])
     # Return 0, as we are executing ADB commands here, no work needed after
     # this point
     return 0