Empty merge of sc-v2-dev-plus-aosp-without-vendor@8084891

Bug: 214455710
Merged-In: If155b7d59b2e6b32da16a7f227f32f7e5e1d801a
Change-Id: I0ba71d1ac98d59d19312ecd1793a9f00ff75640b
diff --git a/aosp/dynamic_partition_control_android.cc b/aosp/dynamic_partition_control_android.cc
index 825e0d6..6d33a09 100644
--- a/aosp/dynamic_partition_control_android.cc
+++ b/aosp/dynamic_partition_control_android.cc
@@ -1420,7 +1420,7 @@
   return snapshot_->OpenSnapshotWriter(params, std::move(source_path));
 }  // namespace chromeos_update_engine
 
-FileDescriptorPtr DynamicPartitionControlAndroid::OpenCowFd(
+std::unique_ptr<FileDescriptor> DynamicPartitionControlAndroid::OpenCowFd(
     const std::string& unsuffixed_partition_name,
     const std::optional<std::string>& source_path,
     bool is_append) {
@@ -1438,7 +1438,7 @@
     LOG(ERROR) << "ICowWriter::OpenReader() failed.";
     return nullptr;
   }
-  return std::make_shared<CowWriterFileDescriptor>(std::move(cow_writer),
+  return std::make_unique<CowWriterFileDescriptor>(std::move(cow_writer),
                                                    std::move(reader));
 }
 
diff --git a/aosp/dynamic_partition_control_android.h b/aosp/dynamic_partition_control_android.h
index d0842a9..cebca07 100644
--- a/aosp/dynamic_partition_control_android.h
+++ b/aosp/dynamic_partition_control_android.h
@@ -104,9 +104,10 @@
       const std::string& unsuffixed_partition_name,
       const std::optional<std::string>& source_path,
       bool is_append) override;
-  FileDescriptorPtr OpenCowFd(const std::string& unsuffixed_partition_name,
-                              const std::optional<std::string>&,
-                              bool is_append = false) override;
+  std::unique_ptr<FileDescriptor> OpenCowFd(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append = false) override;
 
   bool MapAllPartitions() override;
   bool UnmapAllPartitions() override;
diff --git a/aosp/mock_dynamic_partition_control_android.h b/aosp/mock_dynamic_partition_control_android.h
index 428b6c7..33ef39c 100644
--- a/aosp/mock_dynamic_partition_control_android.h
+++ b/aosp/mock_dynamic_partition_control_android.h
@@ -94,7 +94,7 @@
                const std::optional<std::string>& source_path,
                bool is_append),
               (override));
-  MOCK_METHOD(FileDescriptorPtr,
+  MOCK_METHOD(std::unique_ptr<FileDescriptor>,
               OpenCowFd,
               (const std::string& unsuffixed_partition_name,
                const std::optional<std::string>& source_path,
diff --git a/common/dynamic_partition_control_interface.h b/common/dynamic_partition_control_interface.h
index a5be6e1..e6ebe6a 100644
--- a/common/dynamic_partition_control_interface.h
+++ b/common/dynamic_partition_control_interface.h
@@ -167,7 +167,7 @@
       bool is_append = false) = 0;
   // Open a general purpose FD capable to reading and writing to COW. Note that
   // writes must be block aligned.
-  virtual FileDescriptorPtr OpenCowFd(
+  virtual std::unique_ptr<FileDescriptor> OpenCowFd(
       const std::string& unsuffixed_partition_name,
       const std::optional<std::string>&,
       bool is_append = false) = 0;
diff --git a/common/dynamic_partition_control_stub.h b/common/dynamic_partition_control_stub.h
index 515ec7c..5aa4336 100644
--- a/common/dynamic_partition_control_stub.h
+++ b/common/dynamic_partition_control_stub.h
@@ -65,9 +65,10 @@
       const std::optional<std::string>&,
       bool is_append) override;
 
-  FileDescriptorPtr OpenCowFd(const std::string& unsuffixed_partition_name,
-                              const std::optional<std::string>&,
-                              bool is_append = false) override {
+  std::unique_ptr<FileDescriptor> OpenCowFd(
+      const std::string& unsuffixed_partition_name,
+      const std::optional<std::string>&,
+      bool is_append = false) override {
     return nullptr;
   }
 
diff --git a/common/mock_dynamic_partition_control.h b/common/mock_dynamic_partition_control.h
index c6b0b2d..f3a446a 100644
--- a/common/mock_dynamic_partition_control.h
+++ b/common/mock_dynamic_partition_control.h
@@ -36,7 +36,7 @@
   MOCK_METHOD(FeatureFlag, GetVirtualAbCompressionFeatureFlag, (), (override));
   MOCK_METHOD(FeatureFlag, GetVirtualAbFeatureFlag, (), (override));
   MOCK_METHOD(bool, FinishUpdate, (bool), (override));
-  MOCK_METHOD(FileDescriptorPtr,
+  MOCK_METHOD(std::unique_ptr<FileDescriptor>,
               OpenCowFd,
               (const std::string& unsuffixed_partition_name,
                const std::optional<std::string>& source_path,
diff --git a/common/utils.cc b/common/utils.cc
index aa6c6b3..794b832 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -181,7 +181,7 @@
   return true;
 }
 
-bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count) {
+bool WriteAll(FileDescriptor* fd, const void* buf, size_t count) {
   const char* c_buf = static_cast<const char*>(buf);
   ssize_t bytes_written = 0;
   while (bytes_written < static_cast<ssize_t>(count)) {
@@ -218,7 +218,7 @@
   return true;
 }
 
-bool ReadAll(const FileDescriptorPtr& fd,
+bool ReadAll(FileDescriptor* fd,
              void* buf,
              size_t count,
              off_t offset,
@@ -239,7 +239,7 @@
   return true;
 }
 
-bool PReadAll(const FileDescriptorPtr& fd,
+bool PReadAll(FileDescriptor* fd,
               void* buf,
               size_t count,
               off_t offset,
diff --git a/common/utils.h b/common/utils.h
index a33efb2..0f8da22 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -68,12 +68,15 @@
 bool WriteAll(int fd, const void* buf, size_t count);
 bool PWriteAll(int fd, const void* buf, size_t count, off_t offset);
 
-bool WriteAll(const FileDescriptorPtr& fd, const void* buf, size_t count);
+bool WriteAll(FileDescriptor* fd, const void* buf, size_t count);
+
+constexpr bool WriteAll(const FileDescriptorPtr& fd,
+                        const void* buf,
+                        size_t count) {
+  return WriteAll(fd.get(), buf, count);
+}
 // WriteAll writes data at specified offset, but it modifies file position.
-bool WriteAll(const FileDescriptorPtr& fd,
-              const void* buf,
-              size_t count,
-              off_t off);
+bool WriteAll(FileDescriptorPtr* fd, const void* buf, size_t count, off_t off);
 
 // https://man7.org/linux/man-pages/man2/pread.2.html
 // PWriteAll writes data at specified offset, but it DOES NOT modify file
@@ -97,21 +100,38 @@
     int fd, void* buf, size_t count, off_t offset, ssize_t* out_bytes_read);
 
 // Reads data at specified offset, this function does change file position.
-bool ReadAll(const FileDescriptorPtr& fd,
+
+bool ReadAll(FileDescriptor* fd,
              void* buf,
              size_t count,
              off_t offset,
              ssize_t* out_bytes_read);
 
+constexpr bool ReadAll(const FileDescriptorPtr& fd,
+                       void* buf,
+                       size_t count,
+                       off_t offset,
+                       ssize_t* out_bytes_read) {
+  return ReadAll(fd.get(), buf, count, offset, out_bytes_read);
+}
+
 // https://man7.org/linux/man-pages/man2/pread.2.html
 // Reads data at specified offset, this function DOES NOT change file position.
 // Behavior is similar to linux's pread syscall.
-bool PReadAll(const FileDescriptorPtr& fd,
+bool PReadAll(FileDescriptor* fd,
               void* buf,
               size_t count,
               off_t offset,
               ssize_t* out_bytes_read);
 
+constexpr bool PReadAll(const FileDescriptorPtr& fd,
+                        void* buf,
+                        size_t count,
+                        off_t offset,
+                        ssize_t* out_bytes_read) {
+  return PReadAll(fd.get(), buf, count, offset, out_bytes_read);
+}
+
 // Opens |path| for reading and appends its entire content to the container
 // pointed to by |out_p|. Returns true upon successfully reading all of the
 // file's content, false otherwise, in which case the state of the output
diff --git a/lz4diff/lz4diff_compress.cc b/lz4diff/lz4diff_compress.cc
index 67b02de..930954b 100644
--- a/lz4diff/lz4diff_compress.cc
+++ b/lz4diff/lz4diff_compress.cc
@@ -131,7 +131,7 @@
     compressed_size += block.compressed_length;
   }
   if (blob.size() < compressed_size) {
-    LOG(INFO) << "File is chunked. Skip lz4 decompress.Expected size : "
+    LOG(INFO) << "File is chunked. Skip lz4 decompress. Expected size: "
               << compressed_size << ", actual size: " << blob.size();
     return {};
   }
diff --git a/lz4diff/lz4patch.h b/lz4diff/lz4patch.h
index 8f66180..ce49430 100644
--- a/lz4diff/lz4patch.h
+++ b/lz4diff/lz4patch.h
@@ -26,6 +26,8 @@
               Blob* output);
 bool Lz4Patch(const Blob& src_data, const Blob& patch_data, Blob* output);
 
+std::ostream& operator<<(std::ostream& out, const CompressionAlgorithm& info);
+
 std::ostream& operator<<(std::ostream& out, const Lz4diffHeader&);
 
 template <typename T>
diff --git a/payload_consumer/block_extent_writer.cc b/payload_consumer/block_extent_writer.cc
index e50526c..6b1fba7 100644
--- a/payload_consumer/block_extent_writer.cc
+++ b/payload_consumer/block_extent_writer.cc
@@ -23,13 +23,10 @@
 
 namespace chromeos_update_engine {
 
-BlockExtentWriter::~BlockExtentWriter() {
-  CHECK(buffer_.empty()) << buffer_.size();
-}
-
 bool BlockExtentWriter::Init(
     const google::protobuf::RepeatedPtrField<Extent>& extents,
     uint32_t block_size) {
+  TEST_NE(extents.size(), 0);
   extents_ = extents;
   cur_extent_idx_ = 0;
   buffer_.clear();
@@ -52,22 +49,26 @@
       return 0;
     }
     if (!NextExtent()) {
-      CHECK_EQ(count, cur_extent_size)
-          << "Exhausted all blocks, but still have " << count - cur_extent_size
-          << " bytes left";
+      if (count != cur_extent_size) {
+        LOG(ERROR) << "Exhausted all blocks, but still have "
+                   << count - cur_extent_size << " bytes left";
+        return 0;
+      }
     }
     return cur_extent_size;
   }
-  CHECK_LT(buffer_.size(), cur_extent_size)
-      << "Data left in buffer should never be >= cur_extent_size, otherwise "
-         "we should have send that data to CowWriter. Buffer size: "
-      << buffer_.size() << " current extent size: " << cur_extent_size;
-  size_t bytes_to_copy =
+  if (buffer_.size() >= cur_extent_size) {
+    LOG(ERROR)
+        << "Data left in buffer should never be >= cur_extent_size, otherwise "
+           "we should have send that data to CowWriter. Buffer size: "
+        << buffer_.size() << " current extent size: " << cur_extent_size;
+  }
+  const size_t bytes_to_copy =
       std::min<size_t>(count, cur_extent_size - buffer_.size());
-  CHECK_GT(bytes_to_copy, 0U);
+  TEST_GT(bytes_to_copy, 0U);
 
   buffer_.insert(buffer_.end(), data, data + bytes_to_copy);
-  CHECK_LE(buffer_.size(), cur_extent_size);
+  TEST_LE(buffer_.size(), cur_extent_size);
 
   if (buffer_.size() == cur_extent_size) {
     if (!WriteExtent(buffer_.data(), cur_extent, block_size_)) {
@@ -78,8 +79,10 @@
     }
     buffer_.clear();
     if (!NextExtent()) {
-      CHECK_EQ(count, bytes_to_copy) << "Exhausted all blocks, but still have "
-                                     << count - bytes_to_copy << " bytes left";
+      if (count != bytes_to_copy) {
+        LOG(ERROR) << "Exhausted all blocks, but still have "
+                   << count - bytes_to_copy << " bytes left";
+      }
     }
   }
   return bytes_to_copy;
@@ -94,11 +97,10 @@
   if (count == 0) {
     return true;
   }
-  CHECK_NE(extents_.size(), 0);
 
   auto data = static_cast<const uint8_t*>(bytes);
   while (count > 0) {
-    auto bytes_written = ConsumeWithBuffer(data, count);
+    const auto bytes_written = ConsumeWithBuffer(data, count);
     TEST_AND_RETURN_FALSE(bytes_written > 0);
     data += bytes_written;
     count -= bytes_written;
diff --git a/payload_consumer/block_extent_writer.h b/payload_consumer/block_extent_writer.h
index f9c7b15..902e3e1 100644
--- a/payload_consumer/block_extent_writer.h
+++ b/payload_consumer/block_extent_writer.h
@@ -29,7 +29,7 @@
 class BlockExtentWriter : public chromeos_update_engine::ExtentWriter {
  public:
   BlockExtentWriter() = default;
-  ~BlockExtentWriter();
+  ~BlockExtentWriter() = default;
   // Returns true on success.
   bool Init(const google::protobuf::RepeatedPtrField<Extent>& extents,
             uint32_t block_size) override;
diff --git a/payload_consumer/cached_file_descriptor.cc b/payload_consumer/cached_file_descriptor.cc
index 7f2515e..aa0dbcd 100644
--- a/payload_consumer/cached_file_descriptor.cc
+++ b/payload_consumer/cached_file_descriptor.cc
@@ -26,7 +26,7 @@
 
 namespace chromeos_update_engine {
 
-off64_t CachedFileDescriptor::Seek(off64_t offset, int whence) {
+off64_t CachedFileDescriptorBase::Seek(off64_t offset, int whence) {
   // Only support SEEK_SET and SEEK_CUR. I think these two would be enough. If
   // we want to support SEEK_END then we have to figure out the size of the
   // underlying file descriptor each time and it may not be a very good idea.
@@ -40,7 +40,7 @@
       return -1;
     }
     // Then we have to seek there.
-    if (fd_->Seek(next_offset, SEEK_SET) < 0) {
+    if (GetFd()->Seek(next_offset, SEEK_SET) < 0) {
       return -1;
     }
     offset_ = next_offset;
@@ -48,7 +48,7 @@
   return offset_;
 }
 
-ssize_t CachedFileDescriptor::Write(const void* buf, size_t count) {
+ssize_t CachedFileDescriptorBase::Write(const void* buf, size_t count) {
   auto bytes = static_cast<const uint8_t*>(buf);
   size_t total_bytes_wrote = 0;
   while (total_bytes_wrote < count) {
@@ -72,19 +72,20 @@
   return total_bytes_wrote;
 }
 
-bool CachedFileDescriptor::Flush() {
-  return FlushCache() && fd_->Flush();
+bool CachedFileDescriptorBase::Flush() {
+  return FlushCache() && GetFd()->Flush();
 }
 
-bool CachedFileDescriptor::Close() {
+bool CachedFileDescriptorBase::Close() {
   offset_ = 0;
-  return FlushCache() && fd_->Close();
+  return FlushCache() && GetFd()->Close();
 }
 
-bool CachedFileDescriptor::FlushCache() {
+bool CachedFileDescriptorBase::FlushCache() {
   size_t begin = 0;
   while (begin < bytes_cached_) {
-    auto bytes_wrote = fd_->Write(cache_.data() + begin, bytes_cached_ - begin);
+    auto bytes_wrote =
+        GetFd()->Write(cache_.data() + begin, bytes_cached_ - begin);
     if (bytes_wrote < 0) {
       PLOG(ERROR) << "Failed to flush cached data!";
       return false;
diff --git a/payload_consumer/cached_file_descriptor.h b/payload_consumer/cached_file_descriptor.h
index ada112b..1193455 100644
--- a/payload_consumer/cached_file_descriptor.h
+++ b/payload_consumer/cached_file_descriptor.h
@@ -29,45 +29,66 @@
 
 namespace chromeos_update_engine {
 
-class CachedFileDescriptor : public FileDescriptor {
+class CachedFileDescriptorBase : public FileDescriptor {
  public:
-  CachedFileDescriptor(FileDescriptorPtr fd, size_t cache_size)
-      : fd_(fd), cache_(cache_size) {}
-  ~CachedFileDescriptor() override = default;
+  CachedFileDescriptorBase(size_t cache_size) : cache_(cache_size) {}
+  ~CachedFileDescriptorBase() override = default;
 
   bool Open(const char* path, int flags, mode_t mode) override {
-    return fd_->Open(path, flags, mode);
+    return GetFd()->Open(path, flags, mode);
   }
   bool Open(const char* path, int flags) override {
-    return fd_->Open(path, flags);
+    return GetFd()->Open(path, flags);
   }
   ssize_t Read(void* buf, size_t count) override {
-    return fd_->Read(buf, count);
+    return GetFd()->Read(buf, count);
   }
   ssize_t Write(const void* buf, size_t count) override;
   off64_t Seek(off64_t offset, int whence) override;
-  uint64_t BlockDevSize() override { return fd_->BlockDevSize(); }
+  uint64_t BlockDevSize() override { return GetFd()->BlockDevSize(); }
   bool BlkIoctl(int request,
                 uint64_t start,
                 uint64_t length,
                 int* result) override {
-    return fd_->BlkIoctl(request, start, length, result);
+    return GetFd()->BlkIoctl(request, start, length, result);
   }
   bool Flush() override;
   bool Close() override;
-  bool IsSettingErrno() override { return fd_->IsSettingErrno(); }
-  bool IsOpen() override { return fd_->IsOpen(); }
+  bool IsSettingErrno() override { return GetFd()->IsSettingErrno(); }
+  bool IsOpen() override { return GetFd()->IsOpen(); }
+
+ protected:
+  virtual FileDescriptor* GetFd() = 0;
 
  private:
   // Internal flush without the need to call |fd_->Flush()|.
   bool FlushCache();
 
-  FileDescriptorPtr fd_;
   brillo::Blob cache_;
   size_t bytes_cached_{0};
   off64_t offset_{0};
 
-  DISALLOW_COPY_AND_ASSIGN(CachedFileDescriptor);
+  DISALLOW_COPY_AND_ASSIGN(CachedFileDescriptorBase);
+};
+
+class CachedFileDescriptor final : public CachedFileDescriptorBase {
+ public:
+  CachedFileDescriptor(FileDescriptorPtr fd, size_t cache_size)
+      : CachedFileDescriptorBase(cache_size), fd_(fd) {}
+
+ protected:
+  virtual FileDescriptor* GetFd() { return fd_.get(); }
+  FileDescriptorPtr fd_;
+};
+
+class UnownedCachedFileDescriptor final : public CachedFileDescriptorBase {
+ public:
+  UnownedCachedFileDescriptor(FileDescriptor* fd, size_t cache_size)
+      : CachedFileDescriptorBase(cache_size), fd_(fd) {}
+
+ protected:
+  virtual FileDescriptor* GetFd() { return fd_; }
+  FileDescriptor* fd_;
 };
 
 }  // namespace chromeos_update_engine
diff --git a/payload_consumer/filesystem_verifier_action.cc b/payload_consumer/filesystem_verifier_action.cc
index a8b9269..2770aff 100644
--- a/payload_consumer/filesystem_verifier_action.cc
+++ b/payload_consumer/filesystem_verifier_action.cc
@@ -182,7 +182,7 @@
 }
 
 bool FilesystemVerifierAction::InitializeFd(const std::string& part_path) {
-  partition_fd_ = FileDescriptorPtr(new EintrSafeFileDescriptor());
+  partition_fd_ = std::make_unique<EintrSafeFileDescriptor>();
   const bool write_verity = ShouldWriteVerity();
   int flags = write_verity ? O_RDWR : O_RDONLY;
   if (!utils::SetBlockDeviceReadOnly(part_path, !write_verity)) {
@@ -197,11 +197,12 @@
 }
 
 void FilesystemVerifierAction::WriteVerityAndHashPartition(
-    FileDescriptorPtr fd,
     const off64_t start_offset,
     const off64_t end_offset,
     void* buffer,
     const size_t buffer_size) {
+  auto fd = partition_fd_.get();
+  TEST_AND_RETURN(fd != nullptr);
   if (start_offset >= end_offset) {
     LOG_IF(WARNING, start_offset > end_offset)
         << "start_offset is greater than end_offset : " << start_offset << " > "
@@ -219,7 +220,7 @@
         return;
       }
     }
-    HashPartition(partition_fd_, 0, partition_size_, buffer, buffer_size);
+    HashPartition(0, partition_size_, buffer, buffer_size);
     return;
   }
   const auto cur_offset = fd->Seek(start_offset, SEEK_SET);
@@ -249,18 +250,18 @@
       FROM_HERE,
       base::BindOnce(&FilesystemVerifierAction::WriteVerityAndHashPartition,
                      base::Unretained(this),
-                     fd,
                      start_offset + bytes_read,
                      end_offset,
                      buffer,
                      buffer_size)));
 }
 
-void FilesystemVerifierAction::HashPartition(FileDescriptorPtr fd,
-                                             const off64_t start_offset,
+void FilesystemVerifierAction::HashPartition(const off64_t start_offset,
                                              const off64_t end_offset,
                                              void* buffer,
                                              const size_t buffer_size) {
+  auto fd = partition_fd_.get();
+  TEST_AND_RETURN(fd != nullptr);
   if (start_offset >= end_offset) {
     LOG_IF(WARNING, start_offset > end_offset)
         << "start_offset is greater than end_offset : " << start_offset << " > "
@@ -295,7 +296,6 @@
       FROM_HERE,
       base::BindOnce(&FilesystemVerifierAction::HashPartition,
                      base::Unretained(this),
-                     fd,
                      start_offset + bytes_read,
                      end_offset,
                      buffer,
@@ -361,6 +361,7 @@
     CHECK_LE(partition.hash_tree_offset, partition.fec_offset)
         << " Hash tree is expected to come before FEC data";
   }
+  CHECK_NE(partition_fd_, nullptr);
   if (partition.hash_tree_offset != 0) {
     filesystem_data_end_ = partition.hash_tree_offset;
   } else if (partition.fec_offset != 0) {
@@ -374,11 +375,10 @@
       return;
     }
     WriteVerityAndHashPartition(
-        partition_fd_, 0, filesystem_data_end_, buffer_.data(), buffer_.size());
+        0, filesystem_data_end_, buffer_.data(), buffer_.size());
   } else {
     LOG(INFO) << "Verity writes disabled on partition " << partition.name;
-    HashPartition(
-        partition_fd_, 0, partition_size_, buffer_.data(), buffer_.size());
+    HashPartition(0, partition_size_, buffer_.data(), buffer_.size());
   }
 }
 
@@ -430,7 +430,7 @@
     Cleanup(ErrorCode::kError);
     return;
   }
-  InstallPlan::Partition& partition =
+  const InstallPlan::Partition& partition =
       install_plan_.partitions[partition_index_];
   LOG(INFO) << "Hash of " << partition.name << ": "
             << HexEncode(hasher_->raw_hash());
@@ -492,7 +492,6 @@
       return;
   }
   // Start hashing the next partition, if any.
-  hasher_.reset();
   buffer_.clear();
   if (partition_fd_) {
     partition_fd_->Close();
diff --git a/payload_consumer/filesystem_verifier_action.h b/payload_consumer/filesystem_verifier_action.h
index 850abda..edc8e53 100644
--- a/payload_consumer/filesystem_verifier_action.h
+++ b/payload_consumer/filesystem_verifier_action.h
@@ -86,13 +86,11 @@
 
  private:
   friend class FilesystemVerifierActionTestDelegate;
-  void WriteVerityAndHashPartition(FileDescriptorPtr fd,
-                                   const off64_t start_offset,
+  void WriteVerityAndHashPartition(const off64_t start_offset,
                                    const off64_t end_offset,
                                    void* buffer,
                                    const size_t buffer_size);
-  void HashPartition(FileDescriptorPtr fd,
-                     const off64_t start_offset,
+  void HashPartition(const off64_t start_offset,
                      const off64_t end_offset,
                      void* buffer,
                      const size_t buffer_size);
@@ -138,7 +136,7 @@
 
   // If not null, the FileDescriptor used to read from the device.
   // verity writer might attempt to write to this fd, if verity is enabled.
-  FileDescriptorPtr partition_fd_;
+  std::unique_ptr<FileDescriptor> partition_fd_;
 
   // Buffer for storing data we read.
   brillo::Blob buffer_;
diff --git a/payload_consumer/filesystem_verifier_action_unittest.cc b/payload_consumer/filesystem_verifier_action_unittest.cc
index f2f2954..533292a 100644
--- a/payload_consumer/filesystem_verifier_action_unittest.cc
+++ b/payload_consumer/filesystem_verifier_action_unittest.cc
@@ -170,7 +170,7 @@
       bytes_to_read -= bytes_read;
       offset += bytes_read;
     }
-    ASSERT_TRUE(verity_writer.Finalize(fd, fd));
+    ASSERT_TRUE(verity_writer.Finalize(fd.get(), fd.get()));
     ASSERT_TRUE(fd->IsOpen());
     ASSERT_TRUE(HashCalculator::RawHashOfFile(target_part_.path(),
                                               &partition->target_hash));
@@ -565,7 +565,7 @@
 
   EnableVABC(&dynamic_control, part.name);
   auto open_cow = [part]() {
-    auto cow_fd = std::make_shared<EintrSafeFileDescriptor>();
+    auto cow_fd = std::make_unique<EintrSafeFileDescriptor>();
     EXPECT_TRUE(cow_fd->Open(part.readonly_target_path.c_str(), O_RDWR))
         << "Failed to open part " << part.readonly_target_path
         << strerror(errno);
@@ -618,14 +618,14 @@
   if (enable_verity) {
     std::vector<unsigned char> actual_fec(fec_size);
     ssize_t bytes_read = 0;
-    ASSERT_TRUE(utils::PReadAll(cow_fd,
+    ASSERT_TRUE(utils::PReadAll(cow_fd.get(),
                                 actual_fec.data(),
                                 actual_fec.size(),
                                 fec_start_offset,
                                 &bytes_read));
     ASSERT_EQ(actual_fec, fec_data_);
     std::vector<unsigned char> actual_hash_tree(hash_tree_size);
-    ASSERT_TRUE(utils::PReadAll(cow_fd,
+    ASSERT_TRUE(utils::PReadAll(cow_fd.get(),
                                 actual_hash_tree.data(),
                                 actual_hash_tree.size(),
                                 HASH_TREE_START_OFFSET,
diff --git a/payload_consumer/install_operation_executor_unittest.cc b/payload_consumer/install_operation_executor_unittest.cc
index 5b4bd6b..705d8f8 100644
--- a/payload_consumer/install_operation_executor_unittest.cc
+++ b/payload_consumer/install_operation_executor_unittest.cc
@@ -236,8 +236,9 @@
   PayloadGenerationConfig config{
       .version = PayloadVersion(kBrilloMajorPayloadVersion,
                                 kZucchiniMinorPayloadVersion)};
+  const FilesystemInterface::File empty;
   diff_utils::BestDiffGenerator best_diff_generator(
-      source_data_, target_data_, src_extents, dst_extents, {}, {}, config);
+      source_data_, target_data_, src_extents, dst_extents, empty, empty, config);
   std::vector<uint8_t> patch_data = target_data_;  // Fake the full operation
   AnnotatedOperation aop;
   // Zucchini is enabled only on files with certain extensions
diff --git a/payload_consumer/partition_writer.cc b/payload_consumer/partition_writer.cc
index 4625a7a..b94f8c7 100644
--- a/payload_consumer/partition_writer.cc
+++ b/payload_consumer/partition_writer.cc
@@ -86,8 +86,6 @@
 
 }  // namespace
 
-using google::protobuf::RepeatedPtrField;
-
 // Opens path for read/write. On success returns an open FileDescriptor
 // and sets *err to 0. On failure, sets *err to errno and returns nullptr.
 FileDescriptorPtr OpenFile(const char* path,
diff --git a/payload_consumer/postinstall_runner_action.cc b/payload_consumer/postinstall_runner_action.cc
index e489dfc..a72462a 100644
--- a/payload_consumer/postinstall_runner_action.cc
+++ b/payload_consumer/postinstall_runner_action.cc
@@ -91,9 +91,17 @@
   fs_mount_dir_ = temp_dir.value();
 #endif  // __ANDROID__
   CHECK(!fs_mount_dir_.empty());
+  EnsureUnmounted();
   LOG(INFO) << "postinstall mount point: " << fs_mount_dir_;
 }
 
+void PostinstallRunnerAction::EnsureUnmounted() {
+  if (utils::IsMountpoint(fs_mount_dir_)) {
+    LOG(INFO) << "Found previously mounted filesystem at " << fs_mount_dir_;
+    utils::UnmountFilesystem(fs_mount_dir_);
+  }
+}
+
 void PostinstallRunnerAction::PerformAction() {
   CHECK(HasInputObject());
   CHECK(boot_control_);
@@ -167,10 +175,7 @@
   }
   // Double check that the fs_mount_dir is not busy with a previous mounted
   // filesystem from a previous crashed postinstall step.
-  if (utils::IsMountpoint(fs_mount_dir_)) {
-    LOG(INFO) << "Found previously mounted filesystem at " << fs_mount_dir_;
-    utils::UnmountFilesystem(fs_mount_dir_);
-  }
+  EnsureUnmounted();
 
 #ifdef __ANDROID__
   // In Chromium OS, the postinstall step is allowed to write to the block
diff --git a/payload_consumer/postinstall_runner_action.h b/payload_consumer/postinstall_runner_action.h
index 41be201..66721af 100644
--- a/payload_consumer/postinstall_runner_action.h
+++ b/payload_consumer/postinstall_runner_action.h
@@ -70,6 +70,7 @@
 
   // exposed for testing purposes only
   void SetMountDir(std::string dir) { fs_mount_dir_ = std::move(dir); }
+  void EnsureUnmounted();
 
   void PerformPartitionPostinstall();
   [[nodiscard]] bool MountPartition(
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index e2fab7d..b669b4d 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -104,8 +104,8 @@
   return true;
 }
 
-bool VerityWriterAndroid::Finalize(FileDescriptorPtr read_fd,
-                                   FileDescriptorPtr write_fd) {
+bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
+                                   FileDescriptor* write_fd) {
   const auto hash_tree_data_end =
       partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
   if (total_offset_ < hash_tree_data_end) {
@@ -142,8 +142,8 @@
   return true;
 }
 
-bool VerityWriterAndroid::EncodeFEC(FileDescriptorPtr read_fd,
-                                    FileDescriptorPtr write_fd,
+bool VerityWriterAndroid::EncodeFEC(FileDescriptor* read_fd,
+                                    FileDescriptor* write_fd,
                                     uint64_t data_offset,
                                     uint64_t data_size,
                                     uint64_t fec_offset,
@@ -161,11 +161,12 @@
   std::unique_ptr<void, decltype(&free_rs_char)> rs_char(
       init_rs_char(FEC_PARAMS(fec_roots)), &free_rs_char);
   TEST_AND_RETURN_FALSE(rs_char != nullptr);
-
   // Cache at most 1MB of fec data, in VABC, we need to re-open fd if we
   // perform a read() operation after write(). So reduce the number of writes
   // can save unnecessary re-opens.
-  write_fd = std::make_shared<CachedFileDescriptor>(write_fd, 1 * (1 << 20));
+  UnownedCachedFileDescriptor cache_fd(write_fd, 1 * (1 << 20));
+  write_fd = &cache_fd;
+
   for (size_t i = 0; i < rounds; i++) {
     // Encodes |block_size| number of rs blocks each round so that we can read
     // one block each time instead of 1 byte to increase random read
@@ -229,11 +230,10 @@
                                     uint32_t fec_roots,
                                     uint32_t block_size,
                                     bool verify_mode) {
-  FileDescriptorPtr fd(new EintrSafeFileDescriptor());
-  TEST_AND_RETURN_FALSE(
-      fd->Open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
-  return EncodeFEC(fd,
-                   fd,
+  EintrSafeFileDescriptor fd;
+  TEST_AND_RETURN_FALSE(fd.Open(path.c_str(), verify_mode ? O_RDONLY : O_RDWR));
+  return EncodeFEC(&fd,
+                   &fd,
                    data_offset,
                    data_size,
                    fec_offset,
diff --git a/payload_consumer/verity_writer_android.h b/payload_consumer/verity_writer_android.h
index 8339528..a6a4920 100644
--- a/payload_consumer/verity_writer_android.h
+++ b/payload_consumer/verity_writer_android.h
@@ -34,7 +34,7 @@
 
   bool Init(const InstallPlan::Partition& partition);
   bool Update(uint64_t offset, const uint8_t* buffer, size_t size) override;
-  bool Finalize(FileDescriptorPtr read_fd, FileDescriptorPtr write_fd) override;
+  bool Finalize(FileDescriptor* read_fd, FileDescriptor* write_fd) override;
 
   // Read [data_offset : data_offset + data_size) from |path| and encode FEC
   // data, if |verify_mode|, then compare the encoded FEC with the one in
@@ -42,8 +42,8 @@
   // in each Update() like hash tree, because for every rs block, its data are
   // spreaded across entire |data_size|, unless we can cache all data in
   // memory, we have to re-read them from disk.
-  static bool EncodeFEC(FileDescriptorPtr read_fd,
-                        FileDescriptorPtr write_fd,
+  static bool EncodeFEC(FileDescriptor* read_fd,
+                        FileDescriptor* write_fd,
                         uint64_t data_offset,
                         uint64_t data_size,
                         uint64_t fec_offset,
diff --git a/payload_consumer/verity_writer_android_unittest.cc b/payload_consumer/verity_writer_android_unittest.cc
index 75da0ae..693bcda 100644
--- a/payload_consumer/verity_writer_android_unittest.cc
+++ b/payload_consumer/verity_writer_android_unittest.cc
@@ -54,7 +54,8 @@
   ASSERT_TRUE(verity_writer_.Init(partition_));
   ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
   ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
-  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
+  ASSERT_TRUE(
+      verity_writer_.Finalize(partition_fd_.get(), partition_fd_.get()));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha1sum | xxd -r -p |
@@ -102,7 +103,8 @@
   ASSERT_TRUE(verity_writer_.Init(partition_));
   ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), 4096));
   ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
-  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
+  ASSERT_TRUE(
+      verity_writer_.Finalize(partition_fd_.get(), partition_fd_.get()));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
@@ -127,7 +129,8 @@
   ASSERT_TRUE(verity_writer_.Update(4096, part_data.data() + 4096, 4096));
   ASSERT_TRUE(verity_writer_.Update(
       8192, part_data.data() + 8192, partition_.hash_tree_data_offset));
-  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
+  ASSERT_TRUE(
+      verity_writer_.Finalize(partition_fd_.get(), partition_fd_.get()));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // dd if=/dev/zero bs=4096 count=1 2>/dev/null | sha256sum | xxd -r -p |
@@ -150,7 +153,8 @@
   test_utils::WriteFileVector(partition_.target_path, part_data);
   ASSERT_TRUE(verity_writer_.Init(partition_));
   ASSERT_TRUE(verity_writer_.Update(0, part_data.data(), part_data.size()));
-  ASSERT_TRUE(verity_writer_.Finalize(partition_fd_, partition_fd_));
+  ASSERT_TRUE(
+      verity_writer_.Finalize(partition_fd_.get(), partition_fd_.get()));
   brillo::Blob actual_part;
   utils::ReadFile(partition_.target_path, &actual_part);
   // Write FEC data.
diff --git a/payload_consumer/verity_writer_interface.h b/payload_consumer/verity_writer_interface.h
index 37ed605..432ede7 100644
--- a/payload_consumer/verity_writer_interface.h
+++ b/payload_consumer/verity_writer_interface.h
@@ -39,8 +39,7 @@
   virtual bool Update(uint64_t offset, const uint8_t* buffer, size_t size) = 0;
 
   // Write hash tree && FEC data to underlying fd, if they are present
-  virtual bool Finalize(FileDescriptorPtr read_fd,
-                        FileDescriptorPtr write_fd) = 0;
+  virtual bool Finalize(FileDescriptor* read_fd, FileDescriptor* write_fd) = 0;
 
  protected:
   VerityWriterInterface() = default;
diff --git a/payload_generator/delta_diff_utils.cc b/payload_generator/delta_diff_utils.cc
index 321795a..389cf97 100644
--- a/payload_generator/delta_diff_utils.cc
+++ b/payload_generator/delta_diff_utils.cc
@@ -552,20 +552,40 @@
     new_visited_blocks.AddExtent(new_part.verity.fec_extent);
   }
 
-  ExtentRanges old_zero_blocks;
-  TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(aops,
-                                                old_part.path,
-                                                new_part.path,
-                                                old_part.size / kBlockSize,
-                                                new_part.size / kBlockSize,
-                                                soft_chunk_blocks,
-                                                config,
-                                                blob_file,
-                                                &old_visited_blocks,
-                                                &new_visited_blocks,
-                                                &old_zero_blocks));
+  const bool puffdiff_allowed =
+      config.OperationEnabled(InstallOperation::PUFFDIFF);
 
-  bool puffdiff_allowed = config.OperationEnabled(InstallOperation::PUFFDIFF);
+  TEST_AND_RETURN_FALSE(new_part.fs_interface);
+  vector<FilesystemInterface::File> new_files;
+  TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
+      new_part, &new_files, puffdiff_allowed));
+
+  ExtentRanges old_zero_blocks;
+  // Prematurely removing moved blocks will render compression info useless.
+  // Even if a single block inside a 100MB file is filtered out, the entire
+  // 100MB file can't be decompressed; we would fall back to BSDIFF, which
+  // performs much worse than LZ4DIFF. It's better to let LZ4DIFF perform the
+  // decompression and let the underlying BSDIFF take care of moved blocks.
+  // TODO(b/206729162) Implement block filtering with compression block info
+  const auto no_compressed_files =
+      std::all_of(new_files.begin(), new_files.end(), [](const File& a) {
+        return a.compressed_file_info.blocks.empty();
+      });
+  if (!config.OperationEnabled(InstallOperation::LZ4DIFF_BSDIFF) ||
+      no_compressed_files) {
+    TEST_AND_RETURN_FALSE(DeltaMovedAndZeroBlocks(aops,
+                                                  old_part.path,
+                                                  new_part.path,
+                                                  old_part.size / kBlockSize,
+                                                  new_part.size / kBlockSize,
+                                                  soft_chunk_blocks,
+                                                  config,
+                                                  blob_file,
+                                                  &old_visited_blocks,
+                                                  &new_visited_blocks,
+                                                  &old_zero_blocks));
+  }
+
   map<string, FilesystemInterface::File> old_files_map;
   if (old_part.fs_interface) {
     vector<FilesystemInterface::File> old_files;
@@ -575,11 +595,6 @@
       old_files_map[file.name] = file;
   }
 
-  TEST_AND_RETURN_FALSE(new_part.fs_interface);
-  vector<FilesystemInterface::File> new_files;
-  TEST_AND_RETURN_FALSE(deflate_utils::PreprocessPartitionFiles(
-      new_part, &new_files, puffdiff_allowed));
-
   list<FileDeltaProcessor> file_delta_processors;
 
   // The processing is very straightforward here, we generate operations for
diff --git a/payload_generator/delta_diff_utils.h b/payload_generator/delta_diff_utils.h
index 1fd1f46..dcb6867 100644
--- a/payload_generator/delta_diff_utils.h
+++ b/payload_generator/delta_diff_utils.h
@@ -227,8 +227,8 @@
   bool TryZucchiniAndUpdateOperation(AnnotatedOperation* aop,
                                      brillo::Blob* data_blob);
 
-  brillo::Blob old_data_;
-  brillo::Blob new_data_;
+  const brillo::Blob& old_data_;
+  const brillo::Blob& new_data_;
   const std::vector<Extent>& src_extents_;
   const std::vector<Extent>& dst_extents_;
   std::vector<puffin::BitExtent> old_deflates_;
diff --git a/payload_generator/erofs_filesystem.cc b/payload_generator/erofs_filesystem.cc
index 9ab37fd..677b473 100644
--- a/payload_generator/erofs_filesystem.cc
+++ b/payload_generator/erofs_filesystem.cc
@@ -27,11 +27,13 @@
 
 #include "erofs_iterate.h"
 #include "lz4diff/lz4diff.pb.h"
-#include "payload_generator/filesystem_interface.h"
+#include "lz4diff/lz4patch.h"
+#include "lz4diff/lz4diff.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/filesystem_interface.h"
 
 namespace chromeos_update_engine {
 
@@ -81,10 +83,6 @@
   if (!file.is_compressed) {
     return;
   }
-  // TODO(b/206729162) Fill in compression algorithm info from input target
-  // files
-  file.compressed_file_info.algo.set_type(CompressionAlgorithm::LZ4HC);
-  file.compressed_file_info.algo.set_level(9);
 
   struct erofs_map_blocks block {};
   block.m_la = 0;
@@ -142,7 +140,7 @@
 static_assert(kBlockSize == EROFS_BLKSIZ);
 
 std::unique_ptr<ErofsFilesystem> ErofsFilesystem::CreateFromFile(
-    const std::string& filename) {
+    const std::string& filename, const CompressionAlgorithm& algo) {
   // erofs-utils makes heavy use of global variables. Hence its functions aren't
   // thread safe. For example, it stores a global int holding file descriptors
   // to the opened EROFS image. It doesn't even support opening more than 1
@@ -171,9 +169,10 @@
   LOG(INFO) << "Parsed EROFS image of size " << st.st_size << " built in "
             << ctime(&time) << " " << filename;
   std::vector<File> files;
-  if (!ErofsFilesystem::GetFiles(filename, &files)) {
+  if (!ErofsFilesystem::GetFiles(filename, &files, algo)) {
     return nullptr;
   }
+  LOG(INFO) << "Using compression algo " << algo << " for " << filename;
   // private ctor doesn't work with make_unique
   return std::unique_ptr<ErofsFilesystem>(
       new ErofsFilesystem(filename, st.st_size, std::move(files)));
@@ -185,7 +184,8 @@
 }
 
 bool ErofsFilesystem::GetFiles(const std::string& filename,
-                               std::vector<File>* files) {
+                               std::vector<File>* files,
+                               const CompressionAlgorithm& algo) {
   erofs_iterate_root_dir(&sbi, [&](struct erofs_iterate_dir_context* p_info) {
     const auto& info = *p_info;
     if (info.ctx.de_ftype != EROFS_FT_REG_FILE) {
@@ -226,6 +226,7 @@
     file.file_stat.st_size = uncompressed_size;
     file.file_stat.st_ino = inode.nid;
     FillCompressedBlockInfo(&file, filename, &inode);
+    file.compressed_file_info.algo = algo;
 
     files->emplace_back(std::move(file));
     return 0;
diff --git a/payload_generator/erofs_filesystem.h b/payload_generator/erofs_filesystem.h
index 473c609..0863b50 100644
--- a/payload_generator/erofs_filesystem.h
+++ b/payload_generator/erofs_filesystem.h
@@ -24,10 +24,15 @@
 
 class ErofsFilesystem final : public FilesystemInterface {
  public:
-  // Creates an Ext2Filesystem from a ext2 formatted filesystem stored in a
-  // file. The file doesn't need to be loop-back mounted.
+  // Creates an ErofsFilesystem from an EROFS formatted filesystem stored in a
+  // file. The file doesn't need to be loop-back mounted. Since erofs-utils
+  // library functions are not concurrency safe (they can't be used in a
+  // multi-threaded context, and can't even work with multiple EROFS images
+  // concurrently on one thread), this function takes a global mutex.
   static std::unique_ptr<ErofsFilesystem> CreateFromFile(
-      const std::string& filename);
+      const std::string& filename,
+      const CompressionAlgorithm& algo =
+          PartitionConfig::GetDefaultCompressionParam());
   virtual ~ErofsFilesystem() = default;
 
   // FilesystemInterface overrides.
@@ -45,7 +50,9 @@
   //    space.
   //  <metadata>: With the rest of ext2 metadata blocks, such as superblocks
   //    and bitmap tables.
-  static bool GetFiles(const std::string& filename, std::vector<File>* files);
+  static bool GetFiles(const std::string& filename,
+                       std::vector<File>* files,
+                       const CompressionAlgorithm& algo);
 
   bool GetFiles(std::vector<File>* files) const override;
 
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 09fb837..ef36a6d 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -444,6 +444,11 @@
       true,
       "Whether to enable zucchini feature when processing executable files.");
 
+  DEFINE_string(erofs_compression_param,
+                "",
+                "Compression parameter passed to mkfs.erofs's -z option. "
+                "Example: lz4 lz4hc,9");
+
   brillo::FlagHelper::Init(
       argc,
       argv,
@@ -594,6 +599,10 @@
     payload_config.target.partitions.back().path = new_partitions[i];
     payload_config.target.partitions.back().disable_fec_computation =
         FLAGS_disable_fec_computation;
+    if (!FLAGS_erofs_compression_param.empty()) {
+      payload_config.target.partitions.back().erofs_compression_param =
+          PartitionConfig::ParseCompressionParam(FLAGS_erofs_compression_param);
+    }
     if (i < new_mapfiles.size())
       payload_config.target.partitions.back().mapfile_path = new_mapfiles[i];
   }
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 8ff4999..d520123 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -17,6 +17,7 @@
 #include "update_engine/payload_generator/payload_generation_config.h"
 
 #include <algorithm>
+#include <charconv>
 #include <map>
 #include <utility>
 
@@ -76,7 +77,7 @@
       return true;
     }
   }
-  fs_interface = ErofsFilesystem::CreateFromFile(path);
+  fs_interface = ErofsFilesystem::CreateFromFile(path, erofs_compression_param);
   if (fs_interface) {
     TEST_AND_RETURN_FALSE(fs_interface->GetBlockSize() == kBlockSize);
     return true;
@@ -193,7 +194,15 @@
   }
   // We use "gz" compression by default for VABC.
   if (metadata->vabc_enabled()) {
-    metadata->set_vabc_compression_param("gz");
+    std::string compression_method;
+    if (store.GetString("virtual_ab_compression_method", &compression_method)) {
+      LOG(INFO) << "Using VABC compression method '" << compression_method
+                << "'";
+    } else {
+      LOG(INFO) << "No VABC compression method specified. Defaulting to 'gz'";
+      compression_method = "gz";
+    }
+    metadata->set_vabc_compression_param(compression_method);
     metadata->set_cow_version(android::snapshot::kCowVersionManifest);
   }
   dynamic_partition_metadata = std::move(metadata);
@@ -373,4 +382,42 @@
   }
 }
 
+// Parses a mkfs.erofs style compression parameter ("lz4" or "lz4hc,<level>")
+// into a CompressionAlgorithm proto. Malformed input aborts the program via
+// CHECK/LOG(FATAL), matching delta_generator's fail-fast flag handling.
+CompressionAlgorithm PartitionConfig::ParseCompressionParam(
+    std::string_view param) {
+  CompressionAlgorithm algo;
+  auto algo_name = param;
+  const auto pos = param.find_first_of(',');
+  if (pos != std::string::npos) {
+    algo_name = param.substr(0, pos);
+  }
+  if (algo_name == "lz4") {
+    algo.set_type(CompressionAlgorithm::LZ4);
+    CHECK_EQ(pos, std::string::npos)
+        << "Invalid compression param " << param
+        << ", compression level not supported for lz4";
+  } else if (algo_name == "lz4hc") {
+    algo.set_type(CompressionAlgorithm::LZ4HC);
+    // Compression level is optional for lz4hc; leave proto default if absent.
+    if (pos != std::string::npos) {
+      const auto level = param.substr(pos + 1);
+      int level_num = 0;
+      const auto [ptr, ec] =
+          std::from_chars(level.data(), level.data() + level.size(), level_num);
+      CHECK_EQ(ec, std::errc()) << "Failed to parse compression level " << level
+                                << ", compression param: " << param;
+      algo.set_level(level_num);
+    }
+  } else {
+    // The FATAL previously sat in the lz4hc-without-level branch, which both
+    // rejected the valid bare "lz4hc" form and silently accepted unknown
+    // compressor names. Unknown names must fail loudly here instead.
+    LOG(FATAL) << "Unrecognized compression type: " << algo_name
+               << ", param: " << param;
+  }
+  return algo;
+}
+
 }  // namespace chromeos_update_engine
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index d71649b..a7ddee4 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -25,6 +25,7 @@
 
 #include <brillo/key_value_store.h>
 #include <brillo/secure_blob.h>
+#include <lz4diff/lz4diff.pb.h>
 
 #include "bsdiff/constants.h"
 #include "update_engine/payload_consumer/payload_constants.h"
@@ -83,6 +84,13 @@
 
 struct PartitionConfig {
   explicit PartitionConfig(std::string name) : name(name) {}
+  static CompressionAlgorithm ParseCompressionParam(std::string_view param);
+  static CompressionAlgorithm GetDefaultCompressionParam() {
+    CompressionAlgorithm algo;
+    algo.set_type(CompressionAlgorithm::LZ4HC);
+    algo.set_level(9);
+    return algo;
+  }
 
   // Returns whether the PartitionConfig is not an empty image and all the
   // fields are set correctly to a valid image file.
@@ -123,6 +131,12 @@
 
   // Per-partition version, usually a number representing timestamp.
   std::string version;
+
+  // Parameter passed to mkfs.erofs's -z option, in the format
+  // "compressor[,compression_level]".
+  // Examples: lz4    lz4hc,9
+  // mkfs.erofs's own default is usually lz4hc,9.
+  CompressionAlgorithm erofs_compression_param = GetDefaultCompressionParam();
 };
 
 // The ImageConfig struct describes a pair of binaries kernel and rootfs and the
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index f9c70f3..b2d6080 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -218,6 +218,8 @@
     "Optional: Whether to enable lz4 diffing for EROFS"
   DEFINE_string liblz4_path "" \
     "Required if --enabled_lz4diff true is passed. Path to liblz4.so. delta_generator will use this copy of liblz4.so for compression. It is important that this copy of liblz4.so is the same as the one on source build."
+  DEFINE_string erofs_compression_param "" \
+    "Compression parameter passed to mkfs.erofs's -z option."
 fi
 if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
   DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
@@ -741,6 +743,10 @@
       GENERATOR_ARGS+=(
         --enable_lz4diff="${FLAGS_enable_lz4diff}" )
     fi
+    if [[ -n "${FLAGS_erofs_compression_param}" ]]; then
+      GENERATOR_ARGS+=(
+        --erofs_compression_param="${FLAGS_erofs_compression_param}" )
+    fi
   fi
 
   if [[ -n "${FLAGS_enable_vabc_xor}" ]]; then