Changed EncodeFEC to compute in incremental steps, allowing the OTA to be
paused during verity writes.
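
The new IncrementalEncodeFEC state machine moves through kInitFDStep ->
kEncodeRoundStep (one RS round per call) -> kWriteStep -> kComplete, so
the caller can yield between calls. Rough sketch of the intended call
pattern (the driver loop, fd names, and pause point below are
illustrative only, assume the partition has FEC data, and are not part
of this change):

  VerityWriterAndroid verity_writer;
  TEST_AND_RETURN_FALSE(verity_writer.Init(partition));
  // ... stream and hash the partition data as before ...
  while (!verity_writer.FECFinished()) {
    // One bounded step per call; the OTA can be paused or checkpointed
    // in between.
    TEST_AND_RETURN_FALSE(
        verity_writer.IncrementalFinalize(read_fd, write_fd));
  }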

Test: tested incremental OTA on Cuttlefish devices.

Bug: 243594791
Change-Id: I55179ab06fd22f10b246449e841d9b40204c6aaf
diff --git a/payload_consumer/verity_writer_android.cc b/payload_consumer/verity_writer_android.cc
index 91efa3e..479231d 100644
--- a/payload_consumer/verity_writer_android.cc
+++ b/payload_consumer/verity_writer_android.cc
@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <memory>
+#include <utility>
 
 #include <base/logging.h>
 #include <base/posix/eintr_wrapper.h>
@@ -34,6 +35,121 @@
 
 namespace chromeos_update_engine {
 
+bool IncrementalEncodeFEC::Init(const uint64_t _data_offset,
+                                const uint64_t _data_size,
+                                const uint64_t _fec_offset,
+                                const uint64_t _fec_size,
+                                const uint64_t _fec_roots,
+                                const uint64_t _block_size,
+                                const bool _verify_mode) {
+  current_step_ = EncodeFECStep::kInitFDStep;
+  data_offset_ = _data_offset;
+  data_size_ = _data_size;
+  fec_offset_ = _fec_offset;
+  fec_size_ = _fec_size;
+  fec_roots_ = _fec_roots;
+  block_size_ = _block_size;
+  verify_mode_ = _verify_mode;
+  current_round_ = 0;
+  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  rs_n_ = FEC_RSM - fec_roots_;
+  rs_char_.reset(init_rs_char(FEC_PARAMS(fec_roots_)));
+  rs_blocks_.resize(block_size_ * rs_n_);
+  buffer_.resize(block_size_, 0);
+  fec_.resize(block_size_ * fec_roots_);
+  fec_read_.resize(fec_.size());
+  TEST_AND_RETURN_FALSE(data_size_ % block_size_ == 0);
+  TEST_AND_RETURN_FALSE(fec_roots_ >= 0 && fec_roots_ < FEC_RSM);
+
+  num_rounds_ = utils::DivRoundUp(data_size_ / block_size_, rs_n_);
+  TEST_AND_RETURN_FALSE(num_rounds_ * fec_roots_ * block_size_ == fec_size_);
+  TEST_AND_RETURN_FALSE(rs_char_ != nullptr);
+  return true;
+}
+
+bool IncrementalEncodeFEC::Compute(FileDescriptor* _read_fd,
+                                   FileDescriptor* _write_fd) {
+  if (current_step_ == EncodeFECStep::kInitFDStep) {
+    read_fd_ = _read_fd;
+    write_fd_ = _write_fd;
+    cache_fd_.SetFD(write_fd_);
+    write_fd_ = &cache_fd_;
+  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep) {
+    // Encodes |block_size| number of rs blocks each round so that we can read
+    // one block each time instead of 1 byte to increase random read
+    // performance. This uses about 1 MiB memory for 4K block size.
+    for (size_t j = 0; j < rs_n_; j++) {
+      uint64_t offset = fec_ecc_interleave(
+          current_round_ * rs_n_ * block_size_ + j, rs_n_, num_rounds_);
+      // Don't read past |data_size|, treat them as 0.
+      if (offset >= data_size_) {
+        std::fill(buffer_.begin(), buffer_.end(), 0);
+      } else {
+        ssize_t bytes_read = 0;
+        TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
+                                              buffer_.data(),
+                                              buffer_.size(),
+                                              data_offset_ + offset,
+                                              &bytes_read));
+        TEST_AND_RETURN_FALSE(bytes_read >= 0);
+        TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
+                              buffer_.size());
+      }
+      for (size_t k = 0; k < buffer_.size(); k++) {
+        rs_blocks_[k * rs_n_ + j] = buffer_[k];
+      }
+    }
+    for (size_t j = 0; j < block_size_; j++) {
+      // Encode [j * rs_n_ : (j + 1) * rs_n_) in |rs_blocks| and write
+      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
+      encode_rs_char(rs_char_.get(),
+                     rs_blocks_.data() + j * rs_n_,
+                     fec_.data() + j * fec_roots_);
+    }
+
+    if (verify_mode_) {
+      ssize_t bytes_read = 0;
+      TEST_AND_RETURN_FALSE(utils::PReadAll(read_fd_,
+                                            fec_read_.data(),
+                                            fec_read_.size(),
+                                            fec_offset_,
+                                            &bytes_read));
+      TEST_AND_RETURN_FALSE(bytes_read >= 0);
+      TEST_AND_RETURN_FALSE(static_cast<size_t>(bytes_read) ==
+                            fec_read_.size());
+      TEST_AND_RETURN_FALSE(fec_ == fec_read_);
+    } else {
+      CHECK(write_fd_);
+      write_fd_->Seek(fec_offset_, SEEK_SET);
+      if (!utils::WriteAll(write_fd_, fec_.data(), fec_.size())) {
+        PLOG(ERROR) << "EncodeFEC write() failed";
+        return false;
+      }
+    }
+    fec_offset_ += fec_.size();
+    current_round_++;
+  } else if (current_step_ == EncodeFECStep::kWriteStep) {
+    write_fd_->Flush();
+  }
+  UpdateState();
+  return true;
+}
+// Advances the EncodeFEC state machine. Can be split into smaller steps later.
+void IncrementalEncodeFEC::UpdateState() {
+  if (current_step_ == EncodeFECStep::kInitFDStep) {
+    current_step_ = EncodeFECStep::kEncodeRoundStep;
+  } else if (current_step_ == EncodeFECStep::kEncodeRoundStep &&
+             current_round_ == num_rounds_) {
+    current_step_ = EncodeFECStep::kWriteStep;
+  } else if (current_step_ == EncodeFECStep::kWriteStep) {
+    current_step_ = EncodeFECStep::kComplete;
+  }
+}
+
+bool IncrementalEncodeFEC::Finished() const {
+  return current_step_ == EncodeFECStep::kComplete;
+}
+
 namespace verity_writer {
 std::unique_ptr<VerityWriterInterface> CreateVerityWriter() {
   return std::make_unique<VerityWriterAndroid>();
@@ -42,7 +158,15 @@
 
 bool VerityWriterAndroid::Init(const InstallPlan::Partition& partition) {
   partition_ = &partition;
-
+  LOG(INFO) << "Initializing Incremental EncodeFEC";
+  TEST_AND_RETURN_FALSE(encodeFEC_.Init(partition_->fec_data_offset,
+                                        partition_->fec_data_size,
+                                        partition_->fec_offset,
+                                        partition_->fec_size,
+                                        partition_->fec_roots,
+                                        partition_->block_size,
+                                        false /* verify_mode */));
+  hash_tree_written_ = false;
   if (partition_->hash_tree_size != 0) {
     auto hash_function =
         HashTreeBuilder::HashFunction(partition_->hash_tree_algorithm);
@@ -103,7 +227,6 @@
 
   return true;
 }
-
 bool VerityWriterAndroid::Finalize(FileDescriptor* read_fd,
                                    FileDescriptor* write_fd) {
   const auto hash_tree_data_end =
@@ -145,6 +268,50 @@
   return true;
 }
 
+bool VerityWriterAndroid::IncrementalFinalize(FileDescriptor* read_fd,
+                                              FileDescriptor* write_fd) {
+  if (!hash_tree_written_) {
+    LOG(INFO) << "Completing prework in Finalize";
+    const auto hash_tree_data_end =
+        partition_->hash_tree_data_offset + partition_->hash_tree_data_size;
+    if (total_offset_ < hash_tree_data_end) {
+      LOG(ERROR) << "Read up to " << total_offset_
+                 << " when we are expecting to read everything "
+                    "before "
+                 << hash_tree_data_end;
+      return false;
+    }
+    // All hash tree data blocks have been hashed; write the hash tree to disk.
+    LOG(INFO) << "Writing verity hash tree to "
+              << partition_->readonly_target_path;
+    if (hash_tree_builder_) {
+      TEST_AND_RETURN_FALSE(hash_tree_builder_->BuildHashTree());
+      TEST_AND_RETURN_FALSE_ERRNO(
+          write_fd->Seek(partition_->hash_tree_offset, SEEK_SET));
+      auto success =
+          hash_tree_builder_->WriteHashTree([write_fd](auto data, auto size) {
+            return utils::WriteAll(write_fd, data, size);
+          });
+      // hashtree builder already prints error messages.
+      TEST_AND_RETURN_FALSE(success);
+      hash_tree_builder_.reset();
+    }
+    hash_tree_written_ = true;
+    if (partition_->fec_size != 0) {
+      LOG(INFO) << "Writing verity FEC to " << partition_->readonly_target_path;
+    }
+  }
+  if (partition_->fec_size != 0) {
+    TEST_AND_RETURN_FALSE(encodeFEC_.Compute(read_fd, write_fd));
+  }
+  return true;
+}
+
+// Returns true once the incremental FEC encoding has completed all steps.
+bool VerityWriterAndroid::FECFinished() const {
+  return encodeFEC_.Finished();
+}
+
 bool VerityWriterAndroid::EncodeFEC(FileDescriptor* read_fd,
                                     FileDescriptor* write_fd,
                                     uint64_t data_offset,
@@ -156,7 +323,8 @@
                                     bool verify_mode) {
   TEST_AND_RETURN_FALSE(data_size % block_size == 0);
   TEST_AND_RETURN_FALSE(fec_roots >= 0 && fec_roots < FEC_RSM);
-  // This is the N in RS(M, N), which is the number of bytes for each rs block.
+  // This is the N in RS(M, N), which is the number of bytes for each rs
+  // block.
   size_t rs_n = FEC_RSM - fec_roots;
   uint64_t rounds = utils::DivRoundUp(data_size / block_size, rs_n);
   TEST_AND_RETURN_FALSE(rounds * fec_roots * block_size == fec_size);
@@ -196,8 +364,8 @@
     }
     brillo::Blob fec(block_size * fec_roots);
     for (size_t j = 0; j < block_size; j++) {
-      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write |fec_roots|
-      // number of parity bytes to |j * fec_roots| in |fec|.
+      // Encode [j * rs_n : (j + 1) * rs_n) in |rs_blocks| and write
+      // |fec_roots| number of parity bytes to |j * fec_roots| in |fec|.
       encode_rs_char(rs_char.get(),
                      rs_blocks.data() + j * rs_n,
                      fec.data() + j * fec_roots);