update_engine: Resolve read-after-write dependencies in the kernel partition.

BUG=chromium:510909
TEST=Ran delta_generator on rambi from 6415.1.0 to 7272.0.0 dev-channel.
Resulting kernel operations are in order.

Change-Id: I8843c1497b095af7520404d9845db9037bb5e262
Reviewed-on: https://chromium-review.googlesource.com/286212
Tested-by: Alex Deymo <deymo@chromium.org>
Reviewed-by: Gilad Arnold <garnold@chromium.org>
Commit-Queue: Alex Deymo <deymo@chromium.org>
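
This change factors the graph-based reordering that GenerateOperations()
previously applied only to the rootfs operations out into
InplaceGenerator::ResolveReadAfterWriteDependencies(), and applies it to the
kernel operations as well. A minimal sketch of the resulting call pattern is
shown below; the wrapper function name is hypothetical and the surrounding
setup (includes, namespace, DeltaReadPartition calls) is elided, but the other
identifiers match the diff that follows:

    // Sketch only; assumes the usual update_engine headers and namespace.
    bool ReorderBothPartitions(const PayloadGenerationConfig& config,
                               int data_file_fd,
                               off_t* data_file_size,
                               std::vector<AnnotatedOperation>* rootfs_ops,
                               std::vector<AnnotatedOperation>* kernel_ops) {
      // Rootfs: scratch blocks come from the gap between the filesystem and
      // the full rootfs partition size.
      TEST_AND_RETURN_FALSE(
          InplaceGenerator::ResolveReadAfterWriteDependencies(
              config.target.rootfs, config.rootfs_partition_size,
              config.block_size, data_file_fd, data_file_size, rootfs_ops));
      // Kernel: the whole partition is treated as the filesystem, so no
      // scratch blocks are available.
      TEST_AND_RETURN_FALSE(
          InplaceGenerator::ResolveReadAfterWriteDependencies(
              config.target.kernel, config.target.kernel.size,
              config.block_size, data_file_fd, data_file_size, kernel_ops));
      return true;
    }
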
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index 2e0dbaf..6d7df3c 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -705,6 +705,61 @@
   }
 }
 
+bool InplaceGenerator::ResolveReadAfterWriteDependencies(
+    const PartitionConfig& new_part,
+    uint64_t partition_size,
+    size_t block_size,
+    int data_file_fd,
+    off_t* data_file_size,
+    vector<AnnotatedOperation>* aops) {
+  // Convert the operations to the graph.
+  Graph graph;
+  CheckGraph(graph);
+  vector<Block> blocks(new_part.size / block_size);
+  for (const auto& aop : *aops) {
+    AddInstallOpToGraph(
+        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
+  }
+  CheckGraph(graph);
+
+  // Final scratch block (if there's space)
+  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
+  if (blocks.size() < (partition_size / block_size)) {
+    scratch_vertex = graph.size();
+    graph.emplace_back();
+    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
+    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
+    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
+  }
+  CheckGraph(graph);
+
+  LOG(INFO) << "Creating edges...";
+  CreateEdges(&graph, blocks);
+  LOG(INFO) << "Done creating edges";
+  CheckGraph(graph);
+
+  vector<Vertex::Index> final_order;
+  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
+      &graph,
+      new_part.path,
+      data_file_fd,
+      data_file_size,
+      &final_order,
+      scratch_vertex));
+
+  // Copy the operations back into |aops| in the order given by
+  // |final_order|, i.e. the topological sort order.
+  aops->clear();
+  aops->reserve(final_order.size());
+  for (const Vertex::Index vertex_index : final_order) {
+    const Vertex& vertex = graph[vertex_index];
+    aops->emplace_back();
+    aops->back().op = vertex.op;
+    aops->back().name = vertex.file_name;
+  }
+  return true;
+}
+
 bool InplaceGenerator::GenerateOperations(
     const PayloadGenerationConfig& config,
     int data_file_fd,
@@ -715,9 +770,9 @@
                         config.chunk_size / config.block_size);
 
-  // Temporary list of operations used to construct the dependency graph.
-  vector<AnnotatedOperation> aops;
+  LOG(INFO) << "Delta compressing rootfs partition...";
   TEST_AND_RETURN_FALSE(
-      diff_utils::DeltaReadPartition(&aops,
+      diff_utils::DeltaReadPartition(rootfs_ops,
                                      config.source.rootfs,
                                      config.target.rootfs,
                                      chunk_blocks,
@@ -725,27 +780,7 @@
                                      data_file_size,
                                      true,  // skip_block_0
                                      false));  // src_ops_allowed
-  // Convert the rootfs operations to the graph.
-  Graph graph;
-  CheckGraph(graph);
-  vector<Block> blocks(config.target.rootfs.size / config.block_size);
-  for (const auto& aop : aops) {
-    AddInstallOpToGraph(
-        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
-  }
-  LOG(INFO) << "done reading normal files";
-  CheckGraph(graph);
-
-  // Final scratch block (if there's space)
-  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
-  if (blocks.size() < (config.rootfs_partition_size / kBlockSize)) {
-    scratch_vertex = graph.size();
-    graph.emplace_back();
-    CreateScratchNode(
-        blocks.size(),
-        (config.rootfs_partition_size / kBlockSize) - blocks.size(),
-        &graph.back());
-  }
+  LOG(INFO) << "Done reading rootfs";
 
   // Read kernel partition
   LOG(INFO) << "Delta compressing kernel partition...";
@@ -760,32 +795,26 @@
                                      data_file_size,
                                      false,  // skip_block_0
                                      false));  // src_ops_allowed
-  LOG(INFO) << "done reading kernel";
-  CheckGraph(graph);
+  LOG(INFO) << "Done reading kernel";
 
-  LOG(INFO) << "Creating edges...";
-  CreateEdges(&graph, blocks);
-  LOG(INFO) << "Done creating edges";
-  CheckGraph(graph);
+  TEST_AND_RETURN_FALSE(
+      ResolveReadAfterWriteDependencies(config.target.rootfs,
+                                        config.rootfs_partition_size,
+                                        config.block_size,
+                                        data_file_fd,
+                                        data_file_size,
+                                        rootfs_ops));
+  LOG(INFO) << "Done reordering rootfs";
 
-  vector<Vertex::Index> final_order;
-  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
-      &graph,
-      config.target.rootfs.path,
-      data_file_fd,
-      data_file_size,
-      &final_order,
-      scratch_vertex));
-
-  // Copy operations over to the rootfs_ops in the final_order generated by the
-  // topological sort.
-  rootfs_ops->clear();
-  for (const Vertex::Index vertex_index : final_order) {
-    const Vertex& vertex = graph[vertex_index];
-    rootfs_ops->emplace_back();
-    rootfs_ops->back().op = vertex.op;
-    rootfs_ops->back().name = vertex.file_name;
-  }
+  // The kernel partition uses the whole partition as the "filesystem_size".
+  TEST_AND_RETURN_FALSE(
+      ResolveReadAfterWriteDependencies(config.target.kernel,
+                                        config.target.kernel.size,
+                                        config.block_size,
+                                        data_file_fd,
+                                        data_file_size,
+                                        kernel_ops));
+  LOG(INFO) << "Done reordering kernel";
 
   // Re-add the operation for the block 0.
   TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
index 158e364..5432238 100644
--- a/payload_generator/inplace_generator.h
+++ b/payload_generator/inplace_generator.h
@@ -194,6 +194,23 @@
   static void ApplyMap(std::vector<uint64_t>* collection,
                        const std::map<uint64_t, uint64_t>& the_map);
 
+  // Resolve all read-after-write dependencies in the operation list |aops|.
+  // The operations in |aops| are assumed to produce the desired |new_part|
+  // when applied reading only from the original image. This function reorders
+  // the operations, and generates new operations where needed, so that they
+  // produce the same |new_part| result when applied in-place. The new
+  // operations will create blobs in |data_file_fd| and update the file size
+  // pointed to by |data_file_size| if needed. On success, stores the new
+  // operations in |aops| in the right order and returns true.
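+  //
+  // Usage sketch (illustrative; mirrors the calls in GenerateOperations(),
+  // where |rootfs_ops| holds the rootfs operations read from the images):
+  //   TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
+  //       config.target.rootfs, config.rootfs_partition_size,
+  //       config.block_size, data_file_fd, data_file_size, rootfs_ops));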
+  static bool ResolveReadAfterWriteDependencies(
+      const PartitionConfig& new_part,
+      uint64_t partition_size,
+      size_t block_size,
+      int data_file_fd,
+      off_t* data_file_size,
+      std::vector<AnnotatedOperation>* aops);
+
   // Generate the update payload operations for the kernel and rootfs using
   // only operations that read from the target and/or write to the target,
   // hence, applying the payload "in-place" in the target partition. This method