AU: Don't send rootfs hashes along with the delta payload.

We don't need to send these hashes because they get regenerated
on the client after installing the image.

BUG=7678
TEST=unit tests; sent a new-style full update and a delta update

Review URL: http://codereview.chromium.org/3748001

Change-Id: I5ecf0f67da1f64a84f87d11d73dba8e3990eb749
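
For reviewers unfamiliar with why utils::GetFilesystemSize() can return less
than the partition file size: the rootfs filesystem usually does not fill the
whole partition. Below is a minimal, stand-alone sketch of that computation
for an ext2/3 image, reading block_count * block_size from the superblock.
The superblock offsets follow the public ext2 layout and the sketch assumes a
little-endian host; it is only an illustration, not the actual implementation
of utils::GetFilesystemSize() in update_engine.

#include <stdint.h>
#include <string.h>

#include <cstdio>
#include <fstream>
#include <string>

// Returns true and sets |fs_size| to block_count * block_size read from the
// ext2/3 superblock of |image_path|; returns false on read or magic failure.
bool GetExtFilesystemSize(const std::string& image_path, int64_t* fs_size) {
  std::ifstream image(image_path.c_str(), std::ios::binary);
  if (!image)
    return false;
  // The primary superblock is 1024 bytes starting at byte offset 1024.
  char superblock[1024];
  if (!image.seekg(1024) || !image.read(superblock, sizeof(superblock)))
    return false;
  // s_magic at offset 56 must be 0xEF53; on-disk fields are little-endian.
  uint16_t magic;
  memcpy(&magic, superblock + 56, sizeof(magic));
  if (magic != 0xEF53)
    return false;
  // s_blocks_count is at offset 4, s_log_block_size at offset 24.
  uint32_t block_count, log_block_size;
  memcpy(&block_count, superblock + 4, sizeof(block_count));
  memcpy(&log_block_size, superblock + 24, sizeof(log_block_size));
  const int64_t block_size = 1024LL << log_block_size;
  *fs_size = static_cast<int64_t>(block_count) * block_size;
  return true;
}

int main(int argc, char** argv) {
  if (argc != 2) {
    fprintf(stderr, "usage: %s <rootfs image>\n", argv[0]);
    return 1;
  }
  int64_t size = 0;
  if (!GetExtFilesystemSize(argv[1], &size)) {
    fprintf(stderr, "failed to read superblock\n");
    return 1;
  }
  // On a padded partition (like the test images below) this is smaller than
  // the size of the partition file itself.
  printf("filesystem size: %lld bytes\n", static_cast<long long>(size));
  return 0;
}
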
diff --git a/delta_diff_generator.cc b/delta_diff_generator.cc
index c330f6c..9659ad2 100644
--- a/delta_diff_generator.cc
+++ b/delta_diff_generator.cc
@@ -591,12 +591,23 @@
   return true;
 }
 
-bool InitializePartitionInfo(const string& partition, PartitionInfo* info) {
-  const off_t size = utils::FileSize(partition);
-  TEST_AND_RETURN_FALSE(size >= 0);
+bool InitializePartitionInfo(bool is_kernel,
+                             const string& partition,
+                             PartitionInfo* info) {
+  int64_t size = 0;
+  if (is_kernel) {
+    size = utils::FileSize(partition);
+  } else {
+    int block_count = 0, block_size = 0;
+    TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(partition,
+                                                   &block_count,
+                                                   &block_size));
+    size = static_cast<int64_t>(block_count) * block_size;
+  }
+  TEST_AND_RETURN_FALSE(size > 0);
   info->set_size(size);
   OmahaHashCalculator hasher;
-  TEST_AND_RETURN_FALSE(hasher.UpdateFile(partition, -1) == size);
+  TEST_AND_RETURN_FALSE(hasher.UpdateFile(partition, size) == size);
   TEST_AND_RETURN_FALSE(hasher.Finalize());
   const vector<char>& hash = hasher.raw_hash();
   info->set_hash(hash.data(), hash.size());
@@ -610,18 +621,24 @@
                               DeltaArchiveManifest* manifest) {
   if (!old_kernel.empty()) {
     TEST_AND_RETURN_FALSE(
-        InitializePartitionInfo(old_kernel,
+        InitializePartitionInfo(true,
+                                old_kernel,
                                 manifest->mutable_old_kernel_info()));
   }
   TEST_AND_RETURN_FALSE(
-      InitializePartitionInfo(new_kernel, manifest->mutable_new_kernel_info()));
+      InitializePartitionInfo(true,
+                              new_kernel,
+                              manifest->mutable_new_kernel_info()));
   if (!old_rootfs.empty()) {
     TEST_AND_RETURN_FALSE(
-        InitializePartitionInfo(old_rootfs,
+        InitializePartitionInfo(false,
+                                old_rootfs,
                                 manifest->mutable_old_rootfs_info()));
   }
   TEST_AND_RETURN_FALSE(
-      InitializePartitionInfo(new_rootfs, manifest->mutable_new_rootfs_info()));
+      InitializePartitionInfo(false,
+                              new_rootfs,
+                              manifest->mutable_new_rootfs_info()));
   return true;
 }
 
@@ -1164,6 +1181,7 @@
     Graph* graph,
     const std::string& new_kernel_part,
     const std::string& new_image,
+    off_t image_size,
     int fd,
     off_t* data_file_size,
     off_t chunk_size,
@@ -1174,8 +1192,8 @@
 
   // Get the sizes early in the function, so we can fail fast if the user
   // passed us bad paths.
-  const off_t image_size = utils::FileSize(new_image);
-  TEST_AND_RETURN_FALSE(image_size >= 0);
+  TEST_AND_RETURN_FALSE(image_size >= 0 &&
+                        image_size <= utils::FileSize(new_image));
   const off_t kernel_size = utils::FileSize(new_kernel_part);
   TEST_AND_RETURN_FALSE(kernel_size >= 0);
 
@@ -1246,30 +1264,27 @@
     const string& new_kernel_part,
     const string& output_path,
     const string& private_key_path) {
-  struct stat old_image_stbuf;
-  struct stat new_image_stbuf;
-  TEST_AND_RETURN_FALSE_ERRNO(stat(new_image.c_str(), &new_image_stbuf) == 0);
+  int old_image_block_count = 0, old_image_block_size = 0;
+  int new_image_block_count = 0, new_image_block_size = 0;
+  TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(new_image,
+                                                 &new_image_block_count,
+                                                 &new_image_block_size));
   if (!old_image.empty()) {
-    TEST_AND_RETURN_FALSE_ERRNO(stat(old_image.c_str(), &old_image_stbuf) == 0);
-    LOG_IF(WARNING, new_image_stbuf.st_size != old_image_stbuf.st_size)
-        << "Old and new images are different sizes.";
-    LOG_IF(FATAL, old_image_stbuf.st_size % kBlockSize)
-        << "Old image not a multiple of block size " << kBlockSize;
+    TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(old_image,
+                                                   &old_image_block_count,
+                                                   &old_image_block_size));
+    TEST_AND_RETURN_FALSE(old_image_block_size == new_image_block_size);
+    LOG_IF(WARNING, old_image_block_count != new_image_block_count)
+        << "Old and new images have different block counts.";
     // Sanity check kernel partition arg
     TEST_AND_RETURN_FALSE(utils::FileSize(old_kernel_part) >= 0);
-  } else {
-    old_image_stbuf.st_size = 0;
   }
-  LOG_IF(FATAL, new_image_stbuf.st_size % kBlockSize)
-      << "New image not a multiple of block size " << kBlockSize;
-
   // Sanity check kernel partition arg
   TEST_AND_RETURN_FALSE(utils::FileSize(new_kernel_part) >= 0);
 
-  vector<Block> blocks(max(old_image_stbuf.st_size / kBlockSize,
-                           new_image_stbuf.st_size / kBlockSize));
-  LOG(INFO) << "invalid: " << Vertex::kInvalidIndex;
-  LOG(INFO) << "len: " << blocks.size();
+  vector<Block> blocks(max(old_image_block_count, new_image_block_count));
+  LOG(INFO) << "Invalid block index: " << Vertex::kInvalidIndex;
+  LOG(INFO) << "Block count: " << blocks.size();
   for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
     CHECK(blocks[i].reader == Vertex::kInvalidIndex);
     CHECK(blocks[i].writer == Vertex::kInvalidIndex);
@@ -1333,9 +1348,12 @@
                                               &final_order));
     } else {
       // Full update
+      off_t new_image_size =
+          static_cast<off_t>(new_image_block_count) * new_image_block_size;
       TEST_AND_RETURN_FALSE(ReadFullUpdateFromDisk(&graph,
                                                    new_kernel_part,
                                                    new_image,
+                                                   new_image_size,
                                                    fd,
                                                    &data_file_size,
                                                    kFullUpdateChunkSize,
diff --git a/delta_diff_generator.h b/delta_diff_generator.h
index bebe122..d3eee3e 100644
--- a/delta_diff_generator.h
+++ b/delta_diff_generator.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -125,7 +125,7 @@
   // Stores all Extents in 'extents' into 'out'.
   static void StoreExtents(const std::vector<Extent>& extents,
                            google::protobuf::RepeatedPtrField<Extent>* out);
-                           
+
   // Creates all the edges for the graph. Writers of a block point to
   // readers of the same block. This is because for an edge A->B, B
   // must complete before A executes.
@@ -145,7 +145,7 @@
   // Returns true iff there are no extents in the graph that refer to temp
   // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole).
   static bool NoTempBlocksRemain(const Graph& graph);
-  
+
   // Install operations in the manifest may reference data blobs, which
   // are in data_blobs_path. This function creates a new data blobs file
   // with the data blobs in the same order as the referencing install
@@ -156,7 +156,7 @@
   static bool ReorderDataBlobs(DeltaArchiveManifest* manifest,
                                const std::string& data_blobs_path,
                                const std::string& new_data_blobs_path);
-                               
+
   // Handles allocation of temp blocks to a cut edge by converting the
   // dest node to a full op. This removes the need for temp blocks, but
   // comes at the cost of a worse compression ratio.
@@ -169,14 +169,14 @@
                                  const std::string& new_root,
                                  int data_fd,
                                  off_t* data_file_size);
-                                 
+
   // Takes |op_indexes|, which is effectively a mapping from order in
   // which the op is performed -> graph vertex index, and produces the
   // reverse: a mapping from graph vertex index -> op_indexes index.
   static void GenerateReverseTopoOrderMap(
       std::vector<Vertex::Index>& op_indexes,
       std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes);
-                                 
+
   // Takes a |graph|, which has edges that must be cut, as listed in
   // |cuts|.  Cuts the edges. Maintains a list in which the operations
   // will be performed (in |op_indexes|) and the reverse (in
@@ -193,15 +193,17 @@
       std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes,
       std::vector<CutEdgeVertexes>& cuts);
 
-  // Given a new rootfs and kernel (|new_image|, |new_kernel_part|),
-  // Reads them sequentially, creating a full update of chunk_size chunks.
-  // Populates |graph|, |kernel_ops|, and |final_order|, with data
-  // about the update operations, and writes relevant data to |fd|,
-  // updating |data_file_size| as it does.
+  // Given a new rootfs and kernel (|new_image|, |new_kernel_part|), reads
+  // them sequentially, creating a full update of |chunk_size|-byte chunks.
+  // Populates |graph|, |kernel_ops|, and |final_order| with data about the
+  // update operations, and writes relevant data to |fd|, updating
+  // |data_file_size| as it does. Only the first |image_size| bytes of
+  // |new_image| are read, on the assumption that they hold the file system.
   static bool ReadFullUpdateFromDisk(
       Graph* graph,
       const std::string& new_kernel_part,
       const std::string& new_image,
+      off_t image_size,
       int fd,
       off_t* data_file_size,
       off_t chunk_size,
diff --git a/delta_performer_unittest.cc b/delta_performer_unittest.cc
index a3c43f6..e0db6e7 100755
--- a/delta_performer_unittest.cc
+++ b/delta_performer_unittest.cc
@@ -123,6 +123,20 @@
   CreateExtImageAtPath(a_img, NULL);
   CreateExtImageAtPath(b_img, NULL);
 
+  int image_size = static_cast<int>(utils::FileSize(a_img));
+
+  // Extend the "partitions" holding the file system a bit.
+  EXPECT_EQ(0, System(base::StringPrintf(
+      "dd if=/dev/zero of=%s seek=%d bs=1 count=1",
+      a_img.c_str(),
+      image_size + 1024 * 1024 - 1)));
+  EXPECT_EQ(0, System(base::StringPrintf(
+      "dd if=/dev/zero of=%s seek=%d bs=1 count=1",
+      b_img.c_str(),
+      image_size + 1024 * 1024 - 1)));
+  EXPECT_EQ(image_size + 1024 * 1024, utils::FileSize(a_img));
+  EXPECT_EQ(image_size + 1024 * 1024, utils::FileSize(b_img));
+
   // Make some changes to the A image.
   {
     string a_mnt;
@@ -246,8 +260,8 @@
 
     EXPECT_EQ(old_kernel_data.size(), manifest.old_kernel_info().size());
     EXPECT_EQ(new_kernel_data.size(), manifest.new_kernel_info().size());
-    EXPECT_EQ(utils::FileSize(a_img), manifest.old_rootfs_info().size());
-    EXPECT_EQ(utils::FileSize(b_img), manifest.new_rootfs_info().size());
+    EXPECT_EQ(image_size, manifest.old_rootfs_info().size());
+    EXPECT_EQ(image_size, manifest.new_rootfs_info().size());
 
     EXPECT_FALSE(manifest.old_kernel_info().hash().empty());
     EXPECT_FALSE(manifest.new_kernel_info().hash().empty());
@@ -307,6 +321,8 @@
   const off_t kChunkSize = 128 * 1024;
   FillWithData(&new_root);
   FillWithData(&new_kern);
+  // Assume hashes occupy the last 2 MiB of the image, beyond the rootfs.
+  off_t new_rootfs_size = new_root.size() - 2 * 1024 * 1024;
 
   string new_root_path;
   EXPECT_TRUE(utils::MakeTempFile("/tmp/NewFullUpdateTest_R.XXXXXX",
@@ -339,15 +355,16 @@
   EXPECT_TRUE(DeltaDiffGenerator::ReadFullUpdateFromDisk(&graph,
                                                          new_kern_path,
                                                          new_root_path,
+                                                         new_rootfs_size,
                                                          out_blobs_fd,
                                                          &out_blobs_length,
                                                          kChunkSize,
                                                          &kernel_ops,
                                                          &final_order));
-  EXPECT_EQ(new_root.size() / kChunkSize, graph.size());
-  EXPECT_EQ(new_root.size() / kChunkSize, final_order.size());
+  EXPECT_EQ(new_rootfs_size / kChunkSize, graph.size());
+  EXPECT_EQ(new_rootfs_size / kChunkSize, final_order.size());
   EXPECT_EQ(new_kern.size() / kChunkSize, kernel_ops.size());
-  for (size_t i = 0; i < (new_root.size() / kChunkSize); ++i) {
+  for (off_t i = 0; i < (new_rootfs_size / kChunkSize); ++i) {
     EXPECT_EQ(i, final_order[i]);
     EXPECT_EQ(1, graph[i].op.dst_extents_size());
     EXPECT_EQ(i * kChunkSize / kBlockSize,