AU: Reduce full payload generation log output.

Now that generation is much faster, the per-chunk progress logging is excessive; only report progress roughly every 10 percentage points (and at 100%).

BUG=8747
TEST=unit tests, generated a full payload

Change-Id: I8eb0a9801986d21206bbce29a8228c7d3ebe654d

Review URL: http://codereview.chromium.org/4704002
diff --git a/full_update_generator.cc b/full_update_generator.cc
index 0140107..a7b630c 100644
--- a/full_update_generator.cc
+++ b/full_update_generator.cc
@@ -133,13 +133,11 @@
   for (int partition = 0; partition < 2; ++partition) {
     const string& path = paths[partition];
     LOG(INFO) << "compressing " << path;
-
     int in_fd = open(path.c_str(), O_RDONLY, 0);
     TEST_AND_RETURN_FALSE(in_fd >= 0);
     ScopedFdCloser in_fd_closer(&in_fd);
-
     deque<shared_ptr<ChunkProcessor> > threads;
-
+    int last_progress_update = INT_MIN;
     off_t bytes_left = part_sizes[partition], counter = 0, offset = 0;
     while (bytes_left > 0 || !threads.empty()) {
       // Check and start new chunk processors if possible.
@@ -184,10 +182,15 @@
       dst_extent->set_start_block(processor->offset() / block_size);
       dst_extent->set_num_blocks(chunk_size / block_size);
 
-      LOG(INFO)
-          << StringPrintf("%.1f",
-                          processor->offset() * 100.0 / part_sizes[partition])
-          << "% complete (output size: " << *data_file_size << ")";
+      int progress = static_cast<int>(
+          (processor->offset() + processor->buffer_in().size()) * 100.0 /
+          part_sizes[partition]);
+      if (last_progress_update < progress &&
+          (last_progress_update + 10 <= progress || progress == 100)) {
+        LOG(INFO) << progress << "% complete (output size: "
+                  << *data_file_size << ")";
+        last_progress_update = progress;
+      }
     }
   }
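
For reference, a minimal standalone sketch of the same throttling idea, outside the update_engine code: the class and names below (ThrottledProgressLogger, Update) are illustrative only, and plain iostream output stands in for LOG(INFO). It logs only when progress has advanced by at least a fixed step since the last logged value, or when it reaches 100%, which is the behavior the new lines in the diff implement per partition.

    // Illustrative sketch of throttled progress logging; not update_engine code.
    #include <climits>
    #include <cstdint>
    #include <iostream>

    class ThrottledProgressLogger {
     public:
      // Log only when progress has advanced by at least |step| percentage
      // points since the last logged value, or when it reaches 100%.
      explicit ThrottledProgressLogger(int step) : step_(step) {}

      void Update(int64_t bytes_done, int64_t bytes_total) {
        int progress = static_cast<int>(bytes_done * 100.0 / bytes_total);
        if (last_logged_ < progress &&
            (last_logged_ + step_ <= progress || progress == 100)) {
          std::cout << progress << "% complete\n";
          last_logged_ = progress;
        }
      }

     private:
      int step_;
      // INT_MIN guarantees the very first update is eligible to log,
      // mirroring the last_progress_update initialization in the diff.
      int last_logged_ = INT_MIN;
    };

    int main() {
      ThrottledProgressLogger logger(10);
      const int64_t total = 1000;
      for (int64_t done = 0; done <= total; done += 7)  // Arbitrary chunk size.
        logger.Update(done, total);
      logger.Update(total, total);  // Ensures the final 100% line is printed.
      return 0;
    }

With a 10-point step this prints at most eleven lines (0% through 100%) regardless of how many chunks are processed, which is the log-volume reduction the commit is after.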