update_engine: Refactor operation generation methods.
We have so far three methods to generate the list of operations to
update the kernel and the rootfs partitions, which are different
strategies: full (using only REPLACE and REPLACE_BZ), delta "in-place"
(using also MOVE and BSDIFF) and "source" delta, or delta minor
version 2 (using SOURCE_COPY and SOURCE_BSDIFF instead).
As we consider ways to support updates for squashfs, note that the two
delta methods only support ext2 filesystems, so we will need yet
another strategy for other filesystem types.
This patch splits the code that generates the list of operations for
the kernel and the rootfs into three methods with the same interface.
No functional changes were made. Some local functions were exposed
as public static methods. The new graph_types.cc is added to avoid
a link-time error when the kInvalidIndex member is not inlined.
BUG=chromium:430950
TEST=FEATURES=test emerge-link update_engine
Change-Id: Ib25ff7a6f17d0990637596dcd6b59568b9e15d26
Reviewed-on: https://chromium-review.googlesource.com/259462
Reviewed-by: Alex Vakulenko <avakulenko@chromium.org>
Tested-by: Alex Deymo <deymo@chromium.org>
Commit-Queue: Alex Deymo <deymo@chromium.org>
diff --git a/delta_performer_unittest.cc b/delta_performer_unittest.cc
index 4698256..b366c7e 100644
--- a/delta_performer_unittest.cc
+++ b/delta_performer_unittest.cc
@@ -476,6 +476,7 @@
PayloadGenerationConfig payload_config;
payload_config.is_delta = !full_rootfs;
payload_config.chunk_size = chunk_size;
+ payload_config.rootfs_partition_size = kRootFSPartitionSize;
if (!full_rootfs) {
payload_config.source.rootfs_part = state->a_img;
payload_config.source.rootfs_mountpt = a_mnt;
@@ -503,7 +504,6 @@
payload_config,
state->delta_path,
private_key,
- kRootFSPartitionSize,
&state->metadata_size));
}
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index ab21282..3fc298e 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -109,225 +109,6 @@
return true;
}
-// For each regular file within new_root, creates a node in the graph,
-// determines the best way to compress it (REPLACE, REPLACE_BZ, COPY, BSDIFF),
-// and writes any necessary data to the end of data_fd.
-bool DeltaReadFiles(Graph* graph,
- vector<Block>* blocks,
- const string& old_root,
- const string& new_root,
- off_t chunk_size,
- int data_fd,
- off_t* data_file_size,
- bool src_ops_allowed) {
- set<ino_t> visited_inodes;
- set<ino_t> visited_src_inodes;
- for (FilesystemIterator fs_iter(new_root,
- set<string>{"/lost+found"});
- !fs_iter.IsEnd(); fs_iter.Increment()) {
- // We never diff symlinks (here, we check that dst file is not a symlink).
- if (!S_ISREG(fs_iter.GetStat().st_mode))
- continue;
-
- // Make sure we visit each inode only once.
- if (utils::SetContainsKey(visited_inodes, fs_iter.GetStat().st_ino))
- continue;
- visited_inodes.insert(fs_iter.GetStat().st_ino);
- off_t dst_size = fs_iter.GetFileSize();
- if (dst_size == 0)
- continue;
-
- LOG(INFO) << "Encoding file " << fs_iter.GetPartialPath();
-
- // We can't visit each dst image inode more than once, as that would
- // duplicate work. Here, we avoid visiting each source image inode
- // more than once. Technically, we could have multiple operations
- // that read the same blocks from the source image for diffing, but
- // we choose not to avoid complexity. Eventually we will move away
- // from using a graph/cycle detection/etc to generate diffs, and at that
- // time, it will be easy (non-complex) to have many operations read
- // from the same source blocks. At that time, this code can die. -adlr
- bool should_diff_from_source = false;
- string src_path = old_root + fs_iter.GetPartialPath();
- struct stat src_stbuf;
- // We never diff symlinks (here, we check that src file is not a symlink).
- if (0 == lstat(src_path.c_str(), &src_stbuf) &&
- S_ISREG(src_stbuf.st_mode)) {
- should_diff_from_source = !utils::SetContainsKey(visited_src_inodes,
- src_stbuf.st_ino);
- visited_src_inodes.insert(src_stbuf.st_ino);
- }
-
- off_t size = chunk_size == -1 ? dst_size : chunk_size;
- off_t step = size;
- for (off_t offset = 0; offset < dst_size; offset += step) {
- if (offset + size >= dst_size) {
- size = -1; // Read through the end of the file.
- }
- TEST_AND_RETURN_FALSE(DeltaDiffGenerator::DeltaReadFile(
- graph,
- Vertex::kInvalidIndex,
- blocks,
- (should_diff_from_source ? old_root : kEmptyPath),
- new_root,
- fs_iter.GetPartialPath(),
- offset,
- size,
- data_fd,
- data_file_size,
- src_ops_allowed));
- }
- }
- return true;
-}
-
-// Reads blocks from image_path that are not yet marked as being written in the
-// blocks array. These blocks that remain are either unchanged files or
-// non-file-data blocks. We compare each of them to the old image, and compress
-// the ones that changed into a single REPLACE_BZ operation. This updates a
-// newly created node in the graph to write these blocks and writes the
-// appropriate blob to blobs_fd. Reads and updates blobs_length.
-bool ReadUnwrittenBlocks(const vector<Block>& blocks,
- int blobs_fd,
- off_t* blobs_length,
- const string& old_image_path,
- const string& new_image_path,
- Vertex* vertex) {
- vertex->file_name = "<rootfs-non-file-data>";
-
- DeltaArchiveManifest_InstallOperation* out_op = &vertex->op;
- int new_image_fd = open(new_image_path.c_str(), O_RDONLY, 000);
- TEST_AND_RETURN_FALSE_ERRNO(new_image_fd >= 0);
- ScopedFdCloser new_image_fd_closer(&new_image_fd);
- int old_image_fd = open(old_image_path.c_str(), O_RDONLY, 000);
- TEST_AND_RETURN_FALSE_ERRNO(old_image_fd >= 0);
- ScopedFdCloser old_image_fd_closer(&old_image_fd);
-
- string temp_file_path;
- TEST_AND_RETURN_FALSE(utils::MakeTempFile("CrAU_temp_data.XXXXXX",
- &temp_file_path,
- nullptr));
-
- FILE* file = fopen(temp_file_path.c_str(), "w");
- TEST_AND_RETURN_FALSE(file);
- int err = BZ_OK;
-
- BZFILE* bz_file = BZ2_bzWriteOpen(&err,
- file,
- 9, // max compression
- 0, // verbosity
- 0); // default work factor
- TEST_AND_RETURN_FALSE(err == BZ_OK);
-
- vector<Extent> extents;
- vector<Block>::size_type block_count = 0;
-
- LOG(INFO) << "Appending unwritten blocks to extents";
- for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
- if (blocks[i].writer != Vertex::kInvalidIndex)
- continue;
- graph_utils::AppendBlockToExtents(&extents, i);
- block_count++;
- }
-
- // Code will handle buffers of any size that's a multiple of kBlockSize,
- // so we arbitrarily set it to 1024 * kBlockSize.
- chromeos::Blob new_buf(1024 * kBlockSize);
- chromeos::Blob old_buf(1024 * kBlockSize);
-
- LOG(INFO) << "Scanning " << block_count << " unwritten blocks";
- vector<Extent> changed_extents;
- vector<Block>::size_type changed_block_count = 0;
- vector<Block>::size_type blocks_copied_count = 0;
-
- // For each extent in extents, write the unchanged blocks into BZ2_bzWrite,
- // which sends it to an output file. We use the temporary buffers to hold the
- // old and new data, which may be smaller than the extent, so in that case we
- // have to loop to get the extent's data (that's the inner while loop).
- for (const Extent& extent : extents) {
- vector<Block>::size_type blocks_read = 0;
- float printed_progress = -1;
- while (blocks_read < extent.num_blocks()) {
- const uint64_t copy_first_block = extent.start_block() + blocks_read;
- const int copy_block_cnt =
- min(new_buf.size() / kBlockSize,
- static_cast<chromeos::Blob::size_type>(
- extent.num_blocks() - blocks_read));
- const size_t count = copy_block_cnt * kBlockSize;
- const off_t offset = copy_first_block * kBlockSize;
- ssize_t rc = pread(new_image_fd, new_buf.data(), count, offset);
- TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
- TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count);
-
- rc = pread(old_image_fd, old_buf.data(), count, offset);
- TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
- TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count);
-
- // Compare each block in the buffer to its counterpart in the old image
- // and only compress it if its content has changed.
- int buf_offset = 0;
- for (int i = 0; i < copy_block_cnt; ++i) {
- int buf_end_offset = buf_offset + kBlockSize;
- if (!std::equal(new_buf.begin() + buf_offset,
- new_buf.begin() + buf_end_offset,
- old_buf.begin() + buf_offset)) {
- BZ2_bzWrite(&err, bz_file, &new_buf[buf_offset], kBlockSize);
- TEST_AND_RETURN_FALSE(err == BZ_OK);
- const uint64_t block_idx = copy_first_block + i;
- if (blocks[block_idx].reader != Vertex::kInvalidIndex) {
- graph_utils::AddReadBeforeDep(vertex, blocks[block_idx].reader,
- block_idx);
- }
- graph_utils::AppendBlockToExtents(&changed_extents, block_idx);
- changed_block_count++;
- }
- buf_offset = buf_end_offset;
- }
-
- blocks_read += copy_block_cnt;
- blocks_copied_count += copy_block_cnt;
- float current_progress =
- static_cast<float>(blocks_copied_count) / block_count;
- if (printed_progress + 0.1 < current_progress ||
- blocks_copied_count == block_count) {
- LOG(INFO) << "progress: " << current_progress;
- printed_progress = current_progress;
- }
- }
- }
- BZ2_bzWriteClose(&err, bz_file, 0, nullptr, nullptr);
- TEST_AND_RETURN_FALSE(err == BZ_OK);
- bz_file = nullptr;
- TEST_AND_RETURN_FALSE_ERRNO(0 == fclose(file));
- file = nullptr;
-
- LOG(INFO) << "Compressed " << changed_block_count << " blocks ("
- << block_count - changed_block_count << " blocks unchanged)";
- chromeos::Blob compressed_data;
- if (changed_block_count > 0) {
- LOG(INFO) << "Reading compressed data off disk";
- TEST_AND_RETURN_FALSE(utils::ReadFile(temp_file_path, &compressed_data));
- }
- TEST_AND_RETURN_FALSE(unlink(temp_file_path.c_str()) == 0);
-
- // Add node to graph to write these blocks
- out_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
- out_op->set_data_offset(*blobs_length);
- out_op->set_data_length(compressed_data.size());
- LOG(INFO) << "Rootfs non-data blocks compressed take up "
- << compressed_data.size();
- *blobs_length += compressed_data.size();
- out_op->set_dst_length(kBlockSize * changed_block_count);
- DeltaDiffGenerator::StoreExtents(changed_extents,
- out_op->mutable_dst_extents());
-
- TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd,
- compressed_data.data(),
- compressed_data.size()));
- LOG(INFO) << "Done processing unwritten blocks";
- return true;
-}
-
// Writes the uint64_t passed in in host-endian to the file as big-endian.
// Returns true on success.
bool WriteUint64AsBigEndian(FileWriter* writer, const uint64_t value) {
@@ -380,62 +161,6 @@
}
}
-// Delta compresses a kernel partition |new_kernel_part| with knowledge of the
-// old kernel partition |old_kernel_part|. If |old_kernel_part| is an empty
-// string, generates a full update of the partition.
-bool DeltaCompressKernelPartition(
- const string& old_kernel_part,
- const string& new_kernel_part,
- vector<DeltaArchiveManifest_InstallOperation>* ops,
- int blobs_fd,
- off_t* blobs_length,
- bool src_ops_allowed) {
- LOG(INFO) << "Delta compressing kernel partition...";
- LOG_IF(INFO, old_kernel_part.empty()) << "Generating full kernel update...";
-
- DeltaArchiveManifest_InstallOperation op;
- chromeos::Blob data;
- TEST_AND_RETURN_FALSE(
- DeltaDiffGenerator::ReadFileToDiff(old_kernel_part,
- new_kernel_part,
- 0, // chunk_offset
- -1, // chunk_size
- true, // bsdiff_allowed
- &data,
- &op,
- false,
- src_ops_allowed));
-
- // Check if the operation writes nothing.
- if (op.dst_extents_size() == 0) {
- if (op.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE) {
- LOG(INFO) << "Empty MOVE operation, nothing to do.";
- return true;
- } else {
- LOG(ERROR) << "Empty non-MOVE operation";
- return false;
- }
- }
-
- // Write the data.
- if (op.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE &&
- op.type() != DeltaArchiveManifest_InstallOperation_Type_SOURCE_COPY) {
- op.set_data_offset(*blobs_length);
- op.set_data_length(data.size());
- }
-
- // Add the new install operation.
- ops->clear();
- ops->push_back(op);
-
- TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd, data.data(), data.size()));
- *blobs_length += data.size();
-
- LOG(INFO) << "Done delta compressing kernel partition: "
- << kInstallOperationTypes[op.type()];
- return true;
-}
-
struct DeltaObject {
DeltaObject(const string& in_name, const int in_type, const off_t in_size)
: name(in_name),
@@ -601,6 +326,75 @@
}
}
+bool DeltaDiffGenerator::DeltaReadFiles(Graph* graph,
+ vector<Block>* blocks,
+ const string& old_root,
+ const string& new_root,
+ off_t chunk_size,
+ int data_fd,
+ off_t* data_file_size,
+ bool src_ops_allowed) {
+ set<ino_t> visited_inodes;
+ set<ino_t> visited_src_inodes;
+ for (FilesystemIterator fs_iter(new_root,
+ set<string>{"/lost+found"});
+ !fs_iter.IsEnd(); fs_iter.Increment()) {
+ // We never diff symlinks (here, we check that dst file is not a symlink).
+ if (!S_ISREG(fs_iter.GetStat().st_mode))
+ continue;
+
+ // Make sure we visit each inode only once.
+ if (utils::SetContainsKey(visited_inodes, fs_iter.GetStat().st_ino))
+ continue;
+ visited_inodes.insert(fs_iter.GetStat().st_ino);
+ off_t dst_size = fs_iter.GetFileSize();
+ if (dst_size == 0)
+ continue;
+
+ LOG(INFO) << "Encoding file " << fs_iter.GetPartialPath();
+
+ // We can't visit each dst image inode more than once, as that would
+ // duplicate work. Here, we avoid visiting each source image inode
+ // more than once. Technically, we could have multiple operations
+ // that read the same blocks from the source image for diffing, but
+ // we choose not to avoid complexity. Eventually we will move away
+ // from using a graph/cycle detection/etc to generate diffs, and at that
+ // time, it will be easy (non-complex) to have many operations read
+ // from the same source blocks. At that time, this code can die. -adlr
+ bool should_diff_from_source = false;
+ string src_path = old_root + fs_iter.GetPartialPath();
+ struct stat src_stbuf;
+ // We never diff symlinks (here, we check that src file is not a symlink).
+ if (0 == lstat(src_path.c_str(), &src_stbuf) &&
+ S_ISREG(src_stbuf.st_mode)) {
+ should_diff_from_source = !utils::SetContainsKey(visited_src_inodes,
+ src_stbuf.st_ino);
+ visited_src_inodes.insert(src_stbuf.st_ino);
+ }
+
+ off_t size = chunk_size == -1 ? dst_size : chunk_size;
+ off_t step = size;
+ for (off_t offset = 0; offset < dst_size; offset += step) {
+ if (offset + size >= dst_size) {
+ size = -1; // Read through the end of the file.
+ }
+ TEST_AND_RETURN_FALSE(DeltaDiffGenerator::DeltaReadFile(
+ graph,
+ Vertex::kInvalidIndex,
+ blocks,
+ (should_diff_from_source ? old_root : kEmptyPath),
+ new_root,
+ fs_iter.GetPartialPath(),
+ offset,
+ size,
+ data_fd,
+ data_file_size,
+ src_ops_allowed));
+ }
+ }
+ return true;
+}
+
bool DeltaDiffGenerator::DeltaReadFile(Graph* graph,
Vertex::Index existing_vertex,
vector<Block>* blocks,
@@ -666,7 +460,7 @@
// Now, insert into graph and blocks vector
Vertex::Index vertex = existing_vertex;
if (vertex == Vertex::kInvalidIndex) {
- graph->resize(graph->size() + 1);
+ graph->emplace_back();
vertex = graph->size() - 1;
}
(*graph)[vertex].op = operation;
@@ -684,7 +478,6 @@
return true;
}
-
bool DeltaDiffGenerator::ReadFileToDiff(
const string& old_filename,
const string& new_filename,
@@ -831,10 +624,8 @@
}
// Embed extents in the operation.
- DeltaDiffGenerator::StoreExtents(src_extents,
- operation.mutable_src_extents());
- DeltaDiffGenerator::StoreExtents(dst_extents,
- operation.mutable_dst_extents());
+ StoreExtents(src_extents, operation.mutable_src_extents());
+ StoreExtents(dst_extents, operation.mutable_dst_extents());
}
out_data->swap(data);
@@ -843,6 +634,201 @@
return true;
}
+bool DeltaDiffGenerator::DeltaCompressKernelPartition(
+ const string& old_kernel_part,
+ const string& new_kernel_part,
+ vector<DeltaArchiveManifest_InstallOperation>* ops,
+ int blobs_fd,
+ off_t* blobs_length,
+ bool src_ops_allowed) {
+ LOG(INFO) << "Delta compressing kernel partition...";
+ LOG_IF(INFO, old_kernel_part.empty()) << "Generating full kernel update...";
+
+ DeltaArchiveManifest_InstallOperation op;
+ chromeos::Blob data;
+ TEST_AND_RETURN_FALSE(
+ ReadFileToDiff(old_kernel_part,
+ new_kernel_part,
+ 0, // chunk_offset
+ -1, // chunk_size
+ true, // bsdiff_allowed
+ &data,
+ &op,
+ false,
+ src_ops_allowed));
+
+ // Check if the operation writes nothing.
+ if (op.dst_extents_size() == 0) {
+ if (op.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE) {
+ LOG(INFO) << "Empty MOVE operation, nothing to do.";
+ return true;
+ } else {
+ LOG(ERROR) << "Empty non-MOVE operation";
+ return false;
+ }
+ }
+
+ // Write the data.
+ if (op.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE &&
+ op.type() != DeltaArchiveManifest_InstallOperation_Type_SOURCE_COPY) {
+ op.set_data_offset(*blobs_length);
+ op.set_data_length(data.size());
+ }
+
+ // Add the new install operation.
+ ops->clear();
+ ops->push_back(op);
+
+ TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd, data.data(), data.size()));
+ *blobs_length += data.size();
+
+ LOG(INFO) << "Done delta compressing kernel partition: "
+ << kInstallOperationTypes[op.type()];
+ return true;
+}
+
+bool DeltaDiffGenerator::ReadUnwrittenBlocks(
+ const vector<Block>& blocks,
+ int blobs_fd,
+ off_t* blobs_length,
+ const string& old_image_path,
+ const string& new_image_path,
+ Vertex* vertex) {
+ vertex->file_name = "<rootfs-non-file-data>";
+
+ DeltaArchiveManifest_InstallOperation* out_op = &vertex->op;
+ int new_image_fd = open(new_image_path.c_str(), O_RDONLY, 000);
+ TEST_AND_RETURN_FALSE_ERRNO(new_image_fd >= 0);
+ ScopedFdCloser new_image_fd_closer(&new_image_fd);
+ int old_image_fd = open(old_image_path.c_str(), O_RDONLY, 000);
+ TEST_AND_RETURN_FALSE_ERRNO(old_image_fd >= 0);
+ ScopedFdCloser old_image_fd_closer(&old_image_fd);
+
+ string temp_file_path;
+ TEST_AND_RETURN_FALSE(utils::MakeTempFile("CrAU_temp_data.XXXXXX",
+ &temp_file_path,
+ nullptr));
+
+ FILE* file = fopen(temp_file_path.c_str(), "w");
+ TEST_AND_RETURN_FALSE(file);
+ int err = BZ_OK;
+
+ BZFILE* bz_file = BZ2_bzWriteOpen(&err,
+ file,
+ 9, // max compression
+ 0, // verbosity
+ 0); // default work factor
+ TEST_AND_RETURN_FALSE(err == BZ_OK);
+
+ vector<Extent> extents;
+ vector<Block>::size_type block_count = 0;
+
+ LOG(INFO) << "Appending unwritten blocks to extents";
+ for (vector<Block>::size_type i = 0; i < blocks.size(); i++) {
+ if (blocks[i].writer != Vertex::kInvalidIndex)
+ continue;
+ graph_utils::AppendBlockToExtents(&extents, i);
+ block_count++;
+ }
+
+ // Code will handle buffers of any size that's a multiple of kBlockSize,
+ // so we arbitrarily set it to 1024 * kBlockSize.
+ chromeos::Blob new_buf(1024 * kBlockSize);
+ chromeos::Blob old_buf(1024 * kBlockSize);
+
+ LOG(INFO) << "Scanning " << block_count << " unwritten blocks";
+ vector<Extent> changed_extents;
+ vector<Block>::size_type changed_block_count = 0;
+ vector<Block>::size_type blocks_copied_count = 0;
+
+ // For each extent in extents, write the unchanged blocks into BZ2_bzWrite,
+ // which sends it to an output file. We use the temporary buffers to hold the
+ // old and new data, which may be smaller than the extent, so in that case we
+ // have to loop to get the extent's data (that's the inner while loop).
+ for (const Extent& extent : extents) {
+ vector<Block>::size_type blocks_read = 0;
+ float printed_progress = -1;
+ while (blocks_read < extent.num_blocks()) {
+ const uint64_t copy_first_block = extent.start_block() + blocks_read;
+ const int copy_block_cnt =
+ min(new_buf.size() / kBlockSize,
+ static_cast<chromeos::Blob::size_type>(
+ extent.num_blocks() - blocks_read));
+ const size_t count = copy_block_cnt * kBlockSize;
+ const off_t offset = copy_first_block * kBlockSize;
+ ssize_t rc = pread(new_image_fd, new_buf.data(), count, offset);
+ TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
+ TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count);
+
+ rc = pread(old_image_fd, old_buf.data(), count, offset);
+ TEST_AND_RETURN_FALSE_ERRNO(rc >= 0);
+ TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count);
+
+ // Compare each block in the buffer to its counterpart in the old image
+ // and only compress it if its content has changed.
+ int buf_offset = 0;
+ for (int i = 0; i < copy_block_cnt; ++i) {
+ int buf_end_offset = buf_offset + kBlockSize;
+ if (!std::equal(new_buf.begin() + buf_offset,
+ new_buf.begin() + buf_end_offset,
+ old_buf.begin() + buf_offset)) {
+ BZ2_bzWrite(&err, bz_file, &new_buf[buf_offset], kBlockSize);
+ TEST_AND_RETURN_FALSE(err == BZ_OK);
+ const uint64_t block_idx = copy_first_block + i;
+ if (blocks[block_idx].reader != Vertex::kInvalidIndex) {
+ graph_utils::AddReadBeforeDep(vertex, blocks[block_idx].reader,
+ block_idx);
+ }
+ graph_utils::AppendBlockToExtents(&changed_extents, block_idx);
+ changed_block_count++;
+ }
+ buf_offset = buf_end_offset;
+ }
+
+ blocks_read += copy_block_cnt;
+ blocks_copied_count += copy_block_cnt;
+ float current_progress =
+ static_cast<float>(blocks_copied_count) / block_count;
+ if (printed_progress + 0.1 < current_progress ||
+ blocks_copied_count == block_count) {
+ LOG(INFO) << "progress: " << current_progress;
+ printed_progress = current_progress;
+ }
+ }
+ }
+ BZ2_bzWriteClose(&err, bz_file, 0, nullptr, nullptr);
+ TEST_AND_RETURN_FALSE(err == BZ_OK);
+ bz_file = nullptr;
+ TEST_AND_RETURN_FALSE_ERRNO(0 == fclose(file));
+ file = nullptr;
+
+ LOG(INFO) << "Compressed " << changed_block_count << " blocks ("
+ << block_count - changed_block_count << " blocks unchanged)";
+ chromeos::Blob compressed_data;
+ if (changed_block_count > 0) {
+ LOG(INFO) << "Reading compressed data off disk";
+ TEST_AND_RETURN_FALSE(utils::ReadFile(temp_file_path, &compressed_data));
+ }
+ TEST_AND_RETURN_FALSE(unlink(temp_file_path.c_str()) == 0);
+
+ // Add node to graph to write these blocks
+ out_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
+ out_op->set_data_offset(*blobs_length);
+ out_op->set_data_length(compressed_data.size());
+ LOG(INFO) << "Rootfs non-data blocks compressed take up "
+ << compressed_data.size();
+ *blobs_length += compressed_data.size();
+ out_op->set_dst_length(kBlockSize * changed_block_count);
+ DeltaDiffGenerator::StoreExtents(changed_extents,
+ out_op->mutable_dst_extents());
+
+ TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd,
+ compressed_data.data(),
+ compressed_data.size()));
+ LOG(INFO) << "Done processing unwritten blocks";
+ return true;
+}
+
bool DeltaDiffGenerator::InitializePartitionInfo(bool is_kernel,
const string& partition,
PartitionInfo* info) {
@@ -971,39 +957,78 @@
return order;
}
+bool DeltaDiffGenerator::GenerateDeltaWithSourceOperations(
+ const PayloadGenerationConfig& config,
+ int data_file_fd,
+ off_t* data_file_size,
+ Graph* graph,
+ vector<DeltaArchiveManifest_InstallOperation>* kernel_ops,
+ vector<Vertex::Index>* final_order) {
+ // List of blocks in the target partition, with the operation that needs to
+ // write it and the operation that needs to read it. This is used here to
+ // keep track of the blocks that no operation is writting it.
+ vector<Block> blocks(config.target.rootfs_size / config.block_size);
+
+ TEST_AND_RETURN_FALSE(DeltaReadFiles(graph,
+ &blocks,
+ config.source.rootfs_mountpt,
+ config.target.rootfs_mountpt,
+ config.chunk_size,
+ data_file_fd,
+ data_file_size,
+ true)); // src_ops_allowed
+
+ LOG(INFO) << "done reading normal files";
+ CheckGraph(*graph);
+
+ // Read kernel partition
+ TEST_AND_RETURN_FALSE(
+ DeltaCompressKernelPartition(config.source.kernel_part,
+ config.target.kernel_part,
+ kernel_ops,
+ data_file_fd,
+ data_file_size,
+ true)); // src_ops_allowed
+ LOG(INFO) << "done reading kernel";
+ CheckGraph(*graph);
+
+ graph->emplace_back();
+ TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks,
+ data_file_fd,
+ data_file_size,
+ config.source.rootfs_part,
+ config.target.rootfs_part,
+ &graph->back()));
+ if (graph->back().op.data_length() == 0) {
+ LOG(INFO) << "No unwritten blocks to write, omitting operation";
+ graph->pop_back();
+ }
+
+ *final_order = OrderIndices(*graph);
+ return true;
+}
+
bool DeltaDiffGenerator::GenerateDeltaUpdateFile(
const PayloadGenerationConfig& config,
const string& output_path,
const string& private_key_path,
- size_t rootfs_partition_size,
uint64_t* metadata_size) {
- int old_image_block_count = config.source.rootfs_size / config.block_size;
- int new_image_block_count = config.target.rootfs_size / config.block_size;
-
if (config.is_delta) {
- LOG_IF(WARNING, old_image_block_count != new_image_block_count)
+ LOG_IF(WARNING, config.source.rootfs_size != config.target.rootfs_size)
<< "Old and new images have different block counts.";
// TODO(deymo): Our tools only support growing the filesystem size during
// an update. Remove this check when that's fixed. crbug.com/192136
- LOG_IF(FATAL, old_image_block_count > new_image_block_count)
+ LOG_IF(FATAL, config.source.rootfs_size > config.target.rootfs_size)
<< "Shirking the rootfs size is not supported at the moment.";
}
// Sanity checks for the partition size.
- TEST_AND_RETURN_FALSE(rootfs_partition_size % config.block_size == 0);
- LOG(INFO) << "Rootfs partition size: " << rootfs_partition_size;
+ LOG(INFO) << "Rootfs partition size: " << config.rootfs_partition_size;
LOG(INFO) << "Actual filesystem size: " << config.target.rootfs_size;
- TEST_AND_RETURN_FALSE(rootfs_partition_size >= config.target.rootfs_size);
- vector<Block> blocks(max(old_image_block_count, new_image_block_count));
LOG(INFO) << "Invalid block index: " << Vertex::kInvalidIndex;
- LOG(INFO) << "Block count: " << blocks.size();
- for (const Block& block : blocks) {
- CHECK(block.reader == Vertex::kInvalidIndex);
- CHECK(block.writer == Vertex::kInvalidIndex);
- }
- Graph graph;
- CheckGraph(graph);
+ LOG(INFO) << "Block count: "
+ << config.target.rootfs_size / config.block_size;
const string kTempFileTemplate("CrAU_temp_data.XXXXXX");
string temp_file_path;
@@ -1014,11 +1039,35 @@
// Create empty protobuf Manifest object
DeltaArchiveManifest manifest;
+ manifest.set_minor_version(config.minor_version);
vector<DeltaArchiveManifest_InstallOperation> kernel_ops;
+ Graph graph;
+ CheckGraph(graph);
vector<Vertex::Index> final_order;
- Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
+
+ // Select payload generation strategy based on the config.
+ OperationsGenerator* strategy = nullptr;
+ if (config.is_delta) {
+ // Delta update (with possibly a full kernel update).
+ if (config.minor_version == kInPlaceMinorPayloadVersion) {
+ LOG(INFO) << "Using generator InplaceGenerator::GenerateInplaceDelta";
+ strategy = &InplaceGenerator::GenerateInplaceDelta;
+ } else if (config.minor_version == kSourceMinorPayloadVersion) {
+ LOG(INFO) << "Using generator DeltaDiffGenerator::GenerateSourceDelta";
+ strategy = &DeltaDiffGenerator::GenerateDeltaWithSourceOperations;
+ } else {
+ LOG(ERROR) << "Unsupported minor version given for delta payload: "
+ << config.minor_version;
+ return false;
+ }
+ } else {
+ // Full update.
+ LOG(INFO) << "Using generator FullUpdateGenerator::Run";
+ strategy = &FullUpdateGenerator::Run;
+ }
+
{
int data_file_fd;
TEST_AND_RETURN_FALSE(
@@ -1026,135 +1075,14 @@
temp_file_unlinker.reset(new ScopedPathUnlinker(temp_file_path));
TEST_AND_RETURN_FALSE(data_file_fd >= 0);
ScopedFdCloser data_file_fd_closer(&data_file_fd);
- if (config.is_delta) {
- // Delta update
- // Set the minor version for this payload.
- LOG(INFO) << "Adding Delta Minor Version.";
- manifest.set_minor_version(config.minor_version);
- if (config.minor_version == kInPlaceMinorPayloadVersion) {
- TEST_AND_RETURN_FALSE(DeltaReadFiles(&graph,
- &blocks,
- config.source.rootfs_mountpt,
- config.target.rootfs_mountpt,
- config.chunk_size,
- data_file_fd,
- &data_file_size,
- false)); // src_ops_allowed
- LOG(INFO) << "done reading normal files";
- CheckGraph(graph);
-
- LOG(INFO) << "Starting metadata processing";
- TEST_AND_RETURN_FALSE(Metadata::DeltaReadMetadata(
- &graph,
- &blocks,
- config.source.rootfs_part,
- config.target.rootfs_part,
- data_file_fd,
- &data_file_size));
- LOG(INFO) << "Done metadata processing";
- CheckGraph(graph);
-
- graph.resize(graph.size() + 1);
- TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks,
- data_file_fd,
- &data_file_size,
- config.source.rootfs_part,
- config.target.rootfs_part,
- &graph.back()));
- if (graph.back().op.data_length() == 0) {
- LOG(INFO) << "No unwritten blocks to write, omitting operation";
- graph.pop_back();
- }
-
- // Final scratch block (if there's space)
- if (blocks.size() < (rootfs_partition_size / kBlockSize)) {
- scratch_vertex = graph.size();
- graph.resize(graph.size() + 1);
- InplaceGenerator::CreateScratchNode(
- blocks.size(),
- (rootfs_partition_size / kBlockSize) - blocks.size(),
- &graph.back());
- }
-
- // Read kernel partition
- TEST_AND_RETURN_FALSE(
- DeltaCompressKernelPartition(config.source.kernel_part,
- config.target.kernel_part,
- &kernel_ops,
- data_file_fd,
- &data_file_size,
- false)); // src_ops_allowed
-
- LOG(INFO) << "done reading kernel";
- CheckGraph(graph);
-
- LOG(INFO) << "Creating edges...";
- InplaceGenerator::CreateEdges(&graph, blocks);
- LOG(INFO) << "Done creating edges";
- CheckGraph(graph);
-
- TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertGraphToDag(
- &graph,
- config.target.rootfs_mountpt,
- data_file_fd,
- &data_file_size,
- &final_order,
- scratch_vertex));
- } else if (config.minor_version == kSourceMinorPayloadVersion) {
- TEST_AND_RETURN_FALSE(DeltaReadFiles(&graph,
- &blocks,
- config.source.rootfs_mountpt,
- config.target.rootfs_mountpt,
- config.chunk_size,
- data_file_fd,
- &data_file_size,
- true)); // src_ops_allowed
-
- LOG(INFO) << "done reading normal files";
- CheckGraph(graph);
-
- // Read kernel partition
- TEST_AND_RETURN_FALSE(
- DeltaCompressKernelPartition(config.source.kernel_part,
- config.target.kernel_part,
- &kernel_ops,
- data_file_fd,
- &data_file_size,
- true)); // src_ops_allowed
- LOG(INFO) << "done reading kernel";
- CheckGraph(graph);
-
- graph.resize(graph.size() + 1);
- TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks,
- data_file_fd,
- &data_file_size,
- config.source.rootfs_part,
- config.target.rootfs_part,
- &graph.back()));
- if (graph.back().op.data_length() == 0) {
- LOG(INFO) << "No unwritten blocks to write, omitting operation";
- graph.pop_back();
- }
-
- final_order = OrderIndices(graph);
- } else {
- LOG(ERROR) << "Unsupported minor version given for delta payload: "
- << config.minor_version;
- return false;
- }
- } else {
- TEST_AND_RETURN_FALSE(FullUpdateGenerator::Run(config,
- data_file_fd,
- &data_file_size,
- &graph,
- &kernel_ops,
- &final_order));
-
- // Set the minor version for this payload.
- LOG(INFO) << "Adding Full Minor Version.";
- manifest.set_minor_version(DeltaPerformer::kFullPayloadMinorVersion);
- }
+ // Generate the operations using the strategy we selected above.
+ TEST_AND_RETURN_FALSE((*strategy)(config,
+ data_file_fd,
+ &data_file_size,
+ &graph,
+ &kernel_ops,
+ &final_order));
}
if (!config.source.ImageInfoIsEmpty())
@@ -1171,7 +1099,7 @@
&manifest,
&op_name_map);
CheckGraph(graph);
- manifest.set_block_size(kBlockSize);
+ manifest.set_block_size(config.block_size);
// Reorder the data blobs with the newly ordered manifest
string ordered_blobs_path;
@@ -1253,14 +1181,14 @@
ScopedFdCloser blobs_fd_closer(&blobs_fd);
TEST_AND_RETURN_FALSE(blobs_fd >= 0);
for (;;) {
- char buf[kBlockSize];
- ssize_t rc = read(blobs_fd, buf, sizeof(buf));
+ vector<char> buf(config.block_size);
+ ssize_t rc = read(blobs_fd, buf.data(), buf.size());
if (0 == rc) {
// EOF
break;
}
TEST_AND_RETURN_FALSE_ERRNO(rc > 0);
- TEST_AND_RETURN_FALSE(writer.Write(buf, rc));
+ TEST_AND_RETURN_FALSE(writer.Write(buf.data(), rc));
}
// Write signature blob.
diff --git a/payload_generator/delta_diff_generator.h b/payload_generator/delta_diff_generator.h
index 5698b74..3629afc 100644
--- a/payload_generator/delta_diff_generator.h
+++ b/payload_generator/delta_diff_generator.h
@@ -37,6 +37,32 @@
// The minor version used by the A to B delta generator algorithm.
extern const uint32_t kSourceMinorPayloadVersion;
+// The payload generation strategy prototype. This is the function that does
+// all the work to generate the operations for the rootfs and the kernel.
+// Given the |config|, generates the payload by populating the |graph| with
+// the operations required to update the rootfs when applied in the order
+// specified by |final_order|, and populating |kernel_ops| with the list of
+// kernel operations required to update the kernel.
+// The operations returned will refer to offsets in the file |data_file_fd|,
+// where this function stores the output, but not necessarily in the same
+// order as they appear in the |final_order| and |kernel_ops|.
+// This function stores the amount of data written to |data_file_fd| in
+// |data_file_size|.
+// TODO(deymo): Replace |graph|, |kernel_ops| and |final_order| by two vectors
+// of annotated InstallOperation (InstallOperation plus a name for logging
+// purposes). At this level, there's no need to have a graph.
+// TODO(deymo): Convert this type alias into a base class, so other parameters
+// can be passed to the particular generators while keeping the same interface
+// for operation generation.
+using OperationsGenerator = bool(
+ const PayloadGenerationConfig& /* config */,
+ int /* data_file_fd */,
+ off_t* /* data_file_size */,
+ Graph* /* graph */,
+ std::vector<DeltaArchiveManifest_InstallOperation>* /* kernel_ops */,
+ std::vector<Vertex::Index>* /* final_order */);
+
+
class DeltaDiffGenerator {
public:
// Represents a disk block on the install partition.
@@ -63,17 +89,45 @@
// |private_key_path| points to a private key used to sign the update.
// Pass empty string to not sign the update.
// |output_path| is the filename where the delta update should be written.
- // This method computes scratch space based on |rootfs_partition_size|.
// Returns true on success. Also writes the size of the metadata into
// |metadata_size|.
static bool GenerateDeltaUpdateFile(const PayloadGenerationConfig& config,
const std::string& output_path,
const std::string& private_key_path,
- size_t rootfs_partition_size,
uint64_t* metadata_size);
// These functions are public so that the unit tests can access them:
+ // Generate the update payload operations for the kernel and rootfs using
+ // SOURCE_* operations, used to generate deltas for the minor version
+ // kSourceMinorPayloadVersion. This function will generate operations in the
+ // rootfs that will read blocks from the source partition in random order and
+ // write the new image on the target partition, also possibly in random order.
+ // The rootfs operations are stored in |graph| and should be executed in the
+ // |final_order| order. The kernel operations are stored in |kernel_ops|. All
+ // the offsets in the operations reference the data written to |data_file_fd|.
+ // The total amount of data written to that file is stored in
+ // |data_file_size|.
+ static bool GenerateDeltaWithSourceOperations(
+ const PayloadGenerationConfig& config,
+ int data_file_fd,
+ off_t* data_file_size,
+ Graph* graph,
+ std::vector<DeltaArchiveManifest_InstallOperation>* kernel_ops,
+ std::vector<Vertex::Index>* final_order);
+
+ // For each regular file within new_root, creates a node in the graph,
+ // determines the best way to compress it (REPLACE, REPLACE_BZ, COPY, BSDIFF),
+ // and writes any necessary data to the end of data_fd.
+ static bool DeltaReadFiles(Graph* graph,
+ std::vector<Block>* blocks,
+ const std::string& old_root,
+ const std::string& new_root,
+ off_t chunk_size,
+ int data_fd,
+ off_t* data_file_size,
+ bool src_ops_allowed);
+
// For a given regular file which must exist at new_root + path, and
// may exist at old_root + path, creates a new InstallOperation and
// adds it to the graph. Also, populates the |blocks| array as
@@ -116,6 +170,31 @@
bool gather_extents,
bool src_ops_allowed);
+ // Delta compresses a kernel partition |new_kernel_part| with knowledge of the
+ // old kernel partition |old_kernel_part|. If |old_kernel_part| is an empty
+ // string, generates a full update of the partition.
+ static bool DeltaCompressKernelPartition(
+ const std::string& old_kernel_part,
+ const std::string& new_kernel_part,
+ std::vector<DeltaArchiveManifest_InstallOperation>* ops,
+ int blobs_fd,
+ off_t* blobs_length,
+ bool src_ops_allowed);
+
+ // Reads blocks from image_path that are not yet marked as being written in
+ // the blocks array. These blocks that remain are either unchanged files or
+ // non-file-data blocks. We compare each of them to the old image, and
+ // compress the ones that changed into a single REPLACE_BZ operation. This
+ // updates a newly created node in the graph to write these blocks and writes
+ // the appropriate blob to blobs_fd. Reads and updates blobs_length.
+ static bool ReadUnwrittenBlocks(
+ const std::vector<Block>& blocks,
+ int blobs_fd,
+ off_t* blobs_length,
+ const std::string& old_image_path,
+ const std::string& new_image_path,
+ Vertex* vertex);
+
// Stores all Extents in 'extents' into 'out'.
static void StoreExtents(const std::vector<Extent>& extents,
google::protobuf::RepeatedPtrField<Extent>* out);
diff --git a/payload_generator/delta_diff_generator_unittest.cc b/payload_generator/delta_diff_generator_unittest.cc
index c8b104d..9cb4317 100644
--- a/payload_generator/delta_diff_generator_unittest.cc
+++ b/payload_generator/delta_diff_generator_unittest.cc
@@ -77,6 +77,14 @@
string new_path_;
};
+TEST_F(DeltaDiffGeneratorTest, BlockDefaultValues) {
+ // Tests that a Block's |reader| and |writer| are initialized to
+ // Vertex::kInvalidIndex by default. This is required by the delta generators.
+ DeltaDiffGenerator::Block block;
+ EXPECT_EQ(Vertex::kInvalidIndex, block.reader);
+ EXPECT_EQ(Vertex::kInvalidIndex, block.writer);
+}
+
TEST_F(DeltaDiffGeneratorTest, RunAsRootMoveSmallTest) {
EXPECT_TRUE(utils::WriteFile(old_path().c_str(),
reinterpret_cast<const char*>(kRandomString),
diff --git a/payload_generator/full_update_generator.cc b/payload_generator/full_update_generator.cc
index c634a70..fef6f78 100644
--- a/payload_generator/full_update_generator.cc
+++ b/payload_generator/full_update_generator.cc
@@ -155,7 +155,7 @@
DeltaArchiveManifest_InstallOperation* op = nullptr;
if (partition == 0) {
- graph->resize(graph->size() + 1);
+ graph->emplace_back();
graph->back().file_name =
base::StringPrintf("<rootfs-operation-%" PRIi64 ">", counter++);
op = &graph->back().op;
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index c43ec49..33b219e 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -38,6 +38,7 @@
FillWithData(&new_kern);
// Assume hashes take 2 MiB beyond the rootfs.
+ config_.rootfs_partition_size = new_root.size();
config_.target.rootfs_size = new_root.size() - 2 * 1024 * 1024;
config_.target.kernel_size = new_kern.size();
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 0403d73..fb79a2f 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -251,7 +251,7 @@
"signature will be assigned a client version, starting from "
"kSignatureOriginalVersion.");
DEFINE_int32(chunk_size, -1, "Payload chunk size (-1 -- no limit/default)");
- DEFINE_int64(rootfs_partition_size,
+ DEFINE_uint64(rootfs_partition_size,
chromeos_update_engine::kRootFSPartitionSize,
"RootFS partition size for the image once installed");
DEFINE_int32(minor_version, DeltaPerformer::kFullPayloadMinorVersion,
@@ -384,6 +384,7 @@
FLAGS_old_build_version,
&payload_config.source.image_info);
+ payload_config.rootfs_partition_size = FLAGS_rootfs_partition_size;
payload_config.minor_version = FLAGS_minor_version;
// Look for the minor version in the old image if it was not given as an
// argument.
@@ -421,7 +422,6 @@
payload_config,
FLAGS_out_file,
FLAGS_private_key,
- FLAGS_rootfs_partition_size,
&metadata_size)) {
return 1;
}
diff --git a/payload_generator/graph_types.cc b/payload_generator/graph_types.cc
new file mode 100644
index 0000000..3e5adc6
--- /dev/null
+++ b/payload_generator/graph_types.cc
@@ -0,0 +1,11 @@
+// Copyright 2015 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "update_engine/payload_generator/graph_types.h"
+
+namespace chromeos_update_engine {
+
+const Vertex::Index Vertex::kInvalidIndex = static_cast<Vertex::Index>(-1);
+
+}  // namespace chromeos_update_engine
diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h
index f94183f..9d3a6fc 100644
--- a/payload_generator/graph_types.h
+++ b/payload_generator/graph_types.h
@@ -68,7 +68,7 @@
off_t chunk_size;
typedef std::vector<Vertex>::size_type Index;
- static const Vertex::Index kInvalidIndex = -1;
+ static const Vertex::Index kInvalidIndex;
};
typedef std::vector<Vertex> Graph;
diff --git a/payload_generator/inplace_generator.cc b/payload_generator/inplace_generator.cc
index 766dcda..ca2d398 100644
--- a/payload_generator/inplace_generator.cc
+++ b/payload_generator/inplace_generator.cc
@@ -17,6 +17,7 @@
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
+#include "update_engine/payload_generator/metadata.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"
@@ -114,7 +115,7 @@
scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
// create vertex to copy original->scratch
cuts.back().new_vertex = graph->size();
- graph->resize(graph->size() + 1);
+ graph->emplace_back();
cuts.back().old_src = edge.first;
cuts.back().old_dst = edge.second;
@@ -675,4 +676,87 @@
return true;
}
+bool InplaceGenerator::GenerateInplaceDelta(
+ const PayloadGenerationConfig& config,
+ int data_file_fd,
+ off_t* data_file_size,
+ Graph* graph,
+ vector<DeltaArchiveManifest_InstallOperation>* kernel_ops,
+ vector<Vertex::Index>* final_order) {
+ vector<Block> blocks(config.target.rootfs_size / config.block_size);
+ TEST_AND_RETURN_FALSE(
+ DeltaDiffGenerator::DeltaReadFiles(graph,
+ &blocks,
+ config.source.rootfs_mountpt,
+ config.target.rootfs_mountpt,
+ config.chunk_size,
+ data_file_fd,
+ data_file_size,
+ false)); // src_ops_allowed
+ LOG(INFO) << "done reading normal files";
+ DeltaDiffGenerator::CheckGraph(*graph);
+
+ LOG(INFO) << "Starting metadata processing";
+ TEST_AND_RETURN_FALSE(Metadata::DeltaReadMetadata(
+ graph,
+ &blocks,
+ config.source.rootfs_part,
+ config.target.rootfs_part,
+ data_file_fd,
+ data_file_size));
+ LOG(INFO) << "Done metadata processing";
+ DeltaDiffGenerator::CheckGraph(*graph);
+
+ graph->emplace_back();
+ TEST_AND_RETURN_FALSE(
+ DeltaDiffGenerator::ReadUnwrittenBlocks(blocks,
+ data_file_fd,
+ data_file_size,
+ config.source.rootfs_part,
+ config.target.rootfs_part,
+ &graph->back()));
+ if (graph->back().op.data_length() == 0) {
+ LOG(INFO) << "No unwritten blocks to write, omitting operation";
+ graph->pop_back();
+ }
+
+ // Final scratch block (if there's space)
+ Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
+ if (blocks.size() < (config.rootfs_partition_size / kBlockSize)) {
+ scratch_vertex = graph->size();
+ graph->emplace_back();
+ CreateScratchNode(
+ blocks.size(),
+ (config.rootfs_partition_size / kBlockSize) - blocks.size(),
+ &graph->back());
+ }
+
+ // Read kernel partition
+ TEST_AND_RETURN_FALSE(
+ DeltaDiffGenerator::DeltaCompressKernelPartition(
+ config.source.kernel_part,
+ config.target.kernel_part,
+ kernel_ops,
+ data_file_fd,
+ data_file_size,
+ false)); // src_ops_allowed
+
+ LOG(INFO) << "done reading kernel";
+ DeltaDiffGenerator::CheckGraph(*graph);
+
+ LOG(INFO) << "Creating edges...";
+ CreateEdges(graph, blocks);
+ LOG(INFO) << "Done creating edges";
+ DeltaDiffGenerator::CheckGraph(*graph);
+
+ TEST_AND_RETURN_FALSE(ConvertGraphToDag(
+ graph,
+ config.target.rootfs_mountpt,
+ data_file_fd,
+ data_file_size,
+ final_order,
+ scratch_vertex));
+ return true;
+}
+
}; // namespace chromeos_update_engine
diff --git a/payload_generator/inplace_generator.h b/payload_generator/inplace_generator.h
index a120196..01d1e3a 100644
--- a/payload_generator/inplace_generator.h
+++ b/payload_generator/inplace_generator.h
@@ -154,6 +154,25 @@
Vertex::Index vertex,
std::vector<DeltaDiffGenerator::Block>* blocks);
+ // Generate the update payload operations for the kernel and rootfs using
+ // only operations that read from the target and/or write to the target,
+ // hence, applying the payload "in-place" in the target partition. This method
+ // assumes that the contents of the source image are pre-copied to the target
+ // partition, up to the size of the source image. Use this method to generate
+ // a delta update with the minor version kInPlaceMinorPayloadVersion.
+ // The rootfs operations are stored in |graph| and should be executed in the
+ // |final_order| order. The kernel operations are stored in |kernel_ops|. All
+ // the offsets in the operations reference the data written to |data_file_fd|.
+ // The total amount of data written to that file is stored in
+ // |data_file_size|.
+ static bool GenerateInplaceDelta(
+ const PayloadGenerationConfig& config,
+ int data_file_fd,
+ off_t* data_file_size,
+ Graph* graph,
+ std::vector<DeltaArchiveManifest_InstallOperation>* kernel_ops,
+ std::vector<Vertex::Index>* final_order);
+
private:
// This should never be constructed.
DISALLOW_IMPLICIT_CONSTRUCTORS(InplaceGenerator);
diff --git a/payload_generator/metadata.cc b/payload_generator/metadata.cc
index a96071a..8e51e27 100644
--- a/payload_generator/metadata.cc
+++ b/payload_generator/metadata.cc
@@ -187,7 +187,7 @@
*data_file_size += data.size();
// Now, insert into graph and blocks vector
- graph->resize(graph->size() + 1);
+ graph->emplace_back();
Vertex::Index vertex = graph->size() - 1;
(*graph)[vertex].op = op;
CHECK((*graph)[vertex].op.has_type());
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 8194970..956f362 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -109,6 +109,10 @@
TEST_AND_RETURN_FALSE(target.kernel_size % block_size == 0);
TEST_AND_RETURN_FALSE(chunk_size == -1 || chunk_size % block_size == 0);
+
+ TEST_AND_RETURN_FALSE(rootfs_partition_size % block_size == 0);
+ TEST_AND_RETURN_FALSE(rootfs_partition_size >= target.rootfs_size);
+
return true;
}
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 1fd8aae..616148a 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -85,6 +85,11 @@
// The minor_version of the requested payload.
uint32_t minor_version;
+ // The size of the rootfs partition, which is not necessarily the same as the
+ // filesystem size in either the source or target version, since there is some
+ // space after the filesystem used to store the verity hashes and/or bootcache.
+ uint64_t rootfs_partition_size = 0;
+
// The chunk size is the maximum size that a single operation should write in
// the destination. Operations bigger than chunk_size should be split. A value
// of -1 means no chunk_size limit.
diff --git a/update_engine.gyp b/update_engine.gyp
index e6add56..8dd5d2d 100644
--- a/update_engine.gyp
+++ b/update_engine.gyp
@@ -280,6 +280,7 @@
'payload_generator/extent_mapper.cc',
'payload_generator/filesystem_iterator.cc',
'payload_generator/full_update_generator.cc',
+ 'payload_generator/graph_types.cc',
'payload_generator/graph_utils.cc',
'payload_generator/inplace_generator.cc',
'payload_generator/metadata.cc',