Merge "Merge Android R (rvc-dev-plus-aosp-without-vendor@6692709)" into stage-aosp-master
diff --git a/Android.bp b/Android.bp
index b6ee476..e5f0dd8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -491,6 +491,7 @@
"payload_generator/extent_utils.cc",
"payload_generator/full_update_generator.cc",
"payload_generator/mapfile_filesystem.cc",
+ "payload_generator/merge_sequence_generator.cc",
"payload_generator/payload_file.cc",
"payload_generator/payload_generation_config_android.cc",
"payload_generator/payload_generation_config.cc",
@@ -695,6 +696,7 @@
"payload_generator/fake_filesystem.cc",
"payload_generator/full_update_generator_unittest.cc",
"payload_generator/mapfile_filesystem_unittest.cc",
+ "payload_generator/merge_sequence_generator_unittest.cc",
"payload_generator/payload_file_unittest.cc",
"payload_generator/payload_generation_config_android_unittest.cc",
"payload_generator/payload_generation_config_unittest.cc",
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index fbd754f..449201c 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -228,13 +228,13 @@
new_part.path = "/dev/zero";
new_part.size = 1234;
- payload.AddPartition(*old_part, new_part, aops);
+ payload.AddPartition(*old_part, new_part, aops, {});
// We include a kernel partition without operations.
old_part->name = kPartitionNameKernel;
new_part.name = kPartitionNameKernel;
new_part.size = 0;
- payload.AddPartition(*old_part, new_part, {});
+ payload.AddPartition(*old_part, new_part, {}, {});
test_utils::ScopedTempFile payload_file("Payload-XXXXXX");
string private_key =
diff --git a/payload_generator/delta_diff_generator.cc b/payload_generator/delta_diff_generator.cc
index aa49252..c2b35ee 100644
--- a/payload_generator/delta_diff_generator.cc
+++ b/payload_generator/delta_diff_generator.cc
@@ -39,6 +39,7 @@
#include "update_engine/payload_generator/blob_file_writer.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/full_update_generator.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
#include "update_engine/payload_generator/payload_file.h"
using std::string;
@@ -59,12 +60,14 @@
const PartitionConfig& new_part,
BlobFileWriter* file_writer,
std::vector<AnnotatedOperation>* aops,
+ std::vector<CowMergeOperation>* cow_merge_sequence,
std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy)
: config_(config),
old_part_(old_part),
new_part_(new_part),
file_writer_(file_writer),
aops_(aops),
+ cow_merge_sequence_(cow_merge_sequence),
strategy_(std::move(strategy)) {}
PartitionProcessor(PartitionProcessor&&) noexcept = default;
void Run() override {
@@ -78,6 +81,17 @@
LOG(FATAL) << "GenerateOperations(" << old_part_.name << ", "
<< new_part_.name << ") failed";
}
+
+ bool snapshot_enabled =
+ config_.target.dynamic_partition_metadata &&
+ config_.target.dynamic_partition_metadata->snapshot_enabled();
+ if (old_part_.path.empty() || !snapshot_enabled) {
+ return;
+ }
+ auto generator = MergeSequenceGenerator::Create(*aops_);
+ if (!generator || !generator->Generate(cow_merge_sequence_)) {
+ LOG(FATAL) << "Failed to generate merge sequence";
+ }
}
private:
@@ -86,6 +100,7 @@
const PartitionConfig& new_part_;
BlobFileWriter* file_writer_;
std::vector<AnnotatedOperation>* aops_;
+ std::vector<CowMergeOperation>* cow_merge_sequence_;
std::unique_ptr<chromeos_update_engine::OperationsGenerator> strategy_;
DISALLOW_COPY_AND_ASSIGN(PartitionProcessor);
};
@@ -123,6 +138,8 @@
PartitionConfig empty_part("");
std::vector<std::vector<AnnotatedOperation>> all_aops;
all_aops.resize(config.target.partitions.size());
+ std::vector<std::vector<CowMergeOperation>> all_merge_sequences;
+ all_merge_sequences.resize(config.target.partitions.size());
std::vector<PartitionProcessor> partition_tasks{};
auto thread_count = std::min<int>(diff_utils::GetMaxThreads(),
config.target.partitions.size());
@@ -153,6 +170,7 @@
new_part,
&blob_file,
&all_aops[i],
+ &all_merge_sequences[i],
std::move(strategy)));
}
thread_pool.Start();
@@ -166,7 +184,10 @@
config.is_delta ? config.source.partitions[i] : empty_part;
const PartitionConfig& new_part = config.target.partitions[i];
TEST_AND_RETURN_FALSE(
- payload.AddPartition(old_part, new_part, std::move(all_aops[i])));
+ payload.AddPartition(old_part,
+ new_part,
+ std::move(all_aops[i]),
+ std::move(all_merge_sequences[i])));
}
}
diff --git a/payload_generator/extent_ranges.cc b/payload_generator/extent_ranges.cc
index 4600efe..2098639 100644
--- a/payload_generator/extent_ranges.cc
+++ b/payload_generator/extent_ranges.cc
@@ -202,6 +202,15 @@
}
}
+bool ExtentRanges::OverlapsWithExtent(const Extent& extent) const {
+ for (const auto& entry : extent_set_) {
+ if (ExtentsOverlap(entry, extent)) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool ExtentRanges::ContainsBlock(uint64_t block) const {
auto lower = extent_set_.lower_bound(ExtentForRange(block, 1));
// The block could be on the extent before the one in |lower|.
diff --git a/payload_generator/extent_ranges.h b/payload_generator/extent_ranges.h
index 62ffff4..68aa27f 100644
--- a/payload_generator/extent_ranges.h
+++ b/payload_generator/extent_ranges.h
@@ -63,6 +63,9 @@
void AddRanges(const ExtentRanges& ranges);
void SubtractRanges(const ExtentRanges& ranges);
+ // Returns true if the input extent overlaps with the current ExtentRanges.
+ bool OverlapsWithExtent(const Extent& extent) const;
+
// Returns whether the block |block| is in this ExtentRange.
bool ContainsBlock(uint64_t block) const;
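Editor's note: the new OverlapsWithExtent() helper is a linear scan of extent_set_ using ExtentsOverlap(). As a reference for the block-range semantics it relies on, here is a minimal standalone sketch; SimpleExtent and RangesOverlap are illustrative names, not update_engine code, and ranges are treated as half-open [start, start + num).

#include <cstdint>
#include <iostream>

// Illustrative stand-in for the Extent proto: |num_blocks| blocks starting at
// |start_block|.
struct SimpleExtent {
  uint64_t start_block;
  uint64_t num_blocks;
};

// Two block runs overlap iff each one starts before the other one ends.
bool RangesOverlap(const SimpleExtent& a, const SimpleExtent& b) {
  return a.start_block < b.start_block + b.num_blocks &&
         b.start_block < a.start_block + a.num_blocks;
}

int main() {
  SimpleExtent visited{10, 10};  // blocks 10..19
  SimpleExtent hit{15, 10};      // blocks 15..24 -> overlaps
  SimpleExtent miss{30, 5};      // blocks 30..34 -> disjoint
  std::cout << RangesOverlap(visited, hit) << " "
            << RangesOverlap(visited, miss) << std::endl;  // prints "1 0"
  return 0;
}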
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 18cff4b..dd41a29 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -14,6 +14,7 @@
// limitations under the License.
//
+#include <map>
#include <string>
#include <vector>
@@ -22,6 +23,7 @@
#include <base/logging.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_split.h>
+#include <base/strings/string_util.h>
#include <brillo/flag_helper.h>
#include <brillo/key_value_store.h>
#include <brillo/message_loops/base_message_loop.h>
@@ -47,6 +49,7 @@
// and an output file as arguments and the path to an output file and
// generates a delta that can be sent to Chrome OS clients.
+using std::map;
using std::string;
using std::vector;
@@ -294,6 +297,39 @@
return true;
}
+template <typename Key, typename Val>
+string ToString(const map<Key, Val>& map) {
+ vector<string> result;
+ result.reserve(map.size());
+ for (const auto& it : map) {
+ result.emplace_back(it.first + ": " + it.second);
+ }
+ return "{" + base::JoinString(result, ",") + "}";
+}
+
+bool ParsePerPartitionTimestamps(const string& partition_timestamps,
+ PayloadGenerationConfig* config) {
+ base::StringPairs pairs;
+ CHECK(base::SplitStringIntoKeyValuePairs(
+ partition_timestamps, ':', ',', &pairs))
+ << "--partition_timestamps accepts commad "
+ "separated pairs. e.x. system:1234,vendor:5678";
+ map<string, string> partition_timestamps_map{
+ std::move_iterator(pairs.begin()), std::move_iterator(pairs.end())};
+ for (auto&& partition : config->target.partitions) {
+ auto&& it = partition_timestamps_map.find(partition.name);
+ if (it != partition_timestamps_map.end()) {
+ partition.version = std::move(it->second);
+ partition_timestamps_map.erase(it);
+ }
+ }
+ if (!partition_timestamps_map.empty()) {
+ LOG(ERROR) << "Unused timestamps: " << ToString(partition_timestamps_map);
+ return false;
+ }
+ return true;
+}
+
int Main(int argc, char** argv) {
DEFINE_string(old_image, "", "Path to the old rootfs");
DEFINE_string(new_image, "", "Path to the new rootfs");
@@ -384,6 +420,11 @@
0,
"The maximum timestamp of the OS allowed to apply this "
"payload.");
+ DEFINE_string(
+ partition_timestamps,
+ "",
+ "The per-partition maximum timestamps which the OS allowed to apply this "
+ "payload. Passed in comma separated pairs, e.x. system:1234,vendor:5678");
DEFINE_string(old_channel,
"",
@@ -709,6 +750,10 @@
}
payload_config.max_timestamp = FLAGS_max_timestamp;
+ if (!FLAGS_partition_timestamps.empty()) {
+ CHECK(ParsePerPartitionTimestamps(FLAGS_partition_timestamps,
+ &payload_config));
+ }
if (payload_config.is_delta &&
payload_config.version.minor >= kVerityMinorPayloadVersion)
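Editor's note: ParsePerPartitionTimestamps() above splits the flag value on ',' and ':' with base::SplitStringIntoKeyValuePairs and moves each matching value into the target partition config, failing if any partition name is left over. Below is a minimal standalone sketch of the same parsing without the libchrome dependency; ParseTimestampPairs is a hypothetical helper for illustration only.

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Parses "system:1234,vendor:5678" into {"system": "1234", "vendor": "5678"}.
// Returns false if any pair lacks the ':' separator or has an empty key.
bool ParseTimestampPairs(const std::string& input,
                         std::map<std::string, std::string>* out) {
  std::istringstream stream(input);
  std::string pair;
  while (std::getline(stream, pair, ',')) {
    auto pos = pair.find(':');
    if (pos == std::string::npos || pos == 0)
      return false;
    (*out)[pair.substr(0, pos)] = pair.substr(pos + 1);
  }
  return true;
}

int main() {
  std::map<std::string, std::string> timestamps;
  if (ParseTimestampPairs("system:1234,vendor:5678", &timestamps)) {
    for (const auto& [name, version] : timestamps)
      std::cout << name << " -> " << version << std::endl;
  }
  return 0;
}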
diff --git a/payload_generator/merge_sequence_generator.cc b/payload_generator/merge_sequence_generator.cc
new file mode 100644
index 0000000..eaffeac
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.cc
@@ -0,0 +1,269 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+#include <algorithm>
+
+#include "update_engine/payload_generator/extent_utils.h"
+
+namespace chromeos_update_engine {
+
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+ const Extent& dst_extent) {
+ CowMergeOperation ret;
+ ret.set_type(CowMergeOperation::COW_COPY);
+ *ret.mutable_src_extent() = src_extent;
+ *ret.mutable_dst_extent() = dst_extent;
+ return ret;
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CowMergeOperation& merge_operation) {
+ os << "CowMergeOperation src extent: "
+ << ExtentsToString({merge_operation.src_extent()})
+ << ", dst extent: " << ExtentsToString({merge_operation.dst_extent()});
+ return os;
+}
+
+// The OTA generation guarantees that all blocks in the dst extent are written
+// only once. So we can use the dst extent to order the CowMergeOperations.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+ return op1.dst_extent().start_block() < op2.dst_extent().start_block();
+}
+
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2) {
+ return op1.type() == op2.type() && op1.src_extent() == op2.src_extent() &&
+ op1.dst_extent() == op2.dst_extent();
+}
+
+std::unique_ptr<MergeSequenceGenerator> MergeSequenceGenerator::Create(
+ const std::vector<AnnotatedOperation>& aops) {
+ std::vector<CowMergeOperation> sequence;
+ for (const auto& aop : aops) {
+ // Only handle SOURCE_COPY for now, for the COW size optimization.
+ if (aop.op.type() != InstallOperation::SOURCE_COPY) {
+ continue;
+ }
+ if (aop.op.dst_extents().size() != 1) {
+ std::vector<Extent> out_extents;
+ ExtentsToVector(aop.op.dst_extents(), &out_extents);
+ LOG(ERROR) << "The dst extents for source_copy expects to be contiguous,"
+ << " dst extents: " << ExtentsToString(out_extents);
+ return nullptr;
+ }
+
+ // Split the source extents.
+ size_t used_blocks = 0;
+ for (const auto& src_extent : aop.op.src_extents()) {
+ // The dst_extent in the merge sequence will be a subset of
+ // InstallOperation's dst_extent. This will simplify the OTA -> COW
+ // conversion when we install the payload.
+ Extent dst_extent =
+ ExtentForRange(aop.op.dst_extents(0).start_block() + used_blocks,
+ src_extent.num_blocks());
+ sequence.emplace_back(CreateCowMergeOperation(src_extent, dst_extent));
+ used_blocks += src_extent.num_blocks();
+ }
+
+ if (used_blocks != aop.op.dst_extents(0).num_blocks()) {
+ LOG(ERROR) << "Number of blocks in src extents doesn't equal to the"
+ << " ones in the dst extents, src blocks " << used_blocks
+ << ", dst blocks " << aop.op.dst_extents(0).num_blocks();
+ return nullptr;
+ }
+ }
+
+ std::sort(sequence.begin(), sequence.end());
+ return std::unique_ptr<MergeSequenceGenerator>(
+ new MergeSequenceGenerator(sequence));
+}
+
+bool MergeSequenceGenerator::FindDependency(
+ std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) const {
+ CHECK(result);
+ LOG(INFO) << "Finding dependencies";
+
+ // Since the OTA operations may reuse some source blocks, use a binary
+ // search on the sorted dst extents to find overlaps.
+ std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+ for (const auto& op : operations_) {
+ // lower bound (inclusive): dst extent's end block >= src extent's start
+ // block.
+ const auto lower_it = std::lower_bound(
+ operations_.begin(),
+ operations_.end(),
+ op,
+ [](const CowMergeOperation& it, const CowMergeOperation& op) {
+ auto dst_end_block =
+ it.dst_extent().start_block() + it.dst_extent().num_blocks() - 1;
+ return dst_end_block < op.src_extent().start_block();
+ });
+ // upper bound: dst extent's start block > src extent's end block
+ const auto upper_it = std::upper_bound(
+ lower_it,
+ operations_.end(),
+ op,
+ [](const CowMergeOperation& op, const CowMergeOperation& it) {
+ auto src_end_block =
+ op.src_extent().start_block() + op.src_extent().num_blocks() - 1;
+ return src_end_block < it.dst_extent().start_block();
+ });
+
+ // TODO(xunchang) skip inserting the empty set to merge_after.
+ if (lower_it == upper_it) {
+ merge_after.insert({op, {}});
+ } else {
+ std::set<CowMergeOperation> operations(lower_it, upper_it);
+ auto it = operations.find(op);
+ if (it != operations.end()) {
+ LOG(INFO) << "Self overlapping " << op;
+ operations.erase(it);
+ }
+ auto ret = merge_after.emplace(op, std::move(operations));
+ // Check that the insertion indeed happened.
+ CHECK(ret.second);
+ }
+ }
+
+ *result = std::move(merge_after);
+ return true;
+}
+
+bool MergeSequenceGenerator::Generate(
+ std::vector<CowMergeOperation>* sequence) const {
+ sequence->clear();
+ std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+ if (!FindDependency(&merge_after)) {
+ LOG(ERROR) << "Failed to find dependencies";
+ return false;
+ }
+
+ LOG(INFO) << "Generating sequence";
+
+ // Use the non-DFS version of topological sort, so we can control which
+ // operations to discard when breaking cycles; this yields a deterministic
+ // sequence.
+ std::map<CowMergeOperation, int> incoming_edges;
+ for (const auto& it : merge_after) {
+ for (const auto& blocked : it.second) {
+ // Value is default initialized to 0.
+ incoming_edges[blocked] += 1;
+ }
+ }
+
+ std::set<CowMergeOperation> free_operations;
+ for (const auto& op : operations_) {
+ if (incoming_edges.find(op) == incoming_edges.end()) {
+ free_operations.insert(op);
+ }
+ }
+
+ std::vector<CowMergeOperation> merge_sequence;
+ std::set<CowMergeOperation> convert_to_raw;
+ while (!incoming_edges.empty()) {
+ if (!free_operations.empty()) {
+ merge_sequence.insert(
+ merge_sequence.end(), free_operations.begin(), free_operations.end());
+ } else {
+ auto to_convert = incoming_edges.begin()->first;
+ free_operations.insert(to_convert);
+ convert_to_raw.insert(to_convert);
+ LOG(INFO) << "Converting operation to raw " << to_convert;
+ }
+
+ std::set<CowMergeOperation> next_free_operations;
+ for (const auto& op : free_operations) {
+ incoming_edges.erase(op);
+
+ // Now that this particular operation is merged, other operations blocked
+ // by this one may be free. Decrement the count of blocking operations,
+ // and set up the free operations for the next iteration.
+ for (const auto& blocked : merge_after[op]) {
+ auto it = incoming_edges.find(blocked);
+ if (it == incoming_edges.end()) {
+ continue;
+ }
+
+ auto blocking_transfer_count = &it->second;
+ if (*blocking_transfer_count <= 0) {
+ LOG(ERROR) << "Unexpected count in merge after map "
+ << blocking_transfer_count;
+ return false;
+ }
+ // If this operation is no longer blocked by anything, add it to the merge
+ // sequence in the next iteration.
+ *blocking_transfer_count -= 1;
+ if (*blocking_transfer_count == 0) {
+ next_free_operations.insert(blocked);
+ }
+ }
+ }
+
+ LOG(INFO) << "Remaining transfers " << incoming_edges.size()
+ << ", free transfers " << free_operations.size()
+ << ", merge_sequence size " << merge_sequence.size();
+ free_operations = std::move(next_free_operations);
+ }
+
+ if (!free_operations.empty()) {
+ merge_sequence.insert(
+ merge_sequence.end(), free_operations.begin(), free_operations.end());
+ }
+
+ CHECK_EQ(operations_.size(), merge_sequence.size() + convert_to_raw.size());
+
+ size_t blocks_in_sequence = 0;
+ for (const CowMergeOperation& transfer : merge_sequence) {
+ blocks_in_sequence += transfer.dst_extent().num_blocks();
+ }
+
+ size_t blocks_in_raw = 0;
+ for (const CowMergeOperation& transfer : convert_to_raw) {
+ blocks_in_raw += transfer.dst_extent().num_blocks();
+ }
+
+ LOG(INFO) << "Blocks in merge sequence " << blocks_in_sequence
+ << ", blocks in raw " << blocks_in_raw;
+ if (!ValidateSequence(merge_sequence)) {
+ return false;
+ }
+
+ *sequence = std::move(merge_sequence);
+ return true;
+}
+
+bool MergeSequenceGenerator::ValidateSequence(
+ const std::vector<CowMergeOperation>& sequence) {
+ LOG(INFO) << "Validating merge sequence";
+ ExtentRanges visited;
+ for (const auto& op : sequence) {
+ if (visited.OverlapsWithExtent(op.src_extent())) {
+ LOG(ERROR) << "Transfer violates the merge sequence " << op
+ << "Visited extent ranges: ";
+ visited.Dump();
+ return false;
+ }
+
+ CHECK(!visited.OverlapsWithExtent(op.dst_extent()))
+ << "dst extent should write only once.";
+ visited.AddExtent(op.dst_extent());
+ }
+
+ return true;
+}
+
+} // namespace chromeos_update_engine
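Editor's note: Generate() above is essentially Kahn's (non-DFS) topological sort over the merge_after graph, with cycle breaking: whenever no operation is free, the smallest remaining operation (by dst extent, the map's key order) is dropped from the sequence and later emitted as raw blocks. The following is a compact standalone sketch of that loop on a toy integer graph, assuming the same ordering; it is an illustration, not the production code.

#include <iostream>
#include <map>
#include <set>
#include <utility>
#include <vector>

// Toy version of the cycle-breaking topological sort used by Generate().
// |merge_after[a]| holds the nodes that must merge after |a|. Nodes dropped to
// break cycles never appear in the returned sequence (they would be converted
// to raw writes).
std::vector<int> OrderWithCycleBreaking(
    const std::map<int, std::set<int>>& merge_after) {
  std::map<int, int> incoming_edges;
  for (const auto& [node, blocked] : merge_after)
    for (int b : blocked)
      incoming_edges[b] += 1;

  std::set<int> free_nodes;
  for (const auto& [node, blocked] : merge_after)
    if (incoming_edges.find(node) == incoming_edges.end())
      free_nodes.insert(node);

  std::vector<int> sequence;
  while (!incoming_edges.empty()) {
    if (free_nodes.empty()) {
      // Cycle: drop the smallest remaining node, which frees its successors.
      free_nodes.insert(incoming_edges.begin()->first);
    } else {
      sequence.insert(sequence.end(), free_nodes.begin(), free_nodes.end());
    }
    std::set<int> next_free;
    for (int node : free_nodes) {
      incoming_edges.erase(node);
      auto it = merge_after.find(node);
      if (it == merge_after.end())
        continue;
      for (int blocked : it->second) {
        auto edge = incoming_edges.find(blocked);
        if (edge != incoming_edges.end() && --edge->second == 0)
          next_free.insert(blocked);
      }
    }
    free_nodes = std::move(next_free);
  }
  sequence.insert(sequence.end(), free_nodes.begin(), free_nodes.end());
  return sequence;
}

int main() {
  // 1 -> 2 -> 3 -> 1 is a cycle; 4 has no dependencies.
  std::map<int, std::set<int>> merge_after{{1, {2}}, {2, {3}}, {3, {1}}, {4, {}}};
  for (int node : OrderWithCycleBreaking(merge_after))
    std::cout << node << ' ';
  std::cout << '\n';  // prints "4 2 3"; node 1 was dropped to break the cycle
  return 0;
}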
diff --git a/payload_generator/merge_sequence_generator.h b/payload_generator/merge_sequence_generator.h
new file mode 100644
index 0000000..bc0158e
--- /dev/null
+++ b/payload_generator/merge_sequence_generator.h
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+#define UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "update_engine/payload_generator/annotated_operation.h"
+#include "update_engine/payload_generator/extent_ranges.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/update_metadata.pb.h"
+
+namespace chromeos_update_engine {
+// Constructs a CowMergeOperation from the src & dst extents.
+CowMergeOperation CreateCowMergeOperation(const Extent& src_extent,
+ const Extent& dst_extent);
+
+// Comparator for CowMergeOperation.
+bool operator<(const CowMergeOperation& op1, const CowMergeOperation& op2);
+bool operator==(const CowMergeOperation& op1, const CowMergeOperation& op2);
+
+std::ostream& operator<<(std::ostream& os,
+ const CowMergeOperation& merge_operation);
+
+// This class takes a list of CowMergeOperations and sorts them so that no
+// read-after-write happens when the sequence is followed. When there is a
+// cycle, some operations are omitted from the list. Therefore, the resulting
+// sequence may not contain all blocks in the input list.
+class MergeSequenceGenerator {
+ public:
+ // Creates an object from a list of OTA InstallOperations. Returns nullptr on
+ // failure.
+ static std::unique_ptr<MergeSequenceGenerator> Create(
+ const std::vector<AnnotatedOperation>& aops);
+ // Checks that no read after write happens in the given sequence.
+ static bool ValidateSequence(const std::vector<CowMergeOperation>& sequence);
+
+ // Generates a merge sequence from |operations_|, puts the result in
+ // |sequence|. Returns false on failure.
+ bool Generate(std::vector<CowMergeOperation>* sequence) const;
+
+ private:
+ friend class MergeSequenceGeneratorTest;
+ explicit MergeSequenceGenerator(std::vector<CowMergeOperation> transfers)
+ : operations_(std::move(transfers)) {}
+
+ // For each merge operation, finds all the operations that should merge
+ // after it. Puts the result in |merge_after|.
+ bool FindDependency(std::map<CowMergeOperation, std::set<CowMergeOperation>>*
+ merge_after) const;
+ // The list of CowMergeOperations to sort.
+ std::vector<CowMergeOperation> operations_;
+};
+
+} // namespace chromeos_update_engine
+#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_MERGE_SEQUENCE_GENERATOR_H_
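Editor's note: a sketch of the intended call pattern, mirroring how PartitionProcessor::Run() in delta_diff_generator.cc wires the class up; GenerateCowMergeSequence is a hypothetical wrapper added here only for illustration.

#include <vector>

#include "update_engine/payload_generator/merge_sequence_generator.h"

namespace chromeos_update_engine {

// |aops| are the annotated install operations already generated for one
// partition. On success, |cow_merge_sequence| receives the ordered COW_COPY
// operations to embed in the payload.
bool GenerateCowMergeSequence(const std::vector<AnnotatedOperation>& aops,
                              std::vector<CowMergeOperation>* cow_merge_sequence) {
  auto generator = MergeSequenceGenerator::Create(aops);
  if (!generator)
    return false;  // e.g. a SOURCE_COPY whose dst extents are not contiguous
  return generator->Generate(cow_merge_sequence);
}

}  // namespace chromeos_update_engine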
diff --git a/payload_generator/merge_sequence_generator_unittest.cc b/payload_generator/merge_sequence_generator_unittest.cc
new file mode 100644
index 0000000..567ede1
--- /dev/null
+++ b/payload_generator/merge_sequence_generator_unittest.cc
@@ -0,0 +1,196 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <algorithm>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "update_engine/payload_consumer/payload_constants.h"
+#include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/merge_sequence_generator.h"
+
+namespace chromeos_update_engine {
+class MergeSequenceGeneratorTest : public ::testing::Test {
+ protected:
+ void VerifyTransfers(MergeSequenceGenerator* generator,
+ const std::vector<CowMergeOperation>& expected) {
+ ASSERT_EQ(expected, generator->operations_);
+ }
+
+ void FindDependency(
+ std::vector<CowMergeOperation> transfers,
+ std::map<CowMergeOperation, std::set<CowMergeOperation>>* result) {
+ std::sort(transfers.begin(), transfers.end());
+ MergeSequenceGenerator generator(std::move(transfers));
+ ASSERT_TRUE(generator.FindDependency(result));
+ }
+
+ void GenerateSequence(std::vector<CowMergeOperation> transfers,
+ const std::vector<CowMergeOperation>& expected) {
+ std::sort(transfers.begin(), transfers.end());
+ MergeSequenceGenerator generator(std::move(transfers));
+ std::vector<CowMergeOperation> sequence;
+ ASSERT_TRUE(generator.Generate(&sequence));
+ ASSERT_EQ(expected, sequence);
+ }
+};
+
+TEST_F(MergeSequenceGeneratorTest, Create) {
+ std::vector<AnnotatedOperation> aops{{"file1", {}}, {"file2", {}}};
+ aops[0].op.set_type(InstallOperation::SOURCE_COPY);
+ *aops[0].op.add_src_extents() = ExtentForRange(10, 10);
+ *aops[0].op.add_dst_extents() = ExtentForRange(30, 10);
+
+ aops[1].op.set_type(InstallOperation::SOURCE_COPY);
+ *aops[1].op.add_src_extents() = ExtentForRange(20, 10);
+ *aops[1].op.add_dst_extents() = ExtentForRange(40, 10);
+
+ auto generator = MergeSequenceGenerator::Create(aops);
+ ASSERT_TRUE(generator);
+ std::vector<CowMergeOperation> expected = {
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(30, 10)),
+ CreateCowMergeOperation(ExtentForRange(20, 10), ExtentForRange(40, 10))};
+ VerifyTransfers(generator.get(), expected);
+
+ *aops[1].op.add_src_extents() = ExtentForRange(30, 5);
+ *aops[1].op.add_dst_extents() = ExtentForRange(50, 5);
+ generator = MergeSequenceGenerator::Create(aops);
+ ASSERT_FALSE(generator);
+}
+
+TEST_F(MergeSequenceGeneratorTest, Create_SplitSource) {
+ InstallOperation op;
+ op.set_type(InstallOperation::SOURCE_COPY);
+ *(op.add_src_extents()) = ExtentForRange(2, 3);
+ *(op.add_src_extents()) = ExtentForRange(6, 1);
+ *(op.add_src_extents()) = ExtentForRange(8, 4);
+ *(op.add_dst_extents()) = ExtentForRange(10, 8);
+
+ AnnotatedOperation aop{"file1", op};
+ auto generator = MergeSequenceGenerator::Create({aop});
+ ASSERT_TRUE(generator);
+ std::vector<CowMergeOperation> expected = {
+ CreateCowMergeOperation(ExtentForRange(2, 3), ExtentForRange(10, 3)),
+ CreateCowMergeOperation(ExtentForRange(6, 1), ExtentForRange(13, 1)),
+ CreateCowMergeOperation(ExtentForRange(8, 4), ExtentForRange(14, 4))};
+ VerifyTransfers(generator.get(), expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency) {
+ std::vector<CowMergeOperation> transfers = {
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+ CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(50, 10)),
+ };
+
+ std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+ FindDependency(transfers, &merge_after);
+ ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[0]));
+ ASSERT_EQ(std::set<CowMergeOperation>(), merge_after.at(transfers[1]));
+
+ transfers = {
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+ CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+ CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+ };
+
+ FindDependency(transfers, &merge_after);
+ ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+ merge_after.at(transfers[0]));
+ ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[2]}),
+ merge_after.at(transfers[1]));
+ ASSERT_EQ(std::set<CowMergeOperation>({transfers[0], transfers[1]}),
+ merge_after.at(transfers[2]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, FindDependency_ReusedSourceBlocks) {
+ std::vector<CowMergeOperation> transfers = {
+ CreateCowMergeOperation(ExtentForRange(5, 10), ExtentForRange(15, 10)),
+ CreateCowMergeOperation(ExtentForRange(6, 5), ExtentForRange(30, 5)),
+ CreateCowMergeOperation(ExtentForRange(50, 5), ExtentForRange(5, 5)),
+ };
+
+ std::map<CowMergeOperation, std::set<CowMergeOperation>> merge_after;
+ FindDependency(transfers, &merge_after);
+ ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+ merge_after.at(transfers[0]));
+ ASSERT_EQ(std::set<CowMergeOperation>({transfers[2]}),
+ merge_after.at(transfers[1]));
+}
+
+TEST_F(MergeSequenceGeneratorTest, ValidateSequence) {
+ std::vector<CowMergeOperation> transfers = {
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+ CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+ };
+
+ // Self overlap within a single operation is allowed.
+ ASSERT_TRUE(MergeSequenceGenerator::ValidateSequence(transfers));
+
+ transfers = {
+ CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(20, 10)),
+ CreateCowMergeOperation(ExtentForRange(15, 10), ExtentForRange(10, 10)),
+ };
+ ASSERT_FALSE(MergeSequenceGenerator::ValidateSequence(transfers));
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceNoCycles) {
+ std::vector<CowMergeOperation> transfers = {
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+ // file3 should merge before file2
+ CreateCowMergeOperation(ExtentForRange(40, 5), ExtentForRange(25, 5)),
+ CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+ };
+
+ std::vector<CowMergeOperation> expected{
+ transfers[0], transfers[2], transfers[1]};
+ GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceWithCycles) {
+ std::vector<CowMergeOperation> transfers = {
+ CreateCowMergeOperation(ExtentForRange(25, 10), ExtentForRange(30, 10)),
+ CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(40, 10)),
+ CreateCowMergeOperation(ExtentForRange(40, 10), ExtentForRange(25, 10)),
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(15, 10)),
+ };
+
+ // Files 1, 2, 3 form a cycle; file3, whose dst extent has the smallest
+ // offset, will be converted to raw blocks.
+ std::vector<CowMergeOperation> expected{
+ transfers[3], transfers[1], transfers[0]};
+ GenerateSequence(transfers, expected);
+}
+
+TEST_F(MergeSequenceGeneratorTest, GenerateSequenceMultipleCycles) {
+ std::vector<CowMergeOperation> transfers = {
+ // cycle 1
+ CreateCowMergeOperation(ExtentForRange(10, 10), ExtentForRange(25, 10)),
+ CreateCowMergeOperation(ExtentForRange(24, 5), ExtentForRange(35, 5)),
+ CreateCowMergeOperation(ExtentForRange(30, 10), ExtentForRange(15, 10)),
+ // cycle 2
+ CreateCowMergeOperation(ExtentForRange(55, 10), ExtentForRange(60, 10)),
+ CreateCowMergeOperation(ExtentForRange(60, 10), ExtentForRange(70, 10)),
+ CreateCowMergeOperation(ExtentForRange(70, 10), ExtentForRange(55, 10)),
+ };
+
+ // Files 3 and 6 will be converted to raw.
+ std::vector<CowMergeOperation> expected{
+ transfers[1], transfers[0], transfers[4], transfers[3]};
+ GenerateSequence(transfers, expected);
+}
+
+} // namespace chromeos_update_engine
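Editor's note: the FindDependency tests above exercise the binary search inside FindDependency(), which assumes the dst extents are pairwise disjoint and sorted by start block. Below is a standalone sketch of that interval query on plain integer ranges; Range and FindOverlaps are illustrative names, not part of the tree.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

struct Range {
  uint64_t start;
  uint64_t num;
};

// Given |ranges| sorted by start and pairwise disjoint, returns the index
// range [first, last) of entries that overlap [query.start, query.start + query.num).
std::pair<size_t, size_t> FindOverlaps(const std::vector<Range>& ranges,
                                       const Range& query) {
  // First range whose end block reaches the query's start block.
  auto first = std::lower_bound(
      ranges.begin(), ranges.end(), query,
      [](const Range& r, const Range& q) { return r.start + r.num - 1 < q.start; });
  // First range that starts after the query's end block.
  auto last = std::upper_bound(
      first, ranges.end(), query,
      [](const Range& q, const Range& r) { return q.start + q.num - 1 < r.start; });
  return {static_cast<size_t>(first - ranges.begin()),
          static_cast<size_t>(last - ranges.begin())};
}

int main() {
  std::vector<Range> dst = {{15, 10}, {25, 10}, {35, 5}};  // blocks 15-24, 25-34, 35-39
  auto [first, last] = FindOverlaps(dst, {24, 5});         // query blocks 24-28
  std::cout << "overlapping entries: [" << first << ", " << last << ")\n";  // [0, 2)
  return 0;
}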
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index c1594c7..49dff4e 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -86,12 +86,15 @@
bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
const PartitionConfig& new_conf,
- vector<AnnotatedOperation> aops) {
+ vector<AnnotatedOperation> aops,
+ vector<CowMergeOperation> merge_sequence) {
Partition part;
part.name = new_conf.name;
part.aops = std::move(aops);
+ part.cow_merge_sequence = std::move(merge_sequence);
part.postinstall = new_conf.postinstall;
part.verity = new_conf.verity;
+ part.version = new_conf.version;
// Initialize the PartitionInfo objects if present.
if (!old_conf.path.empty())
TEST_AND_RETURN_FALSE(
@@ -132,6 +135,9 @@
for (const auto& part : part_vec_) {
PartitionUpdate* partition = manifest_.add_partitions();
partition->set_partition_name(part.name);
+ if (!part.version.empty()) {
+ partition->set_version(part.version);
+ }
if (part.postinstall.run) {
partition->set_run_postinstall(true);
if (!part.postinstall.path.empty())
@@ -159,6 +165,10 @@
for (const AnnotatedOperation& aop : part.aops) {
*partition->add_operations() = aop.op;
}
+ for (const auto& merge_op : part.cow_merge_sequence) {
+ *partition->add_merge_operations() = merge_op;
+ }
+
if (part.old_info.has_size() || part.old_info.has_hash())
*(partition->mutable_old_partition_info()) = part.old_info;
if (part.new_info.has_size() || part.new_info.has_hash())
diff --git a/payload_generator/payload_file.h b/payload_generator/payload_file.h
index d1f8196..8b17956 100644
--- a/payload_generator/payload_file.h
+++ b/payload_generator/payload_file.h
@@ -43,7 +43,8 @@
// reference a blob stored in the file provided to WritePayload().
bool AddPartition(const PartitionConfig& old_conf,
const PartitionConfig& new_conf,
- std::vector<AnnotatedOperation> aops);
+ std::vector<AnnotatedOperation> aops,
+ std::vector<CowMergeOperation> merge_sequence);
// Write the payload to the |payload_file| file. The operations reference
// blobs in the |data_blobs_path| file and the blobs will be reordered in the
@@ -90,12 +91,15 @@
// The operations to be performed to this partition.
std::vector<AnnotatedOperation> aops;
+ std::vector<CowMergeOperation> cow_merge_sequence;
PartitionInfo old_info;
PartitionInfo new_info;
PostInstallConfig postinstall;
VerityConfig verity;
+ // Per-partition timestamp.
+ std::string version;
};
std::vector<Partition> part_vec_;
diff --git a/payload_generator/payload_generation_config.h b/payload_generator/payload_generation_config.h
index 9abb97f..ec63043 100644
--- a/payload_generator/payload_generation_config.h
+++ b/payload_generator/payload_generation_config.h
@@ -119,6 +119,9 @@
// Enables the on device fec data computation by default.
bool disable_fec_computation = false;
+
+ // Per-partition version, usually a number representing a timestamp.
+ std::string version;
};
// The ImageConfig struct describes a pair of binaries kernel and rootfs and the
diff --git a/payload_generator/payload_properties_unittest.cc b/payload_generator/payload_properties_unittest.cc
index db3902c..e0072fc 100644
--- a/payload_generator/payload_properties_unittest.cc
+++ b/payload_generator/payload_properties_unittest.cc
@@ -98,7 +98,7 @@
EXPECT_TRUE(strategy->GenerateOperations(
config, old_part, new_part, &blob_file_writer, &aops));
- payload.AddPartition(old_part, new_part, aops);
+ payload.AddPartition(old_part, new_part, aops, {});
uint64_t metadata_size;
EXPECT_TRUE(payload.WritePayload(
diff --git a/scripts/brillo_update_payload b/scripts/brillo_update_payload
index 9bae74e..3bc87bd 100755
--- a/scripts/brillo_update_payload
+++ b/scripts/brillo_update_payload
@@ -186,6 +186,10 @@
"Optional: The maximum unix timestamp of the OS allowed to apply this \
payload, should be set to a number higher than the build timestamp of the \
system running on the device, 0 if not specified."
+ DEFINE_string partition_timestamps "" \
+ "Optional: Per-partition maximum unix timestamp of the OS allowed to \
+apply this payload. Should be a comma separated key value pairs. e.x.\
+system:1234,vendor:456"
DEFINE_string disable_fec_computation "" \
"Optional: Disables the on device fec data computation for incremental \
update. This feature is enabled by default."
@@ -696,6 +700,10 @@
GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
fi
+ if [[ -n "${FLAGS_partition_timestamps}" ]]; then
+ GENERATOR_ARGS+=( --partition_timestamps="${FLAGS_partition_timestamps}" )
+ fi
+
if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
GENERATOR_ARGS+=(
--new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
index 965bb76..7625ee8 100755
--- a/scripts/payload_info.py
+++ b/scripts/payload_info.py
@@ -74,7 +74,9 @@
for partition in manifest.partitions:
DisplayValue(' Number of "%s" ops' % partition.partition_name,
len(partition.operations))
-
+ for partition in manifest.partitions:
+   DisplayValue('Timestamp for %s' % partition.partition_name,
+                partition.version)
DisplayValue('Block size', manifest.block_size)
DisplayValue('Minor version', manifest.minor_version)
diff --git a/update_metadata.proto b/update_metadata.proto
index f79e38b..373ee5e 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -225,6 +225,22 @@
optional bytes src_sha256_hash = 9;
}
+// Hints to the VAB (Virtual A/B) snapshot to skip writing some blocks if they
+// are identical to the ones on the source image. The src & dst extents for
+// each CowMergeOperation should be contiguous, and they're a subset of an OTA
+// InstallOperation.
+// At merge time, we need to follow the pre-computed sequence to avoid
+// read-after-write, similar to the in-place update scheme.
+message CowMergeOperation {
+ enum Type {
+ COW_COPY = 0; // identical blocks
+ }
+ optional Type type = 1;
+
+ optional Extent src_extent = 2;
+ optional Extent dst_extent = 3;
+}
+
// Describes the update to apply to a single partition.
message PartitionUpdate {
// A platform-specific name to identify the partition set being updated. For
@@ -293,6 +309,11 @@
// as an effort to support partial updates. For most partitions,
// this is the build timestamp.
optional string version = 17;
+
+ // A sorted list of CowMergeOperation. When writing the COW, we can choose
+ // to skip writing the raw bytes for these extents. During snapshot merge,
+ // the bytes will be read from the source partitions instead.
+ repeated CowMergeOperation merge_operations = 18;
}
message DynamicPartitionGroup {
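Editor's note: on the generator side the new merge_operations field is filled through the generated protobuf accessors, as payload_file.cc does above. A hedged sketch of building one COW_COPY hint follows, assuming the C++ code generated from this proto; AppendCowCopyHint is illustrative, not an existing helper.

#include "update_engine/update_metadata.pb.h"

namespace chromeos_update_engine {

// Builds a single COW_COPY hint saying that destination blocks
// [dst_start, dst_start + num_blocks) are identical to source blocks
// [src_start, src_start + num_blocks), and appends it to |partition|.
void AppendCowCopyHint(uint64_t src_start,
                       uint64_t dst_start,
                       uint64_t num_blocks,
                       PartitionUpdate* partition) {
  CowMergeOperation* op = partition->add_merge_operations();
  op->set_type(CowMergeOperation::COW_COPY);
  op->mutable_src_extent()->set_start_block(src_start);
  op->mutable_src_extent()->set_num_blocks(num_blocks);
  op->mutable_dst_extent()->set_start_block(dst_start);
  op->mutable_dst_extent()->set_num_blocks(num_blocks);
}

}  // namespace chromeos_update_engine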