//
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/vabc_partition_writer.h"

#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <android-base/properties.h>
#include <brillo/secure_blob.h>
#include <libsnapshot/cow_writer.h>

#include "update_engine/common/cow_operation_convert.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/extent_map.h"
#include "update_engine/payload_consumer/file_descriptor.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/snapshot_extent_writer.h"
#include "update_engine/payload_consumer/xor_extent_writer.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
#include "update_engine/update_metadata.pb.h"

namespace chromeos_update_engine {
// Expected layout of COW file:
// === Beginning of Cow Image ===
// All Source Copy Operations
// ========== Label 0 ==========
// Operation 0 in PartitionUpdate
// ========== Label 1 ==========
// Operation 1 in PartitionUpdate
// ========== Label 2 ==========
// Operation 2 in PartitionUpdate
// ========== Label 3 ==========
// .
// .
// .

// When resuming, pass |next_op_index_| as the label to |InitializeAppend|.
// For example, suppose we finished writing all SOURCE_COPY operations and
// completed operation 2, and the update is suspended halfway through
// operation 3. |next_op_index_| would be 3, so we pass 3 as the label to
// |InitializeAppend|. The CowWriter will retain all data before label 3,
// which contains all of operation 2's data, but none of operation 3's data.

using android::snapshot::ICowWriter;
using ::google::protobuf::RepeatedPtrField;

// Compute the XOR map, a map from dst extent to the corresponding merge
// operation.
static ExtentMap<const CowMergeOperation*, ExtentLess> ComputeXorMap(
    const RepeatedPtrField<CowMergeOperation>& merge_ops) {
  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map;
  for (const auto& merge_op : merge_ops) {
    if (merge_op.type() == CowMergeOperation::COW_XOR) {
      xor_map.AddExtent(merge_op.dst_extent(), &merge_op);
    }
  }
  return xor_map;
}

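// The constructor records the destination extents of all COW_COPY merge
// operations in |copy_blocks_| so that PerformSourceCopyOperation() can later
// tell which destination blocks are already covered by the merge sequence.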
VABCPartitionWriter::VABCPartitionWriter(
    const PartitionUpdate& partition_update,
    const InstallPlan::Partition& install_part,
    DynamicPartitionControlInterface* dynamic_control,
    size_t block_size)
    : partition_update_(partition_update),
      install_part_(install_part),
      dynamic_control_(dynamic_control),
      block_size_(block_size),
      executor_(block_size),
      verified_source_fd_(block_size, install_part.source_path) {
  for (const auto& cow_op : partition_update_.merge_operations()) {
    if (cow_op.type() != CowMergeOperation::COW_COPY) {
      continue;
    }
    copy_blocks_.AddExtent(cow_op.dst_extent());
  }
}

bool VABCPartitionWriter::DoesDeviceSupportsXor() {
  return dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled();
}

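// Writes a COW copy operation for every COW_COPY merge op whose source and
// destination extents differ. When userspace snapshots are disabled, blocks
// are emitted one at a time in reverse order, which snapuserd prefers.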
bool VABCPartitionWriter::WriteAllCopyOps() {
  const bool userSnapshots = android::base::GetBoolProperty(
      "ro.virtual_ab.userspace.snapshots.enabled", false);
  for (const auto& cow_op : partition_update_.merge_operations()) {
    if (cow_op.type() != CowMergeOperation::COW_COPY) {
      continue;
    }
    if (cow_op.dst_extent() == cow_op.src_extent()) {
      continue;
    }
    if (userSnapshots) {
      TEST_AND_RETURN_FALSE(cow_op.src_extent().num_blocks() != 0);
      TEST_AND_RETURN_FALSE(
          cow_writer_->AddCopy(cow_op.dst_extent().start_block(),
                               cow_op.src_extent().start_block(),
                               cow_op.src_extent().num_blocks()));
    } else {
      // Add blocks in reverse order, because snapuserd specifically prefers
      // this ordering. Since we already eliminated all self-overlapping
      // SOURCE_COPY during delta generation, this should be safe to do.
      for (size_t i = cow_op.src_extent().num_blocks(); i > 0; i--) {
        TEST_AND_RETURN_FALSE(
            cow_writer_->AddCopy(cow_op.dst_extent().start_block() + i - 1,
                                 cow_op.src_extent().start_block() + i - 1));
      }
    }
  }
  return true;
}

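// Opens the COW writer and prepares it for writing install operations. On
// resume with |next_op_index| > 0, the writer is re-initialized in append
// mode at that label; otherwise a fresh COW is initialized and either the
// merge sequence (XOR-capable devices) or all COPY operations are written up
// front, followed by label 0.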
bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
                               bool source_may_exist,
                               size_t next_op_index) {
  if (dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled()) {
    xor_map_ = ComputeXorMap(partition_update_.merge_operations());
    if (xor_map_.size() > 0) {
      LOG(INFO) << "Virtual AB Compression with XOR is enabled";
    } else {
      LOG(INFO) << "Device supports Virtual AB compression with XOR, but OTA "
                   "package does not.";
    }
  } else {
    LOG(INFO) << "Virtual AB Compression with XOR is disabled.";
  }
  TEST_AND_RETURN_FALSE(install_plan != nullptr);
  if (source_may_exist && install_part_.source_size > 0) {
    TEST_AND_RETURN_FALSE(!install_part_.source_path.empty());
    TEST_AND_RETURN_FALSE(verified_source_fd_.Open());
  }
  std::optional<std::string> source_path;
  if (!install_part_.source_path.empty()) {
    // TODO(zhangkelvin) Make |source_path| a std::optional<std::string>
    source_path = install_part_.source_path;
  }
  cow_writer_ = dynamic_control_->OpenCowWriter(
      install_part_.name, source_path, install_plan->is_resume);
  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);

  // ===== Resume case handling code goes here ====
  // It is possible that the SOURCE_COPY operations are already written but
  // |next_op_index_| is still 0. In this case we discard the previously
  // written SOURCE_COPY operations and start over.
  if (install_plan->is_resume && next_op_index > 0) {
    LOG(INFO) << "Resuming update on partition `"
              << partition_update_.partition_name() << "` op index "
              << next_op_index;
    TEST_AND_RETURN_FALSE(cow_writer_->InitializeAppend(next_op_index));
    return true;
  } else {
    TEST_AND_RETURN_FALSE(cow_writer_->Initialize());
  }

  // ==============================================
  if (!partition_update_.merge_operations().empty()) {
    if (IsXorEnabled()) {
      LOG(INFO) << "VABC XOR enabled for partition "
                << partition_update_.partition_name();
    }
    // When a merge sequence is present in the COW, snapuserd will merge blocks
    // in the order specified by the merge sequence op. Hence we have the
    // freedom of writing COPY operations out of order. Delay processing of
    // copy ops so that update_engine can be more responsive in progress
    // updates.
    if (DoesDeviceSupportsXor()) {
      LOG(INFO) << "Snapuserd supports XOR and merge sequence, writing merge "
                   "sequence and delaying COPY operations";
      TEST_AND_RETURN_FALSE(WriteMergeSequence(
          partition_update_.merge_operations(), cow_writer_.get()));
    } else {
      LOG(INFO) << "Snapuserd does not support merge sequence, writing all "
                   "COPY operations up front; this may take a few minutes.";
      TEST_AND_RETURN_FALSE(WriteAllCopyOps());
    }
    cow_writer_->AddLabel(0);
  }
  return true;
}

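// Flattens |merge_sequence| into a per-block merge order and hands it to the
// COW writer. Identity copies are skipped; self-overlapping ops pick ascending
// or descending block order based on which direction is safe, and
// non-overlapping ops are written ascending when userspace snapshots are
// enabled and descending otherwise.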
bool VABCPartitionWriter::WriteMergeSequence(
    const RepeatedPtrField<CowMergeOperation>& merge_sequence,
    ICowWriter* cow_writer) {
  std::vector<uint32_t> blocks_merge_order;
  for (const auto& merge_op : merge_sequence) {
    const auto& dst_extent = merge_op.dst_extent();
    const auto& src_extent = merge_op.src_extent();
    // In-place copies are basically no-ops; they do not need to be "merged"
    // at all, so don't include them in the merge sequence.
    if (merge_op.type() == CowMergeOperation::COW_COPY &&
        merge_op.src_extent() == merge_op.dst_extent()) {
      continue;
    }

    const bool extent_overlap =
        ExtentRanges::ExtentsOverlap(src_extent, dst_extent);
    // TODO(193863443) Remove this check once this feature
    // lands on all Pixel devices.
    const bool is_ascending = android::base::GetBoolProperty(
        "ro.virtual_ab.userspace.snapshots.enabled", false);

    // If this is a self-overlapping op and |dst_extent| comes after
    // |src_extent|, we must write in reverse order for correctness.
    //
    // If this is a self-overlapping op and |dst_extent| comes before
    // |src_extent|, we must write in ascending order for correctness.
    //
    // If this isn't a self-overlapping op, write blocks in ascending order
    // if userspace snapshots are enabled.
    if (extent_overlap) {
      if (dst_extent.start_block() <= src_extent.start_block()) {
        for (size_t i = 0; i < dst_extent.num_blocks(); i++) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      } else {
        for (int i = dst_extent.num_blocks() - 1; i >= 0; i--) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      }
    } else {
      if (is_ascending) {
        for (size_t i = 0; i < dst_extent.num_blocks(); i++) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      } else {
        for (int i = dst_extent.num_blocks() - 1; i >= 0; i--) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      }
    }
  }
  return cow_writer->AddSequenceData(blocks_merge_order.size(),
                                     blocks_merge_order.data());
}

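// Returns an ExtentWriter that streams data into the partition's COW image
// through |cow_writer_|.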
std::unique_ptr<ExtentWriter> VABCPartitionWriter::CreateBaseExtentWriter() {
  return std::make_unique<SnapshotExtentWriter>(cow_writer_.get());
}

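// Zero and discard operations are both recorded as zero-block ranges in the
// COW; no source data needs to be read.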
[[nodiscard]] bool VABCPartitionWriter::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  for (const auto& extent : operation.dst_extents()) {
    TEST_AND_RETURN_FALSE(
        cow_writer_->AddZeroBlocks(extent.start_block(), extent.num_blocks()));
  }
  return true;
}

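// On devices without XOR support, only source verification happens here,
// since all COPY data was already written during Init(). Otherwise, each
// destination block is either recorded as a COW copy (when covered by a
// COW_COPY merge op) or read from the verified source and written as a raw
// block.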
[[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation(
    const InstallOperation& operation, ErrorCode* error) {
  // COPY ops are already handled during Init(), so there is no need to do the
  // actual work here, but we still want to verify that all blocks contain the
  // expected data.
  auto source_fd = verified_source_fd_.ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);
  // For devices that do not support XOR, the sequence op is not supported
  // either, so all COPY operations were written up front in strict merge
  // order.
  if (!DoesDeviceSupportsXor()) {
    return true;
  }
  std::vector<CowOperation> converted;

  const auto& src_extents = operation.src_extents();
  const auto& dst_extents = operation.dst_extents();
  BlockIterator it1{src_extents};
  BlockIterator it2{dst_extents};
  const bool userSnapshots = android::base::GetBoolProperty(
      "ro.virtual_ab.userspace.snapshots.enabled", false);
  while (!it1.is_end() && !it2.is_end()) {
    const auto src_block = *it1;
    const auto dst_block = *it2;
    ++it1;
    ++it2;
    if (src_block == dst_block) {
      continue;
    }
    if (copy_blocks_.ContainsBlock(dst_block)) {
      push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
    } else {
      push_back(&converted,
                {CowOperation::CowReplace, src_block, dst_block, 1});
    }
  }
  std::vector<uint8_t> buffer;
  for (const auto& cow_op : converted) {
    if (cow_op.op == CowOperation::CowCopy) {
      if (userSnapshots) {
        cow_writer_->AddCopy(
            cow_op.dst_block, cow_op.src_block, cow_op.block_count);
      } else {
        // Add blocks in reverse order, because snapuserd specifically prefers
        // this ordering. Since we already eliminated all self-overlapping
        // SOURCE_COPY during delta generation, this should be safe to do.
        for (size_t i = cow_op.block_count; i > 0; i--) {
          TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(cow_op.dst_block + i - 1,
                                                     cow_op.src_block + i - 1));
        }
      }
      continue;
    }
    buffer.resize(block_size_ * cow_op.block_count);
    ssize_t bytes_read = 0;
    TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
                                         buffer.data(),
                                         block_size_ * cow_op.block_count,
                                         cow_op.src_block * block_size_,
                                         &bytes_read));
    if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != buffer.size()) {
      LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
      return false;
    }
    TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks(
        cow_op.dst_block, buffer.data(), buffer.size()));
  }
  return true;
}

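// Replace-type payload data is handed to |executor_|, which writes it into
// the COW through the base extent writer.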
bool VABCPartitionWriter::PerformReplaceOperation(const InstallOperation& op,
                                                  const void* data,
                                                  size_t count) {
  // Set up the ExtentWriter stack based on the operation type.
  std::unique_ptr<ExtentWriter> writer = CreateBaseExtentWriter();

  return executor_.ExecuteReplaceOperation(op, std::move(writer), data, count);
}

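// Diff operations read from the verified source partition. When XOR is
// enabled, an XORExtentWriter is used so that blocks covered by COW_XOR merge
// ops are written as XOR data; otherwise the plain base extent writer is used.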
bool VABCPartitionWriter::PerformDiffOperation(
    const InstallOperation& operation,
    ErrorCode* error,
    const void* data,
    size_t count) {
  FileDescriptorPtr source_fd =
      verified_source_fd_.ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);
  TEST_AND_RETURN_FALSE(source_fd->IsOpen());

  std::unique_ptr<ExtentWriter> writer =
      IsXorEnabled() ? std::make_unique<XORExtentWriter>(
                           operation, source_fd, cow_writer_.get(), xor_map_)
                     : CreateBaseExtentWriter();
  return executor_.ExecuteDiffOperation(
      operation, std::move(writer), source_fd, data, count);
}

void VABCPartitionWriter::CheckpointUpdateProgress(size_t next_op_index) {
  // No need to call fsync/sync, as CowWriter flushes after a label is added.
  // If |cow_writer_| is null, that means Init() failed. This function
  // shouldn't be called if Init() fails.
  TEST_AND_RETURN(cow_writer_ != nullptr);
  cow_writer_->AddLabel(next_op_index);
}

[[nodiscard]] bool VABCPartitionWriter::FinishedInstallOps() {
  // Add a hardcoded magic label to indicate the end of all install ops. This
  // label is needed by filesystem verification; don't remove it.
  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);
  TEST_AND_RETURN_FALSE(cow_writer_->AddLabel(kEndOfInstallLabel));
  TEST_AND_RETURN_FALSE(cow_writer_->Finalize());
  TEST_AND_RETURN_FALSE(cow_writer_->VerifyMergeOps());
  return true;
}

VABCPartitionWriter::~VABCPartitionWriter() {
  Close();
}

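// Finalizes and releases the COW writer if it is still open. Safe to call
// multiple times; also invoked from the destructor.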
int VABCPartitionWriter::Close() {
  if (cow_writer_) {
    LOG(INFO) << "Finalizing " << partition_update_.partition_name()
              << " COW image";
    cow_writer_->Finalize();
    cow_writer_ = nullptr;
  }
  return 0;
}

}  // namespace chromeos_update_engine