//
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/vabc_partition_writer.h"

#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <android-base/properties.h>
#include <brillo/secure_blob.h>
#include <libsnapshot/cow_writer.h>

#include "update_engine/common/cow_operation_convert.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/extent_map.h"
#include "update_engine/payload_consumer/file_descriptor.h"
#include "update_engine/payload_consumer/install_plan.h"
#include "update_engine/payload_consumer/snapshot_extent_writer.h"
#include "update_engine/payload_consumer/xor_extent_writer.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/extent_utils.h"
#include "update_engine/update_metadata.pb.h"

namespace chromeos_update_engine {
// Expected layout of COW file:
// === Beginning of Cow Image ===
// All Source Copy Operations
// ========== Label 0 ==========
// Operation 0 in PartitionUpdate
// ========== Label 1 ==========
// Operation 1 in PartitionUpdate
// ========== Label 2 ==========
// Operation 2 in PartitionUpdate
// ========== Label 3 ==========
// .
// .
// .

// When resuming, pass |next_op_index_| as the label to |InitializeAppend|.
// For example, suppose we finished writing all SOURCE_COPY operations and
// finished writing operation 2 completely, and the update is suspended when
// we are halfway through operation 3.
// |next_op_index_| would be 3, so we pass 3 as the label to
// |InitializeAppend|. The CowWriter will retain all data before label 3,
// which contains all of operation 2's data, but none of operation 3's data.

using android::snapshot::ICowWriter;
using ::google::protobuf::RepeatedPtrField;

// Compute XOR map, a map from dst extent to corresponding merge operation
static ExtentMap<const CowMergeOperation*, ExtentLess> ComputeXorMap(
    const RepeatedPtrField<CowMergeOperation>& merge_ops) {
  ExtentMap<const CowMergeOperation*, ExtentLess> xor_map;
  for (const auto& merge_op : merge_ops) {
    if (merge_op.type() == CowMergeOperation::COW_XOR) {
      xor_map.AddExtent(merge_op.dst_extent(), &merge_op);
    }
  }
  return xor_map;
}

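// The constructor precomputes |copy_blocks_|: the set of destination blocks
// covered by COW_COPY merge operations. PerformSourceCopyOperation() later
// checks this set to decide whether a block can be emitted as a CowCopy or
// has to be re-read from the source and written as a CowReplace.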
VABCPartitionWriter::VABCPartitionWriter(
    const PartitionUpdate& partition_update,
    const InstallPlan::Partition& install_part,
    DynamicPartitionControlInterface* dynamic_control,
    size_t block_size)
    : partition_update_(partition_update),
      install_part_(install_part),
      dynamic_control_(dynamic_control),
      block_size_(block_size),
      executor_(block_size),
      verified_source_fd_(block_size, install_part.source_path) {
  for (const auto& cow_op : partition_update_.merge_operations()) {
    if (cow_op.type() != CowMergeOperation::COW_COPY) {
      continue;
    }
    copy_blocks_.AddExtent(cow_op.dst_extent());
  }
  LOG(INFO) << "Partition `" << partition_update.partition_name() << "` has "
            << copy_blocks_.blocks() << " copy blocks";
}

bool VABCPartitionWriter::DoesDeviceSupportsXor() {
  return dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled();
}

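// Writes every COW_COPY merge operation into the COW up front. In-place
// copies (identical src/dst extents) are skipped. With userspace snapshots
// the whole extent is emitted as a single AddCopy(); otherwise blocks are
// emitted one at a time in reverse order, which snapuserd prefers.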
bool VABCPartitionWriter::WriteAllCopyOps() {
  const bool userSnapshots = android::base::GetBoolProperty(
      "ro.virtual_ab.userspace.snapshots.enabled", false);
  for (const auto& cow_op : partition_update_.merge_operations()) {
    if (cow_op.type() != CowMergeOperation::COW_COPY) {
      continue;
    }
    if (cow_op.dst_extent() == cow_op.src_extent()) {
      continue;
    }
    if (userSnapshots) {
      TEST_AND_RETURN_FALSE(cow_op.src_extent().num_blocks() != 0);
      TEST_AND_RETURN_FALSE(
          cow_writer_->AddCopy(cow_op.dst_extent().start_block(),
                               cow_op.src_extent().start_block(),
                               cow_op.src_extent().num_blocks()));
    } else {
      // Add blocks in reverse order, because snapuserd specifically prefers
      // this ordering. Since we already eliminated all self-overlapping
      // SOURCE_COPY operations during delta generation, this should be safe
      // to do.
      for (size_t i = cow_op.src_extent().num_blocks(); i > 0; i--) {
        TEST_AND_RETURN_FALSE(
            cow_writer_->AddCopy(cow_op.dst_extent().start_block() + i - 1,
                                 cow_op.src_extent().start_block() + i - 1));
      }
    }
  }
  return true;
}

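// Init() opens the verified source fd (when a source partition exists), opens
// the COW writer, and either resumes from |next_op_index| via
// InitializeAppend() or starts a fresh COW. For a fresh COW it also writes
// either the merge sequence (XOR-capable snapuserd) or all COPY operations up
// front, then adds label 0.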
bool VABCPartitionWriter::Init(const InstallPlan* install_plan,
                               bool source_may_exist,
                               size_t next_op_index) {
  if (dynamic_control_->GetVirtualAbCompressionXorFeatureFlag().IsEnabled()) {
    xor_map_ = ComputeXorMap(partition_update_.merge_operations());
    if (xor_map_.size() > 0) {
      LOG(INFO) << "Virtual AB Compression with XOR is enabled";
    } else {
      LOG(INFO) << "Device supports Virtual AB compression with XOR, but OTA "
                   "package does not.";
    }
  } else {
    LOG(INFO) << "Virtual AB Compression with XOR is disabled.";
  }
  TEST_AND_RETURN_FALSE(install_plan != nullptr);
  if (source_may_exist && install_part_.source_size > 0) {
    TEST_AND_RETURN_FALSE(!install_part_.source_path.empty());
    TEST_AND_RETURN_FALSE(verified_source_fd_.Open());
  }
  std::optional<std::string> source_path;
  if (!install_part_.source_path.empty()) {
    // TODO(zhangkelvin) Make |source_path| a std::optional<std::string>
    source_path = install_part_.source_path;
  }
  cow_writer_ = dynamic_control_->OpenCowWriter(
      install_part_.name, source_path, install_plan->is_resume);
  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);

  // ===== Resume case handling code goes here ====
  // It is possible that the SOURCE_COPY operations are already written but
  // |next_op_index_| is still 0. In this case we discard the previously
  // written SOURCE_COPY operations and start over.
  if (install_plan->is_resume && next_op_index > 0) {
    LOG(INFO) << "Resuming update on partition `"
              << partition_update_.partition_name() << "` op index "
              << next_op_index;
    TEST_AND_RETURN_FALSE(cow_writer_->InitializeAppend(next_op_index));
    return true;
  } else {
    TEST_AND_RETURN_FALSE(cow_writer_->Initialize());
  }

  // ==============================================
  if (!partition_update_.merge_operations().empty()) {
    if (IsXorEnabled()) {
      LOG(INFO) << "VABC XOR enabled for partition "
                << partition_update_.partition_name();
    }
    // When a merge sequence is present in the COW, snapuserd will merge
    // blocks in the order specified by the merge sequence op. Hence we have
    // the freedom of writing COPY operations out of order. Delay processing
    // of copy ops so that update_engine can be more responsive in progress
    // updates.
    if (DoesDeviceSupportsXor()) {
      LOG(INFO) << "Snapuserd supports XOR and merge sequence, writing merge "
                   "sequence and delaying COPY operations";
      TEST_AND_RETURN_FALSE(WriteMergeSequence(
          partition_update_.merge_operations(), cow_writer_.get()));
    } else {
      LOG(INFO) << "Snapuserd does not support merge sequence, writing all "
                   "COPY operations up front, this may take a few "
                   "minutes.";
      TEST_AND_RETURN_FALSE(WriteAllCopyOps());
    }
    cow_writer_->AddLabel(0);
  }
  return true;
}

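// Emits the per-block merge order that snapuserd will follow. The ordering
// rules below matter for correctness when an op's source and destination
// extents overlap. Illustrative example (not taken from any real payload): if
// src covers blocks 10-12 and dst covers blocks 11-13, merging in ascending
// order would overwrite block 11, which is still needed as the source for dst
// block 12, so the dst blocks must be merged in descending order 13, 12, 11.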
bool VABCPartitionWriter::WriteMergeSequence(
    const RepeatedPtrField<CowMergeOperation>& merge_sequence,
    ICowWriter* cow_writer) {
  std::vector<uint32_t> blocks_merge_order;
  for (const auto& merge_op : merge_sequence) {
    const auto& dst_extent = merge_op.dst_extent();
    const auto& src_extent = merge_op.src_extent();
    // In-place copies are basically no-ops; they do not need to be "merged"
    // at all, so don't include them in the merge sequence.
    if (merge_op.type() == CowMergeOperation::COW_COPY &&
        merge_op.src_extent() == merge_op.dst_extent()) {
      continue;
    }

    const bool extent_overlap =
        ExtentRanges::ExtentsOverlap(src_extent, dst_extent);
    // TODO(193863443) Remove this check once this feature
    // lands on all pixel devices.
    const bool is_ascending = android::base::GetBoolProperty(
        "ro.virtual_ab.userspace.snapshots.enabled", false);

    // If this is a self-overlapping op and |dst_extent| comes after
    // |src_extent|, we must write in reverse order for correctness.
    //
    // If this is a self-overlapping op and |dst_extent| comes before
    // |src_extent|, we must write in ascending order for correctness.
    //
    // If this isn't a self-overlapping op, write blocks in ascending order
    // if userspace snapshots are enabled.
    if (extent_overlap) {
      if (dst_extent.start_block() <= src_extent.start_block()) {
        for (size_t i = 0; i < dst_extent.num_blocks(); i++) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      } else {
        for (int i = dst_extent.num_blocks() - 1; i >= 0; i--) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      }
    } else {
      if (is_ascending) {
        for (size_t i = 0; i < dst_extent.num_blocks(); i++) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      } else {
        for (int i = dst_extent.num_blocks() - 1; i >= 0; i--) {
          blocks_merge_order.push_back(dst_extent.start_block() + i);
        }
      }
    }
  }
  return cow_writer->AddSequenceData(blocks_merge_order.size(),
                                     blocks_merge_order.data());
}

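// The base writer simply streams raw blocks into the COW via
// SnapshotExtentWriter; XOR-aware writing is layered on top in
// PerformDiffOperation() when applicable.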
std::unique_ptr<ExtentWriter> VABCPartitionWriter::CreateBaseExtentWriter() {
  return std::make_unique<SnapshotExtentWriter>(cow_writer_.get());
}

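// Zero/discard operations never touch the source partition; each destination
// extent is simply recorded as zero blocks in the COW.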
[[nodiscard]] bool VABCPartitionWriter::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  for (const auto& extent : operation.dst_extents()) {
    TEST_AND_RETURN_FALSE(
        cow_writer_->AddZeroBlocks(extent.start_block(), extent.num_blocks()));
  }
  return true;
}

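// Walks the source/destination extents block by block. Blocks already covered
// by a COW_COPY merge op are either emitted as CowCopy (when snapuserd
// supports the sequence op) or skipped (they were written up front in
// Init()). All other non-identity blocks are re-read from the verified source
// and written as CowReplace raw data.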
[[nodiscard]] bool VABCPartitionWriter::PerformSourceCopyOperation(
    const InstallOperation& operation, ErrorCode* error) {
  // COPY ops are already handled during Init(), no need to do actual work,
  // but we still want to verify that all blocks contain the expected data.
  auto source_fd = verified_source_fd_.ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);
  std::vector<CowOperation> converted;

  const auto& src_extents = operation.src_extents();
  const auto& dst_extents = operation.dst_extents();
  BlockIterator it1{src_extents};
  BlockIterator it2{dst_extents};
  const bool userSnapshots = android::base::GetBoolProperty(
      "ro.virtual_ab.userspace.snapshots.enabled", false);
  // For devices that do not support XOR, the sequence op is not supported
  // either, so all COPY operations are written up front in strict merge
  // order.
  const auto sequence_op_supported = DoesDeviceSupportsXor();
  while (!it1.is_end() && !it2.is_end()) {
    const auto src_block = *it1;
    const auto dst_block = *it2;
    ++it1;
    ++it2;
    if (src_block == dst_block) {
      continue;
    }
    if (copy_blocks_.ContainsBlock(dst_block)) {
      if (sequence_op_supported) {
        push_back(&converted, {CowOperation::CowCopy, src_block, dst_block, 1});
      }
    } else {
      push_back(&converted,
                {CowOperation::CowReplace, src_block, dst_block, 1});
    }
  }
  std::vector<uint8_t> buffer;
  for (const auto& cow_op : converted) {
    if (cow_op.op == CowOperation::CowCopy) {
      if (userSnapshots) {
        cow_writer_->AddCopy(
            cow_op.dst_block, cow_op.src_block, cow_op.block_count);
      } else {
        // Add blocks in reverse order, because snapuserd specifically prefers
        // this ordering. Since we already eliminated all self-overlapping
        // SOURCE_COPY operations during delta generation, this should be safe
        // to do.
        for (size_t i = cow_op.block_count; i > 0; i--) {
          TEST_AND_RETURN_FALSE(cow_writer_->AddCopy(cow_op.dst_block + i - 1,
                                                     cow_op.src_block + i - 1));
        }
      }
      continue;
    }
    buffer.resize(block_size_ * cow_op.block_count);
    ssize_t bytes_read = 0;
    TEST_AND_RETURN_FALSE(utils::ReadAll(source_fd,
                                         buffer.data(),
                                         block_size_ * cow_op.block_count,
                                         cow_op.src_block * block_size_,
                                         &bytes_read));
    if (bytes_read <= 0 || static_cast<size_t>(bytes_read) != buffer.size()) {
      LOG(ERROR) << "source_fd->Read failed: " << bytes_read;
      return false;
    }
    TEST_AND_RETURN_FALSE(cow_writer_->AddRawBlocks(
        cow_op.dst_block, buffer.data(), buffer.size()));
  }
  return true;
}

bool VABCPartitionWriter::PerformReplaceOperation(const InstallOperation& op,
                                                  const void* data,
                                                  size_t count) {
  // Set up the ExtentWriter stack based on the operation type.
  std::unique_ptr<ExtentWriter> writer = CreateBaseExtentWriter();

  return executor_.ExecuteReplaceOperation(op, std::move(writer), data, count);
}

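// Diff operations read old data through the verified source fd. When XOR is
// enabled, an XORExtentWriter is used so that destination blocks covered by
// |xor_map_| can be stored as XOR data against the source; otherwise the
// plain base extent writer is used.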
bool VABCPartitionWriter::PerformDiffOperation(
    const InstallOperation& operation,
    ErrorCode* error,
    const void* data,
    size_t count) {
  FileDescriptorPtr source_fd =
      verified_source_fd_.ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);
  TEST_AND_RETURN_FALSE(source_fd->IsOpen());

  std::unique_ptr<ExtentWriter> writer =
      IsXorEnabled() ? std::make_unique<XORExtentWriter>(
                           operation,
                           source_fd,
                           cow_writer_.get(),
                           xor_map_,
                           partition_update_.old_partition_info().size())
                     : CreateBaseExtentWriter();
  return executor_.ExecuteDiffOperation(
      operation, std::move(writer), source_fd, data, count);
}

void VABCPartitionWriter::CheckpointUpdateProgress(size_t next_op_index) {
  // No need to call fsync/sync, as CowWriter flushes after a label is added.
  // If cow_writer_ is null, that means Init() failed. This function shouldn't
  // be called if Init() fails.
  TEST_AND_RETURN(cow_writer_ != nullptr);
  cow_writer_->AddLabel(next_op_index);
}

[[nodiscard]] bool VABCPartitionWriter::FinishedInstallOps() {
  // Add a hardcoded magic label to indicate the end of all install ops. This
  // label is needed by filesystem verification, don't remove.
  TEST_AND_RETURN_FALSE(cow_writer_ != nullptr);
  TEST_AND_RETURN_FALSE(cow_writer_->AddLabel(kEndOfInstallLabel));
  TEST_AND_RETURN_FALSE(cow_writer_->Finalize());
  TEST_AND_RETURN_FALSE(cow_writer_->VerifyMergeOps());
  return true;
}

VABCPartitionWriter::~VABCPartitionWriter() {
  Close();
}

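// Close() finalizes and releases the COW writer if one is still open; calling
// it again afterwards is a no-op, so the destructor can invoke it
// unconditionally.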
int VABCPartitionWriter::Close() {
  if (cow_writer_) {
    LOG(INFO) << "Finalizing " << partition_update_.partition_name()
              << " COW image";
    cow_writer_->Finalize();
    cow_writer_ = nullptr;
  }
  return 0;
}

}  // namespace chromeos_update_engine