blob: 19d9159814d6788a901bd0d04908507e697fb6f5 [file] [log] [blame]
Victor Hsiehac4f3f42021-02-26 12:35:58 -08001/*
2 * Copyright (C) 2021 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//! A module for writing to a file from a trusted world to an untrusted storage.
18//!
19//! Architectural Model:
20//! * Trusted world: the writer, a signing secret, has some memory, but NO persistent storage.
21//! * Untrusted world: persistent storage, assuming untrusted.
22//! * IPC mechanism between trusted and untrusted world
23//!
24//! Use cases:
25//! * In the trusted world, we want to generate a large file, sign it, and share the signature for
26//! a third party to verify the file.
27//! * In the trusted world, we want to read a previously signed file back with signature check
28//! without having to touch the whole file.
29//!
30//! Requirements:
31//! * Communication between trusted and untrusted world is not cheap, and files can be large.
//! * A file write pattern may not be sequential, and neither may a read pattern.
33//!
34//! Considering the above, a technique similar to fs-verity is used. fs-verity uses an alternative
35//! hash function, a Merkle tree, to calculate the hash of file content. A file update at any
36//! location will propagate the hash update from the leaf to the root node. Unlike fs-verity, which
37//! assumes static files, to support write operation, we need to allow the file (thus tree) to
38//! update.
39//!
40//! For the trusted world to generate a large file with random write and hash it, the writer needs
41//! to hold some private information and update the Merkle tree during a file write (or even when
42//! the Merkle tree needs to be stashed to the untrusted storage).
43//!
44//! A write to a file must update the root hash. In order for the root hash to update, a tree
45//! walk to update from the write location to the root node is necessary. Importantly, in case when
46//! (part of) the Merkle tree needs to be read from the untrusted storage (e.g. not yet verified in
//! cache), the original path must be verified by the trusted signature before the update can happen.
48//!
49//! Denial-of-service is a known weakness if the untrusted storage decides to simply remove the
50//! file. But there is nothing we can do in this architecture.
51//!
52//! Rollback attack is another possible attack, but can be addressed with a rollback counter when
53//! possible.
54
Victor Hsieh3e35f9a2022-03-03 22:17:23 +000055use log::warn;
Victor Hsiehac4f3f42021-02-26 12:35:58 -080056use std::io;
57use std::sync::{Arc, RwLock};
58
Victor Hsieh09e26262021-03-03 16:00:55 -080059use super::builder::MerkleLeaves;
Victor Hsiehac4f3f42021-02-26 12:35:58 -080060use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
61use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
Victor Hsiehd0bb5d32021-03-19 12:48:03 -070062use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
Victor Hsiehac4f3f42021-02-26 12:35:58 -080063
64// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
65// mapping below.
66impl From<CryptoError> for io::Error {
67 fn from(error: CryptoError) -> Self {
68 io::Error::new(io::ErrorKind::Other, error)
69 }
70}
71
/// Debug-build assertion that `usize` is 64-bit wide.
///
/// This makes the `u64` <-> `usize` casts throughout this module lossless, so we
/// don't have to check `divide_roundup(offset + buf.len()) <= usize::MAX` or
/// handle `TryInto` errors at every conversion site.
fn debug_assert_usize_is_u64() {
    debug_assert_eq!(usize::MAX as u64, u64::MAX, "Only 64-bit arch is supported");
}
78
/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
/// not be stored in a trusted environment. Only new, empty files are currently supported.
pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
    // The backing storage for file content, assumed untrusted.
    file: F,
    // Trusted, in-memory hashes of every file chunk, kept in sync on each write/resize.
    // Behind an `RwLock` so verification reads can proceed concurrently with each other.
    merkle_tree: Arc<RwLock<MerkleLeaves>>,
}
85
impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
    /// Wraps a supposedly new file for integrity protection.
    pub fn new(file: F) -> Self {
        Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
    }

    /// Returns the fs-verity digest size in bytes.
    pub fn get_fsverity_digest_size(&self) -> usize {
        Sha256Hasher::HASH_SIZE
    }

    /// Calculates the fs-verity digest of the current file.
    pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
        let merkle_tree = self.merkle_tree.read().unwrap();
        merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }

    // Reads a chunk from the backing storage as-is, with no integrity check. Callers are
    // responsible for verifying the returned content when it matters.
    fn read_backing_chunk_unverified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
    ) -> io::Result<usize> {
        self.file.read_chunk(chunk_index, buf)
    }

    // Reads a chunk from the backing storage and verifies it against the trusted Merkle
    // leaves. Returns `InvalidData` if the content does not match the known hash. A read at
    // an index beyond the known file size is treated as empty (returns 0), even if the
    // untrusted storage returns data there.
    fn read_backing_chunk_verified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
    ) -> io::Result<usize> {
        let size = self.read_backing_chunk_unverified(chunk_index, buf)?;

        // Ensure the returned buffer matches the known hash.
        let merkle_tree = self.merkle_tree.read().unwrap();
        debug_assert_usize_is_u64();
        if merkle_tree.is_index_valid(chunk_index as usize) {
            let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
            if !merkle_tree.is_consistent(chunk_index as usize, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
            Ok(size)
        } else {
            if size != 0 {
                // This is unexpected. For any reason that the file is changed and doesn't match
                // the known state, ignore it at the moment. We can still generate correct
                // fs-verity digest for an output file.
                warn!(
                    "Ignoring the received {} bytes for index {} beyond the known file size",
                    size, chunk_index,
                );
            }
            Ok(0)
        }
    }

    // Computes the new hash of a chunk that is only partially overwritten by `source`.
    // Pre-existing data (if any) is read back and verified against the known hash first,
    // then the hash is computed over [kept prefix] + [source] + [kept suffix / 0-padding].
    fn new_hash_for_incomplete_write(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        // The buffer is initialized to 0 purposely. To calculate the block hash, the data is
        // 0-padded to the block size. When a chunk read is less than a chunk, the initial value
        // conveniently serves the padding purpose.
        let mut orig_data = [0u8; CHUNK_SIZE as usize];

        // If previous data exists, read back and verify against the known hash (since the
        // storage / remote server is not trusted).
        if merkle_tree.is_index_valid(output_chunk_index) {
            self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;

            // Verify original content
            let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
            if !merkle_tree.is_consistent(output_chunk_index, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
        }

        Ok(Sha256Hasher::new()?
            .update(&orig_data[..offset_from_alignment])?
            .update(source)?
            .update(&orig_data[offset_from_alignment + source.len()..])?
            .finalize()?)
    }

    // Returns the new hash of the chunk at `output_chunk_index` after `source` is written at
    // `offset_from_alignment` within that chunk.
    fn new_chunk_hash(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        current_size: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        if current_size as u64 == CHUNK_SIZE {
            // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
            // write location.
            Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
        } else {
            // Case 2: For an incomplete write, calculate the hash based on previous data (if
            // any).
            self.new_hash_for_incomplete_write(
                source,
                offset_from_alignment,
                output_chunk_index,
                merkle_tree,
            )
        }
    }

    /// Returns the current file size as tracked by the trusted Merkle tree.
    pub fn size(&self) -> u64 {
        self.merkle_tree.read().unwrap().file_size()
    }
}
200
impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
    // Writes `buf` at `offset`, updating the trusted Merkle leaves chunk by chunk. On a
    // mid-write verification error, returns the number of bytes successfully written so far
    // (short write) if any, otherwise the error.
    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
        debug_assert_usize_is_u64();

        // The write range may not be well-aligned with the chunk boundary. There are various cases
        // to deal with:
        //  1. A write of a full 4K chunk.
        //  2. A write of an incomplete chunk, possibly beyond the original EOF.
        //
        // Note that a write beyond EOF can create a hole. But we don't need to handle it here
        // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
        // default.

        // Now iterate on the input data, considering the alignment at the destination.
        for (output_offset, current_size) in
            ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
        {
            // Lock the tree for the whole write for now. There may be room to improve to increase
            // throughput.
            let mut merkle_tree = self.merkle_tree.write().unwrap();

            let offset_in_buf = (output_offset - offset) as usize;
            let source = &buf[offset_in_buf as usize..offset_in_buf as usize + current_size];
            let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
            let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;

            let new_hash = match self.new_chunk_hash(
                source,
                offset_from_alignment,
                current_size,
                output_chunk_index,
                &mut merkle_tree,
            ) {
                Ok(hash) => hash,
                Err(e) => {
                    // Return early when any error happens before the write. Even if the hash is
                    // not consistent for the current chunk, we can still consider the earlier
                    // writes successful. Note that nothing persistent has been done in this
                    // iteration.
                    let written = output_offset - offset;
                    if written > 0 {
                        return Ok(written as usize);
                    }
                    return Err(e);
                }
            };

            // A failed, partial write here will make the backing file inconsistent to the (old)
            // hash. Nothing can be done within this writer, but at least it still maintains the
            // (original) integrity for the file. To match what write(2) describes for an error
            // case (though it's about direct I/O), "Partial data may be written ... should be
            // considered inconsistent", an error below is propagated.
            self.file.write_all_at(source, output_offset)?;

            // Update the hash only after the write succeeds. Note that this only attempts to keep
            // the tree consistent to what has been written regardless the actual state beyond the
            // writer.
            let size_at_least = offset.saturating_add(buf.len() as u64);
            merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
        }
        Ok(buf.len())
    }

    // Resizes the file, keeping the Merkle leaves in sync. Shrinking to a non-chunk-aligned
    // size requires reading back (unverified) and re-hashing the new last chunk.
    fn resize(&self, size: u64) -> io::Result<()> {
        debug_assert_usize_is_u64();

        let mut merkle_tree = self.merkle_tree.write().unwrap();
        // In case when we are truncating the file, we may need to recalculate the hash of the (new)
        // last chunk. Since the content is provided by the untrusted backend, we need to read the
        // data back first, verify it, then override the truncated portion with 0-padding for
        // hashing. As an optimization, we only need to read the data back if the new size isn't a
        // multiple of CHUNK_SIZE (since the hash is already correct).
        //
        // The same thing does not need to happen when the size is growing. Since the new extended
        // data is always 0, we can just resize the `MerkleLeaves`, where a new hash is always
        // calculated from 4096 zeros.
        if size < merkle_tree.file_size() && size % CHUNK_SIZE > 0 {
            let new_tail_size = (size % CHUNK_SIZE) as usize;
            let chunk_index = size / CHUNK_SIZE;
            if new_tail_size > 0 {
                let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
                let s = self.read_backing_chunk_unverified(chunk_index, &mut buf)?;
                debug_assert!(new_tail_size <= s);

                // Hash the retained prefix followed by 0-padding up to the chunk size.
                let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
                let new_hash = Sha256Hasher::new()?
                    .update(&buf[..new_tail_size])?
                    .update(&zeros)?
                    .finalize()?;
                merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
            }
        }

        self.file.resize(size)?;
        merkle_tree.resize(size as usize);

        Ok(())
    }
}
299
impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
    // Reads are always served through the verified path, so any chunk returned to the caller
    // has been checked against the trusted Merkle leaves.
    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        self.read_backing_chunk_verified(chunk_index, buf)
    }
}
305
#[cfg(test)]
mod tests {
    // Test data below can be generated by:
    //  $ perl -e 'print "\x{00}" x 6000' > foo
    //  $ perl -e 'print "\x{01}" x 5000' >> foo
    //  $ fsverity digest foo
    use super::*;
    use anyhow::Result;
    use std::cell::RefCell;
    use std::convert::TryInto;

    // A simple in-memory backend implementing `RandomWrite` and `ReadByChunk`, with a flag
    // to simulate read failures from the untrusted storage.
    struct InMemoryEditor {
        data: RefCell<Vec<u8>>,
        fail_read: bool,
    }

    impl InMemoryEditor {
        pub fn new() -> InMemoryEditor {
            InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
        }
    }

    impl RandomWrite for InMemoryEditor {
        fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
            let begin: usize =
                offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            let end = begin + buf.len();
            // Grow (zero-filled) to cover the write range, creating a "hole" if needed.
            if end > self.data.borrow().len() {
                self.data.borrow_mut().resize(end, 0);
            }
            self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
            Ok(buf.len())
        }

        fn resize(&self, size: u64) -> io::Result<()> {
            let size: usize =
                size.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            self.data.borrow_mut().resize(size, 0);
            Ok(())
        }
    }

    impl ReadByChunk for InMemoryEditor {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            if self.fail_read {
                return Err(io::Error::new(io::ErrorKind::Other, "test!"));
            }

            let borrowed = self.data.borrow();
            let chunk = &borrowed
                .chunks(CHUNK_SIZE as usize)
                .nth(chunk_index as usize)
                .ok_or_else(|| {
                    io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("read_chunk out of bound: index {}", chunk_index),
                    )
                })?;
            buf[..chunk.len()].copy_from_slice(chunk);
            Ok(chunk.len())
        }
    }

    #[test]
    fn test_writer() -> Result<()> {
        let writer = InMemoryEditor::new();
        let buf = [1; 4096];
        assert_eq!(writer.data.borrow().len(), 0);

        assert_eq!(writer.write_at(&buf, 16384)?, 4096);
        assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);

        assert_eq!(writer.write_at(&buf, 2048)?, 4096);
        assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);

        assert_eq!(writer.data.borrow().len(), 16384 + 4096);
        Ok(())
    }

    #[test]
    fn test_verified_writer_no_write() -> Result<()> {
        // Verify fs-verity hash without any write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_from_zero() -> Result<()> {
        // Verify a write of a full chunk.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
                .as_slice()
        );

        // Verify a write of across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
                .as_slice()
        );

        // Verify another write of across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_unaligned() -> Result<()> {
        // Verify small, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 3)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
                .as_slice()
        );

        // Verify bigger, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_with_hole() -> Result<()> {
        // Verify an aligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
                .as_slice()
        );

        // Verify an unaligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
                .as_slice()
        );

        // Just another example with a small write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_various_writes() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
                .as_slice()
        );
        assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_inconsistent_read() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Replace the expected hash of the first/0-th chunk. An incomplete write will fail when it
        // detects the inconsistent read.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
            merkle_tree.update_hash(0, &overriding_hash, 8192);
        }
        assert!(file.write_at(&[1; 1], 2048).is_err());

        // A write of full chunk can still succeed. Also fixed the inconsistency.
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);

        // Replace the expected hash of the second/1-th chunk. A write range from previous chunk can
        // still succeed, but returns early due to an inconsistent read but still successfully. A
        // resumed write will fail since no bytes can be written due to the same inconsistency.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
            merkle_tree.update_hash(1, &overriding_hash, 8192);
        }
        assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
        assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
        Ok(())
    }

    #[test]
    fn test_verified_writer_failed_read_back() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // When a read back is needed, a read failure will fail to write.
        assert!(file.write_at(&[1; 1], 2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_same_size() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        assert!(file.resize(2048).is_ok());
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_grow() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        // Resize should grow with 0s.
        assert!(file.resize(4096).is_ok());
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // Truncate.
        file.resize(2048)?;
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_with_read_failure() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // A truncate needs a read back. If the read fails, the resize should fail.
        assert!(file.resize(2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_to_chunk_boundary() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Truncate to a chunk boundary. A read error doesn't matter since we won't need to
        // recalculate the leaf hash.
        file.resize(4096)?;
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
                .as_slice()
        );
        Ok(())
    }

    // Decodes a lowercase hex string (of even length) into its byte values.
    fn to_u8_vec(hex_str: &str) -> Vec<u8> {
        assert!(hex_str.len() % 2 == 0);
        (0..hex_str.len())
            .step_by(2)
            .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
            .collect()
    }
}