/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//! A module for writing a file from a trusted world to untrusted storage.
//!
//! Architectural Model:
//! * Trusted world: the writer, which holds a signing secret and some memory, but NO persistent
//!   storage.
//! * Untrusted world: persistent storage, assumed untrusted.
//! * An IPC mechanism between the trusted and untrusted worlds.
//!
//! Use cases:
//! * In the trusted world, we want to generate a large file, sign it, and share the signature for
//!   a third party to verify the file.
//! * In the trusted world, we want to read a previously signed file back with signature checking,
//!   without having to touch the whole file.
//!
//! Requirements:
//! * Communication between the trusted and untrusted worlds is not cheap, and files can be large.
//! * File writes may not be sequential, and neither are reads.
//!
//! Considering the above, a technique similar to fs-verity is used. fs-verity uses an alternative
//! hash function, a Merkle tree, to calculate the hash of file content. A file update at any
//! location will propagate the hash update from the leaf to the root node. Unlike fs-verity, which
//! assumes static files, we need to allow the file (and thus the tree) to update in order to
//! support writes.
//!
//! For the trusted world to generate a large file with random writes and hash it, the writer needs
//! to hold some private information and update the Merkle tree during a file write (or even when
//! the Merkle tree needs to be stashed to the untrusted storage).
//!
//! A write to the file must update the root hash. For the root hash to update, a tree walk from
//! the write location up to the root node is necessary. Importantly, when (part of) the Merkle
//! tree needs to be read from the untrusted storage (e.g. it is not yet verified in the cache),
//! the original path must be verified against the trusted signature before the update can happen.
//!
//! Denial-of-service is a known weakness if the untrusted storage decides to simply remove the
//! file, but there is nothing we can do about it in this architecture.
//!
//! A rollback attack is also possible, but it can be addressed with a rollback counter when
//! available.
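//!
//! A minimal usage sketch (illustrative only; `backing_file` is a placeholder for any
//! implementation of `ReadByChunk + RandomWrite`, e.g. a proxy to the untrusted storage):
//!
//! ```ignore
//! let editor = VerifiedFileEditor::new(backing_file);
//! // Writes go to the backing file; the in-memory Merkle leaves are updated alongside.
//! editor.write_at(&[1u8; 4096], 0)?;
//! // The fs-verity digest covers everything written so far and can be signed in the
//! // trusted world.
//! let digest = editor.calculate_fsverity_digest()?;
//! ```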

use std::io;
use std::sync::{Arc, RwLock};

use super::builder::MerkleLeaves;
use super::common::{Sha256Hash, SHA256_HASH_SIZE};
use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
use openssl::sha::{sha256, Sha256};

fn debug_assert_usize_is_u64() {
    // Since we don't need to support 32-bit CPUs, make an assertion to keep the conversions
    // between u64 and usize below easy. Otherwise, we would need to check
    // `divide_roundup(offset + buf.len()) <= usize::MAX` or handle `TryInto` errors.
    debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
}

/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
/// not be stored in a trusted environment. Only new, empty files are currently supported.
pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
    file: F,
    merkle_tree: Arc<RwLock<MerkleLeaves>>,
}

impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
    /// Wraps a supposedly new file for integrity protection.
    pub fn new(file: F) -> Self {
        Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
    }

    /// Returns the fs-verity digest size in bytes.
    pub fn get_fsverity_digest_size(&self) -> usize {
        SHA256_HASH_SIZE
    }

    /// Calculates the fs-verity digest of the current file.
    pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
        let merkle_tree = self.merkle_tree.read().unwrap();
        merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }

    fn read_backing_chunk_unverified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
    ) -> io::Result<usize> {
        self.file.read_chunk(chunk_index, buf)
    }

    fn read_backing_chunk_verified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
        merkle_tree_locked: &MerkleLeaves,
    ) -> io::Result<usize> {
        debug_assert_usize_is_u64();

        if merkle_tree_locked.is_index_valid(chunk_index as usize) {
            let size = self.read_backing_chunk_unverified(chunk_index, buf)?;

            // Ensure the returned buffer matches the known hash.
            let hash = sha256(buf);
            if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
            Ok(size)
        } else {
            Ok(0)
        }
    }

    fn new_hash_for_incomplete_write(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        // The buffer is initialized to 0 on purpose. To calculate the block hash, the data is
        // 0-padded to the block size. When a chunk read returns less than a full chunk, the
        // initial value conveniently serves as the padding.
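        //
        // For example (a sketch, assuming CHUNK_SIZE == 4096): a 5-byte write at
        // offset_from_alignment == 3 into a fresh chunk hashes 3 zero bytes, then the 5 source
        // bytes, then 4088 trailing zero bytes, matching the 0-padded block layout above.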
        let mut orig_data = [0u8; CHUNK_SIZE as usize];

        // If previous data exists, read back and verify against the known hash (since the
        // storage / remote server is not trusted).
        if merkle_tree.is_index_valid(output_chunk_index) {
            self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;

            // Verify original content
            let hash = sha256(&orig_data);
            if !merkle_tree.is_consistent(output_chunk_index, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
        }

        let mut ctx = Sha256::new();
        ctx.update(&orig_data[..offset_from_alignment]);
        ctx.update(source);
        ctx.update(&orig_data[offset_from_alignment + source.len()..]);
        Ok(ctx.finish())
    }

    fn new_chunk_hash(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        current_size: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        if current_size as u64 == CHUNK_SIZE {
            // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
            // write location.
            Ok(sha256(source))
        } else {
            // Case 2: For an incomplete write, calculate the hash based on previous data (if
            // any).
            self.new_hash_for_incomplete_write(
                source,
                offset_from_alignment,
                output_chunk_index,
                merkle_tree,
            )
        }
    }

    pub fn size(&self) -> u64 {
        self.merkle_tree.read().unwrap().file_size()
    }
}

impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
        debug_assert_usize_is_u64();

        // The write range may not be well-aligned with the chunk boundary. There are various cases
        // to deal with:
        // 1. A write of a full 4K chunk.
        // 2. A write of an incomplete chunk, possibly beyond the original EOF.
        //
        // Note that a write beyond EOF can create a hole. But we don't need to handle it here
        // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
        // default.

        // Now iterate on the input data, considering the alignment at the destination.
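        //
        // For example (a sketch, assuming CHUNK_SIZE == 4096): writing 8000 bytes at offset
        // 2048 is expected to be split into (output_offset, current_size) pieces of
        // (2048, 2048), (4096, 4096) and (8192, 1856), i.e. the unaligned tail of chunk 0,
        // all of chunk 1, and the unaligned head of chunk 2.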
        for (output_offset, current_size) in
            ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
        {
            // Lock the tree for the whole write for now. There may be room to improve to increase
            // throughput.
            let mut merkle_tree = self.merkle_tree.write().unwrap();

            let offset_in_buf = (output_offset - offset) as usize;
            let source = &buf[offset_in_buf..offset_in_buf + current_size];
            let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
            let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;

            let new_hash = match self.new_chunk_hash(
                source,
                offset_from_alignment,
                current_size,
                output_chunk_index,
                &mut merkle_tree,
            ) {
                Ok(hash) => hash,
                Err(e) => {
                    // Return early when any error happens before the write. Even if the hash is
                    // not consistent for the current chunk, we can still consider the earlier
                    // writes successful. Note that nothing persistent has been done in this
                    // iteration.
                    let written = output_offset - offset;
                    if written > 0 {
                        return Ok(written as usize);
                    }
                    return Err(e);
                }
            };

            // A failed, partial write here will make the backing file inconsistent with the (old)
            // hash. Nothing can be done within this writer, but at least it still maintains the
            // (original) integrity of the file. To match what write(2) describes for an error
            // case (though it's about direct I/O), "Partial data may be written ... should be
            // considered inconsistent", an error below is propagated.
            self.file.write_all_at(source, output_offset)?;

            // Update the hash only after the write succeeds. Note that this only attempts to keep
            // the tree consistent with what has been written, regardless of the actual state
            // beyond the writer.
            let size_at_least = offset.saturating_add(buf.len() as u64);
            merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
        }
        Ok(buf.len())
    }

    fn resize(&self, size: u64) -> io::Result<()> {
        debug_assert_usize_is_u64();

        let mut merkle_tree = self.merkle_tree.write().unwrap();
        // When truncating the file, we may need to recalculate the hash of the (new) last chunk.
        // Since the content is provided by the untrusted backend, we need to read the data back
        // first, verify it, then 0-pad the truncated portion for hashing. As an optimization, we
        // only need to read the data back if the new size isn't a multiple of CHUNK_SIZE (since
        // the hash is already correct).
        //
        // The same thing does not need to happen when the size is growing. Since the new extended
        // data is always 0, we can just resize the `MerkleLeaves`, where a new hash is always
        // calculated from 4096 zeros.
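        //
        // For example (a sketch, assuming CHUNK_SIZE == 4096): truncating to size 6000 keeps
        // chunk 0 intact, so only chunk 1 needs a new hash, computed over the verified first
        // 6000 % 4096 == 1904 bytes of that chunk followed by 2192 zero bytes.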
        if size < merkle_tree.file_size() && size % CHUNK_SIZE > 0 {
            let new_tail_size = (size % CHUNK_SIZE) as usize;
            let chunk_index = size / CHUNK_SIZE;
            if new_tail_size > 0 {
                let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
                let s = self.read_backing_chunk_verified(chunk_index, &mut buf, &merkle_tree)?;
                debug_assert!(new_tail_size <= s);

                let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
                let mut ctx = Sha256::new();
                ctx.update(&buf[..new_tail_size]);
                ctx.update(&zeros);
                let new_hash = ctx.finish();
                merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
            }
        }

        self.file.resize(size)?;
        merkle_tree.resize(size as usize);

        Ok(())
    }
}

impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        let merkle_tree = self.merkle_tree.read().unwrap();
        self.read_backing_chunk_verified(chunk_index, buf, &merkle_tree)
    }
}

#[cfg(test)]
mod tests {
    // Test data below can be generated by:
    // $ perl -e 'print "\x{00}" x 6000' > foo
    // $ perl -e 'print "\x{01}" x 5000' >> foo
    // $ fsverity digest foo
    use super::*;
    use anyhow::Result;
    use std::cell::RefCell;
    use std::convert::TryInto;

    struct InMemoryEditor {
        data: RefCell<Vec<u8>>,
        fail_read: bool,
    }

    impl InMemoryEditor {
        pub fn new() -> InMemoryEditor {
            InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
        }
    }

    impl RandomWrite for InMemoryEditor {
        fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
            let begin: usize =
                offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            let end = begin + buf.len();
            if end > self.data.borrow().len() {
                self.data.borrow_mut().resize(end, 0);
            }
            self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
            Ok(buf.len())
        }

        fn resize(&self, size: u64) -> io::Result<()> {
            let size: usize =
                size.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            self.data.borrow_mut().resize(size, 0);
            Ok(())
        }
    }

    impl ReadByChunk for InMemoryEditor {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            if self.fail_read {
                return Err(io::Error::new(io::ErrorKind::Other, "test!"));
            }

            let borrowed = self.data.borrow();
            let chunk = &borrowed
                .chunks(CHUNK_SIZE as usize)
                .nth(chunk_index as usize)
                .ok_or_else(|| {
                    io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("read_chunk out of bound: index {}", chunk_index),
                    )
                })?;
            buf[..chunk.len()].copy_from_slice(chunk);
            Ok(chunk.len())
        }
    }

    #[test]
    fn test_writer() -> Result<()> {
        let writer = InMemoryEditor::new();
        let buf = [1; 4096];
        assert_eq!(writer.data.borrow().len(), 0);

        assert_eq!(writer.write_at(&buf, 16384)?, 4096);
        assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);

        assert_eq!(writer.write_at(&buf, 2048)?, 4096);
        assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);

        assert_eq!(writer.data.borrow().len(), 16384 + 4096);
        Ok(())
    }

    #[test]
    fn test_verified_writer_no_write() -> Result<()> {
        // Verify the fs-verity hash without any write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_from_zero() -> Result<()> {
        // Verify a write of a full chunk.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
                .as_slice()
        );

        // Verify a write across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
                .as_slice()
        );

        // Verify another write across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_unaligned() -> Result<()> {
        // Verify small, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 3)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
                .as_slice()
        );

        // Verify bigger, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_with_hole() -> Result<()> {
        // Verify an aligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
                .as_slice()
        );

        // Verify an unaligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
                .as_slice()
        );

        // Just another example with a small write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_various_writes() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
                .as_slice()
        );
        assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_inconsistent_read() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Replace the expected hash of the first chunk (index 0). An incomplete write will fail
        // when it detects the inconsistent read.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; SHA256_HASH_SIZE];
            merkle_tree.update_hash(0, &overriding_hash, 8192);
        }
        assert!(file.write_at(&[1; 1], 2048).is_err());

        // A write of a full chunk can still succeed. It also fixes the inconsistency.
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);

        // Replace the expected hash of the second chunk (index 1). A write range starting in the
        // previous chunk can still succeed, but it returns early once it hits the inconsistent
        // read. A resumed write will then fail, since no bytes can be written due to the same
        // inconsistency.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; SHA256_HASH_SIZE];
            merkle_tree.update_hash(1, &overriding_hash, 8192);
        }
        assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
        assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
        Ok(())
    }

    #[test]
    fn test_verified_writer_failed_read_back() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // When a read back is needed, a read failure will cause the write to fail.
        assert!(file.write_at(&[1; 1], 2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_same_size() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        assert!(file.resize(2048).is_ok());
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_grow() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        // Resize should grow with 0s.
        assert!(file.resize(4096).is_ok());
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // Truncate.
        file.resize(2048)?;
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_with_read_failure() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // A truncate needs a read back. If the read fails, the resize should fail.
        assert!(file.resize(2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_to_chunk_boundary() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Truncate to a chunk boundary. A read error doesn't matter since we won't need to
        // recalculate the leaf hash.
        file.resize(4096)?;
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
                .as_slice()
        );
        Ok(())
    }

    fn to_u8_vec(hex_str: &str) -> Vec<u8> {
        assert!(hex_str.len() % 2 == 0);
        (0..hex_str.len())
            .step_by(2)
            .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
            .collect()
    }
}