/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use std::io;
use thiserror::Error;

use crate::crypto::{CryptoError, Sha256Hasher};
use crate::reader::ReadOnlyDataByChunk;

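// A chunk's worth of zeros, used to pad a partial chunk up to the full chunk size for hashing.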
const ZEROS: [u8; 4096] = [0u8; 4096];

#[derive(Error, Debug)]
pub enum FsverityError {
    #[error("Cannot verify a block")]
    CannotVerify,
    #[error("I/O error")]
    Io(#[from] io::Error),
    #[error("Crypto")]
    UnexpectedCryptoError(#[from] CryptoError),
}

type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];

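/// Integer division that rounds up, e.g. divide_roundup(4096, 4096) == 1 and
/// divide_roundup(4097, 4096) == 2.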
fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
    (dividend + divisor - 1) / divisor
}

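/// Hashes a chunk of data, zero-padded up to `pad_to` bytes. fs-verity hashes every block at
/// the full block size, so a partial (last) block is padded with zeros before hashing.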
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
    let padding_size = pad_to - chunk.len();
    Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
}

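/// Verifies the data `chunk` at `chunk_index` against `merkle_tree`. The chunk is hashed (with
/// padding), then the walk from the leaf level to the root checks that each computed hash
/// matches the hash stored in its parent node. On success, returns the hash of the root node,
/// which the caller may compare against a trusted root hash.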
#[allow(dead_code)]
fn verity_check<T: ReadOnlyDataByChunk>(
    chunk: &[u8],
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<HashBuffer, FsverityError> {
    // The caller should not be able to produce a chunk in the first place if `file_size` is 0.
    // The current implementation expects to crash when a `ReadOnlyDataByChunk` implementation
    // reads beyond the file size, including for an empty file.
    assert_ne!(file_size, 0);

    let chunk_hash = hash_with_padding(chunk, T::CHUNK_SIZE as usize)?;

    fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
        chunk_hash,
        |actual_hash, result| {
            let (merkle_chunk, hash_offset_in_chunk) = result?;
            let expected_hash =
                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
            if actual_hash != expected_hash {
                return Err(FsverityError::CannotVerify);
            }
            Ok(hash_with_padding(&merkle_chunk, T::CHUNK_SIZE as usize)?)
        },
    )
}

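/// Returns ceil(log128(num)), or `None` when `num` is 0, e.g. log128_ceil(1) == Some(0),
/// log128_ceil(128) == Some(1), and log128_ceil(129) == Some(2).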
fn log128_ceil(num: u64) -> Option<u64> {
    match num {
        0 => None,
        n => Some(divide_roundup(64 - (n - 1).leading_zeros() as u64, 7)),
    }
}

/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
/// from the leaf to the root. The iterator carries the slice of the chunk/node as well as the
/// offset of the child node's hash. It is up to the caller to use the node and hash, e.g. for
/// the actual verification.
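///
/// For example, for a 4 MiB file with 4 KiB chunks, the 1024 leaf hashes (32 bytes each) fill
/// 8 hash pages, so the tree has two levels. The root level is laid out first in `merkle_tree`;
/// the leaf-level hash of data chunk 0 thus lives at byte offset 4096, and its parent hash at
/// offset 0 of the root node.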
#[allow(clippy::needless_collect)]
fn fsverity_walk<'a, T: ReadOnlyDataByChunk>(
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &'a T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + 'a, FsverityError> {
    let hashes_per_node = T::CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
    let hash_pages = divide_roundup(file_size, hashes_per_node * T::CHUNK_SIZE);
    debug_assert_eq!(hashes_per_node, 128u64);
    let max_level = log128_ceil(hash_pages).expect("file should not be empty") as u32;
    let root_to_leaf_steps = (0..=max_level)
        .rev()
        .map(|x| {
            let leaves_per_hash = hashes_per_node.pow(x);
            let leaves_size_per_hash = T::CHUNK_SIZE * leaves_per_hash;
            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
            let level_size = nodes_at_level * T::CHUNK_SIZE;
            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
            (level_size, offset_in_level)
        })
        .scan(0, |level_offset, (level_size, offset_in_level)| {
            let this_level_offset = *level_offset;
            *level_offset += level_size;
            let global_hash_offset = this_level_offset + offset_in_level;
            Some(global_hash_offset)
        })
        .map(|global_hash_offset| {
            let chunk_index = global_hash_offset / T::CHUNK_SIZE;
            let hash_offset_in_chunk = (global_hash_offset % T::CHUNK_SIZE) as usize;
            (chunk_index, hash_offset_in_chunk)
        })
        .collect::<Vec<_>>();

    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
        let mut merkle_chunk = [0u8; 4096];
        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
        Ok((merkle_chunk, hash_offset_in_chunk))
    }))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::reader::ReadOnlyDataByChunk;
    use anyhow::Result;

    fn total_chunk_number(file_size: u64) -> u64 {
        (file_size + 4095) / 4096
    }

    #[test]
    fn fsverity_verify_full_read_4k() -> Result<()> {
        let file = &include_bytes!("../testdata/input.4k")[..];
        let merkle_tree = &include_bytes!("../testdata/input.4k.merkle_dump")[..];

        let mut buf = [0u8; 4096];

        for i in 0..total_chunk_number(file.len() as u64) {
            let size = file.read_chunk(i, &mut buf[..])?;
            assert!(verity_check(&buf[..size], i, file.len() as u64, &merkle_tree).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4k1() -> Result<()> {
        let file = &include_bytes!("../testdata/input.4k1")[..];
        let merkle_tree = &include_bytes!("../testdata/input.4k1.merkle_dump")[..];

        let mut buf = [0u8; 4096];
        for i in 0..total_chunk_number(file.len() as u64) {
            let size = file.read_chunk(i, &mut buf[..])?;
            assert!(verity_check(&buf[..size], i, file.len() as u64, &merkle_tree).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_full_read_4m() -> Result<()> {
        let file = &include_bytes!("../testdata/input.4m")[..];
        let merkle_tree = &include_bytes!("../testdata/input.4m.merkle_dump")[..];

        let mut buf = [0u8; 4096];
        for i in 0..total_chunk_number(file.len() as u64) {
            let size = file.read_chunk(i, &mut buf[..])?;
            assert!(verity_check(&buf[..size], i, file.len() as u64, &merkle_tree).is_ok());
        }
        Ok(())
    }

    #[test]
    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
        let file = &include_bytes!("../testdata/input.4m")[..];
        // First leaf node is corrupted.
        let merkle_tree = &include_bytes!("../testdata/input.4m.merkle_dump.bad")[..];

        // A broken node at the lowest level (a 4K chunk that contains 128 sha256 hashes) will
        // fail the verification of the data chunks it covers, but not of the chunks outside
        // that range.
        let mut buf = [0u8; 4096];
        let num_hashes = 4096 / 32;
        let last_index = num_hashes;
        for i in 0..last_index {
            let size = file.read_chunk(i, &mut buf[..])?;
            assert!(verity_check(&buf[..size], i, file.len() as u64, &merkle_tree).is_err());
        }
        let size = file.read_chunk(last_index, &mut buf[..])?;
        assert!(verity_check(&buf[..size], last_index, file.len() as u64, &merkle_tree).is_ok());
        Ok(())
    }
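
    #[test]
    fn arithmetic_helpers() {
        // Spot-check the pure helpers above; expected values are worked out by hand from
        // their definitions.
        assert_eq!(divide_roundup(4096, 4096), 1);
        assert_eq!(divide_roundup(4097, 4096), 2);
        assert_eq!(log128_ceil(0), None);
        assert_eq!(log128_ceil(1), Some(0));
        assert_eq!(log128_ceil(128), Some(1));
        assert_eq!(log128_ceil(129), Some(2));
        assert_eq!(log128_ceil(128 * 128), Some(2));
    }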
}