/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use libc::EIO;
use std::io;

use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hasher};
use crate::file::{ChunkBuffer, ReadByChunk};

/// A zero-filled chunk, used to pad partial chunks up to `CHUNK_SIZE` before hashing.
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];

/// A raw SHA-256 digest (fixed-size byte array).
type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
28
29fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
30 let padding_size = pad_to - chunk.len();
Chris Wailes68c39f82021-07-27 16:03:44 -070031 Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
Victor Hsiehdde17902021-02-26 12:35:31 -080032}
33
Victor Hsiehd0bb5d32021-03-19 12:48:03 -070034fn verity_check<T: ReadByChunk>(
Victor Hsiehdde17902021-02-26 12:35:31 -080035 chunk: &[u8],
36 chunk_index: u64,
37 file_size: u64,
38 merkle_tree: &T,
39) -> Result<HashBuffer, FsverityError> {
40 // The caller should not be able to produce a chunk at the first place if `file_size` is 0. The
Victor Hsiehd0bb5d32021-03-19 12:48:03 -070041 // current implementation expects to crash when a `ReadByChunk` implementation reads
Victor Hsiehdde17902021-02-26 12:35:31 -080042 // beyond the file size, including empty file.
43 assert_ne!(file_size, 0);
44
Chris Wailes68c39f82021-07-27 16:03:44 -070045 let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
Victor Hsiehdde17902021-02-26 12:35:31 -080046
Victor Hsieh35dfa1e2022-01-12 17:03:35 -080047 // When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
48 // hash of the file content, plus padding.
49 if file_size <= CHUNK_SIZE {
50 return Ok(chunk_hash);
51 }
52
Victor Hsiehdde17902021-02-26 12:35:31 -080053 fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
54 chunk_hash,
55 |actual_hash, result| {
56 let (merkle_chunk, hash_offset_in_chunk) = result?;
57 let expected_hash =
58 &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
59 if actual_hash != expected_hash {
60 return Err(FsverityError::CannotVerify);
61 }
62 Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
63 },
64 )
65}
66
/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
/// from the leaf to the root. The iterator carries the slice of the chunk/node as well as the
/// offset of the child node's hash. It is up to the iterator user to use the node and hash,
/// e.g. for the actual verification.
#[allow(clippy::needless_collect)]
fn fsverity_walk<T: ReadByChunk>(
    chunk_index: u64,
    file_size: u64,
    merkle_tree: &T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
    // Each 4K tree node holds CHUNK_SIZE / 32 = 128 SHA-256 hashes of its children.
    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
    debug_assert_eq!(hashes_per_node, 128u64);
    let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
    // First compute, for every level from the root (deepest fan-out) down to the leaves,
    // where in the serialized tree the hash covering `chunk_index` lives.
    let root_to_leaf_steps = (0..=max_level)
        .rev()
        .map(|x| {
            // At level `x` (counting 0 = leaf), one hash covers 128^x leaves.
            let leaves_per_hash = hashes_per_node.pow(x);
            let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
            let level_size = nodes_at_level * CHUNK_SIZE;
            // Byte offset of the relevant hash within its level.
            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
            (level_size, offset_in_level)
        })
        // Levels are serialized root-first, so accumulate level sizes to turn the
        // per-level offset into a global offset into the tree blob.
        .scan(0, |level_offset, (level_size, offset_in_level)| {
            let this_level_offset = *level_offset;
            *level_offset += level_size;
            let global_hash_offset = this_level_offset + offset_in_level;
            Some(global_hash_offset)
        })
        // Convert the global byte offset into (tree chunk index, offset within chunk).
        .map(|global_hash_offset| {
            let chunk_index = global_hash_offset / CHUNK_SIZE;
            let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
            (chunk_index, hash_offset_in_chunk)
        })
        .collect::<Vec<_>>(); // Needs to collect first to be able to reverse below.

    // Reverse so the returned iterator yields leaf-to-root, reading each tree node lazily.
    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
        let mut merkle_chunk = [0u8; 4096];
        // read_chunk is supposed to return a full chunk, or an incomplete one at the end of the
        // file. In the incomplete case, the hash is calculated with 0-padding to the chunk size.
        // Therefore, we don't need to check the returned size here.
        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
        Ok((merkle_chunk, hash_offset_in_chunk))
    }))
}
113
Victor Hsiehd0bb5d32021-03-19 12:48:03 -0700114pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
Victor Hsiehe8137e32022-02-11 22:14:12 +0000115 pub file_size: u64,
Victor Hsiehdde17902021-02-26 12:35:31 -0800116 chunked_file: F,
Victor Hsiehdde17902021-02-26 12:35:31 -0800117 merkle_tree: M,
118 root_hash: HashBuffer,
119}
120
Victor Hsiehd0bb5d32021-03-19 12:48:03 -0700121impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
Victor Hsieh5deba522022-01-10 17:18:40 -0800122 pub fn new(
Victor Hsiehdde17902021-02-26 12:35:31 -0800123 chunked_file: F,
124 file_size: u64,
Victor Hsieh5deba522022-01-10 17:18:40 -0800125 expected_digest: &[u8],
Victor Hsiehdde17902021-02-26 12:35:31 -0800126 merkle_tree: M,
Victor Hsieh09e26262021-03-03 16:00:55 -0800127 ) -> Result<VerifiedFileReader<F, M>, FsverityError> {
Victor Hsiehdde17902021-02-26 12:35:31 -0800128 let mut buf = [0u8; CHUNK_SIZE as usize];
Victor Hsieh35dfa1e2022-01-12 17:03:35 -0800129 if file_size <= CHUNK_SIZE {
130 let _size = chunked_file.read_chunk(0, &mut buf)?;
131 // The rest of buffer is 0-padded.
132 } else {
133 let size = merkle_tree.read_chunk(0, &mut buf)?;
134 if buf.len() != size {
135 return Err(FsverityError::InsufficientData(size));
136 }
Victor Hsiehdde17902021-02-26 12:35:31 -0800137 }
138 let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
Victor Hsieh5deba522022-01-10 17:18:40 -0800139 if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
140 // Once verified, use the root_hash for verification going forward.
Victor Hsieh09e26262021-03-03 16:00:55 -0800141 Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
Victor Hsiehdde17902021-02-26 12:35:31 -0800142 } else {
Victor Hsieh5deba522022-01-10 17:18:40 -0800143 Err(FsverityError::InvalidDigest)
Victor Hsiehdde17902021-02-26 12:35:31 -0800144 }
145 }
146}
147
Victor Hsiehd0bb5d32021-03-19 12:48:03 -0700148impl<F: ReadByChunk, M: ReadByChunk> ReadByChunk for VerifiedFileReader<F, M> {
149 fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
Victor Hsiehdde17902021-02-26 12:35:31 -0800150 let size = self.chunked_file.read_chunk(chunk_index, buf)?;
151 let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
152 .map_err(|_| io::Error::from_raw_os_error(EIO))?;
153 if root_hash != self.root_hash {
154 Err(io::Error::from_raw_os_error(EIO))
155 } else {
156 Ok(size)
157 }
158 }
159}
160
#[cfg(test)]
mod tests {
    use super::*;
    use crate::file::ReadByChunk;
    use anyhow::Result;
    use authfs_fsverity_metadata::{parse_fsverity_metadata, FSVerityMetadata};
    use std::cmp::min;
    use std::fs::File;
    use std::os::unix::fs::FileExt;

    // A `ReadByChunk` backed by a local file, used as the content source in tests.
    struct LocalFileReader {
        file: File,
        size: u64,
    }

    impl LocalFileReader {
        // Wraps an open file, caching its size from metadata.
        fn new(file: File) -> io::Result<LocalFileReader> {
            let size = file.metadata()?.len();
            Ok(LocalFileReader { file, size })
        }

        // Returns the file size in bytes.
        fn len(&self) -> u64 {
            self.size
        }
    }

    impl ReadByChunk for LocalFileReader {
        // Reads chunk `chunk_index` from the file. Returns 0 past EOF, and a short
        // read size for the final, partial chunk.
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            let start = chunk_index * CHUNK_SIZE;
            if start >= self.size {
                return Ok(0);
            }
            let end = min(self.size, start + CHUNK_SIZE);
            let read_size = (end - start) as usize;
            debug_assert!(read_size <= buf.len());
            self.file.read_exact_at(&mut buf[..read_size], start)?;
            Ok(read_size)
        }
    }

    type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, MerkleTreeReader>;

    // A `ReadByChunk` that serves the Merkle tree out of parsed fs-verity metadata.
    pub struct MerkleTreeReader {
        metadata: Box<FSVerityMetadata>,
    }

    impl ReadByChunk for MerkleTreeReader {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            self.metadata.read_merkle_tree(chunk_index * CHUNK_SIZE, buf)
        }
    }

    // Number of 4K chunks needed to cover `file_size` bytes (rounded up).
    fn total_chunk_number(file_size: u64) -> u64 {
        (file_size + 4095) / 4096
    }

    // Returns a reader with fs-verity verification and the file size.
    fn new_reader_with_fsverity(
        content_path: &str,
        metadata_path: &str,
    ) -> Result<(LocalVerifiedFileReader, u64)> {
        let file_reader = LocalFileReader::new(File::open(content_path)?)?;
        let file_size = file_reader.len();
        let metadata = parse_fsverity_metadata(File::open(metadata_path)?)?;
        Ok((
            VerifiedFileReader::new(
                file_reader,
                file_size,
                &metadata.digest.clone(),
                MerkleTreeReader { metadata },
            )?,
            file_size,
        ))
    }

    // Every chunk of an exactly-4K file should verify.
    #[test]
    fn fsverity_verify_full_read_4k() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k", "testdata/input.4k.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    // Every chunk of a 4K+1-byte file (partial last chunk) should verify.
    #[test]
    fn fsverity_verify_full_read_4k1() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4k1", "testdata/input.4k1.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    // Every chunk of a 4M file (multi-level Merkle tree) should verify.
    #[test]
    fn fsverity_verify_full_read_4m() -> Result<()> {
        let (file_reader, file_size) =
            new_reader_with_fsverity("testdata/input.4m", "testdata/input.4m.fsv_meta")?;

        for i in 0..total_chunk_number(file_size) {
            let mut buf = [0u8; 4096];
            assert!(file_reader.read_chunk(i, &mut buf).is_ok());
        }
        Ok(())
    }

    // A corrupted leaf node must fail reads of exactly the chunks it covers.
    #[test]
    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
        let (file_reader, _) = new_reader_with_fsverity(
            "testdata/input.4m",
            "testdata/input.4m.fsv_meta.bad_merkle", // First leaf node is corrupted.
        )?;

        // A lowest broken node (a 4K chunk that contains 128 sha256 hashes) will fail the read
        // failure of the underlying chunks, but not before or after.
        let mut buf = [0u8; 4096];
        let num_hashes = 4096 / 32;
        let last_index = num_hashes;
        for i in 0..last_index {
            assert!(file_reader.read_chunk(i, &mut buf).is_err());
        }
        assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
        Ok(())
    }
}