Fix warnings in preparation for Rust 1.54.0
This CL fixes several new warnings generated by the rustc 1.54.0 toolchain, mostly by removing borrows of values that are already references and by dropping the repeated "File" suffix from the FileConfig variants.
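
Most of the hunks below simply drop an extra `&` from a value that is already a
reference. As a minimal standalone sketch of the pattern (hypothetical names,
not code from this tree):

    fn log_device(name: &str) {
        println!("device: {}", name);
    }

    fn main() {
        let name = "example_device";
        // Before: `log_device(&name)` passes a `&&str` that only works through
        // auto-deref and is flagged as an unnecessary borrow by the newer
        // toolchain's lints.
        // After: pass the existing reference directly.
        log_device(name);
    }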
Bug: 194812675
Test: m rust
Change-Id: I104aaf07897db4df89dd1598969dd74221bfdd0b
diff --git a/apkdmverity/src/dm.rs b/apkdmverity/src/dm.rs
index 2b44876..4cb24fc 100644
--- a/apkdmverity/src/dm.rs
+++ b/apkdmverity/src/dm.rs
@@ -147,15 +147,15 @@
/// The path to the generated device is "/dev/mapper/<name>".
pub fn create_device(&self, name: &str, target: &DmVerityTarget) -> Result<PathBuf> {
// Step 1: create an empty device
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.set_uuid(&uuid()?)?;
- dm_dev_create(&self, &mut data)
+ dm_dev_create(self, &mut data)
.context(format!("failed to create an empty device with name {}", &name))?;
// Step 2: load table onto the device
let payload_size = size_of::<DmIoctl>() + target.as_slice().len();
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.data_size = payload_size as u32;
data.data_start = size_of::<DmIoctl>() as u32;
data.target_count = 1;
@@ -164,13 +164,13 @@
let mut payload = Vec::with_capacity(payload_size);
payload.extend_from_slice(data.as_slice());
payload.extend_from_slice(target.as_slice());
- dm_table_load(&self, payload.as_mut_ptr() as *mut DmIoctl)
+ dm_table_load(self, payload.as_mut_ptr() as *mut DmIoctl)
.context("failed to load table")?;
// Step 3: activate the device (note: the term 'suspend' might be misleading, but it
// actually activates the table. See include/uapi/linux/dm-ioctl.h)
- let mut data = DmIoctl::new(&name)?;
- dm_dev_suspend(&self, &mut data).context("failed to activate")?;
+ let mut data = DmIoctl::new(name)?;
+ dm_dev_suspend(self, &mut data).context("failed to activate")?;
// Step 4: wait until the device is created and return the device path
let path = Path::new(MAPPER_DEV_ROOT).join(&name);
@@ -181,9 +181,9 @@
/// Removes a mapper device
#[cfg(test)]
pub fn delete_device_deferred(&self, name: &str) -> Result<()> {
- let mut data = DmIoctl::new(&name)?;
+ let mut data = DmIoctl::new(name)?;
data.flags |= Flag::DM_DEFERRED_REMOVE;
- dm_dev_remove(&self, &mut data)
+ dm_dev_remove(self, &mut data)
.context(format!("failed to remove device with name {}", &name))?;
Ok(())
}
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index f09af79..ff3944e 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -121,7 +121,7 @@
// Actually create a dm-verity block device using the spec.
let dm = dm::DeviceMapper::new()?;
let mapper_device =
- dm.create_device(&name, &target).context("Failed to create dm-verity device")?;
+ dm.create_device(name, &target).context("Failed to create dm-verity device")?;
Ok(VerityResult { data_device, hash_device, mapper_device })
}
@@ -173,7 +173,7 @@
return;
}
let test_dir = tempfile::TempDir::new().unwrap();
- let (apk_path, idsig_path) = prepare_inputs(&test_dir.path(), apk, idsig);
+ let (apk_path, idsig_path) = prepare_inputs(test_dir.path(), apk, idsig);
// Run the program and register clean-ups.
let ret = enable_verity(&apk_path, &idsig_path, name).unwrap();
@@ -296,7 +296,7 @@
let idsig = include_bytes!("../testdata/test.apk.idsig");
let test_dir = tempfile::TempDir::new().unwrap();
- let (apk_path, idsig_path) = prepare_inputs(&test_dir.path(), apk, idsig);
+ let (apk_path, idsig_path) = prepare_inputs(test_dir.path(), apk, idsig);
// attach the files to loop devices to make them block devices
let apk_size = fs::metadata(&apk_path).unwrap().len();
@@ -314,7 +314,7 @@
let name = "loop_as_input";
// Run the program WITH the loop devices, not the regular files.
- let ret = enable_verity(apk_loop_device.deref(), idsig_loop_device.deref(), &name).unwrap();
+ let ret = enable_verity(apk_loop_device.deref(), idsig_loop_device.deref(), name).unwrap();
let ret = scopeguard::guard(ret, |ret| {
loopdevice::detach(ret.data_device).unwrap();
loopdevice::detach(ret.hash_device).unwrap();
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index 5137a2e..d63fe93 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -118,7 +118,7 @@
match self.get_file_config(id)? {
FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
- read_into_buf(&file, size, offset).map_err(|e| {
+ read_into_buf(file, size, offset).map_err(|e| {
error!("readFile: read error: {}", e);
Status::from(ERROR_IO)
})
@@ -133,7 +133,7 @@
match &self.get_file_config(id)? {
FdConfig::Readonly { file, alt_merkle_tree, .. } => {
if let Some(tree_file) = &alt_merkle_tree {
- read_into_buf(&tree_file, size, offset).map_err(|e| {
+ read_into_buf(tree_file, size, offset).map_err(|e| {
error!("readFsverityMerkleTree: read error: {}", e);
Status::from(ERROR_IO)
})
@@ -165,7 +165,7 @@
// Supposedly big enough buffer size to store signature.
let size = MAX_REQUESTING_DATA as usize;
let offset = 0;
- read_into_buf(&sig_file, size, offset).map_err(|e| {
+ read_into_buf(sig_file, size, offset).map_err(|e| {
error!("readFsveritySignature: read error: {}", e);
Status::from(ERROR_IO)
})
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 44d5000..703eddb 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -20,7 +20,7 @@
fn get_local_binder() -> io::Result<VirtFdService> {
let service_name = "authfs_fd_server";
- get_interface(&service_name).map_err(|e| {
+ get_interface(service_name).map_err(|e| {
io::Error::new(
io::ErrorKind::AddrNotAvailable,
format!("Cannot reach authfs_fd_server binder service: {}", e),
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
index 037b8ec..903c143 100644
--- a/authfs/src/file/remote_file.rs
+++ b/authfs/src/file/remote_file.rs
@@ -101,7 +101,7 @@
i64::try_from(offset).map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
let size = self
.service
- .writeFile(self.file_fd, &buf, offset)
+ .writeFile(self.file_fd, buf, offset)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
Ok(size as usize) // within range because size is supposed to <= buf.len(), which is a usize
}
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index 1842425..fda47bc 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -248,7 +248,7 @@
let mut tree = MerkleLeaves::new();
for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
let hash = Sha256Hasher::new()?
- .update(&chunk)?
+ .update(chunk)?
.update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
.finalize()?;
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 8468cc9..86ff4d6 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -206,7 +206,7 @@
// (original) integrity for the file. To match what write(2) describes for an error
// case (though it's about direct I/O), "Partial data may be written ... should be
// considered inconsistent", an error below is propagated.
- self.file.write_all_at(&source, output_offset)?;
+ self.file.write_all_at(source, output_offset)?;
// Update the hash only after the write succeeds. Note that this only attempts to keep
// the tree consistent with what has been written regardless of the actual state beyond the
@@ -290,7 +290,7 @@
if end > self.data.borrow().len() {
self.data.borrow_mut().resize(end, 0);
}
- self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(&buf);
+ self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
Ok(buf.len())
}
@@ -318,7 +318,7 @@
format!("read_chunk out of bound: index {}", chunk_index),
)
})?;
- buf[..chunk.len()].copy_from_slice(&chunk);
+ buf[..chunk.len()].copy_from_slice(chunk);
Ok(chunk.len())
}
}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 13de42a..1f21b13 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -33,7 +33,7 @@
fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
let padding_size = pad_to - chunk.len();
- Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
+ Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
}
fn verity_check<T: ReadByChunk>(
@@ -47,7 +47,7 @@
// beyond the file size, including empty file.
assert_ne!(file_size, 0);
- let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;
+ let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
chunk_hash,
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index d2948c7..77743bd 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -49,23 +49,23 @@
pub enum FileConfig {
/// A file type that is verified against fs-verity signature (thus read-only). The file is
/// backed by a local file. Debug only.
- LocalVerifiedReadonlyFile {
+ LocalVerifiedReadonly {
reader: VerifiedFileReader<LocalFileReader, LocalFileReader>,
file_size: u64,
},
/// A file type that is a read-only passthrough from a local file. Debug only.
- LocalUnverifiedReadonlyFile { reader: LocalFileReader, file_size: u64 },
+ LocalUnverifiedReadonly { reader: LocalFileReader, file_size: u64 },
/// A file type that is verified against fs-verity signature (thus read-only). The file is
/// served from a remote server.
- RemoteVerifiedReadonlyFile {
+ RemoteVerifiedReadonly {
reader: VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>,
file_size: u64,
},
/// A file type that is a read-only passthrough from a file on a remote server.
- RemoteUnverifiedReadonlyFile { reader: RemoteFileReader, file_size: u64 },
+ RemoteUnverifiedReadonly { reader: RemoteFileReader, file_size: u64 },
/// A file type that is initially empty, and the content is stored on a remote server. File
/// integrity is guaranteed with a private Merkle tree.
- RemoteVerifiedNewFile { editor: VerifiedFileEditor<RemoteFileEditor> },
+ RemoteVerifiedNew { editor: VerifiedFileEditor<RemoteFileEditor> },
}
struct AuthFs {
@@ -87,7 +87,7 @@
}
fn get_file_config(&self, inode: &Inode) -> io::Result<&FileConfig> {
- self.file_pool.get(&inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
+ self.file_pool.get(inode).ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))
}
}
@@ -207,13 +207,13 @@
// be static.
let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
let st = match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
- | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
+ FileConfig::LocalVerifiedReadonly { file_size, .. }
+ | FileConfig::LocalUnverifiedReadonly { file_size, .. }
+ | FileConfig::RemoteUnverifiedReadonly { file_size, .. }
+ | FileConfig::RemoteVerifiedReadonly { file_size, .. } => {
create_stat(inode, *file_size, FileMode::ReadOnly)?
}
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ FileConfig::RemoteVerifiedNew { editor } => {
create_stat(inode, editor.size(), FileMode::ReadWrite)?
}
};
@@ -234,13 +234,13 @@
) -> io::Result<(libc::stat64, Duration)> {
Ok((
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { file_size, .. }
- | FileConfig::LocalUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { file_size, .. }
- | FileConfig::RemoteVerifiedReadonlyFile { file_size, .. } => {
+ FileConfig::LocalVerifiedReadonly { file_size, .. }
+ | FileConfig::LocalUnverifiedReadonly { file_size, .. }
+ | FileConfig::RemoteUnverifiedReadonly { file_size, .. }
+ | FileConfig::RemoteVerifiedReadonly { file_size, .. } => {
create_stat(inode, *file_size, FileMode::ReadOnly)?
}
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ FileConfig::RemoteVerifiedNew { editor } => {
create_stat(inode, editor.size(), FileMode::ReadWrite)?
}
},
@@ -257,13 +257,13 @@
// Since file handle is not really used in later operations (which use Inode directly),
// return None as the handle.
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { .. }
- | FileConfig::LocalUnverifiedReadonlyFile { .. }
- | FileConfig::RemoteVerifiedReadonlyFile { .. }
- | FileConfig::RemoteUnverifiedReadonlyFile { .. } => {
+ FileConfig::LocalVerifiedReadonly { .. }
+ | FileConfig::LocalUnverifiedReadonly { .. }
+ | FileConfig::RemoteVerifiedReadonly { .. }
+ | FileConfig::RemoteUnverifiedReadonly { .. } => {
check_access_mode(flags, libc::O_RDONLY)?;
}
- FileConfig::RemoteVerifiedNewFile { .. } => {
+ FileConfig::RemoteVerifiedNew { .. } => {
// No need to check access modes since all the modes are allowed to the
// read-writable file.
}
@@ -285,19 +285,19 @@
_flags: u32,
) -> io::Result<usize> {
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedReadonlyFile { reader, file_size } => {
+ FileConfig::LocalVerifiedReadonly { reader, file_size } => {
read_chunks(w, reader, *file_size, offset, size)
}
- FileConfig::LocalUnverifiedReadonlyFile { reader, file_size } => {
+ FileConfig::LocalUnverifiedReadonly { reader, file_size } => {
read_chunks(w, reader, *file_size, offset, size)
}
- FileConfig::RemoteVerifiedReadonlyFile { reader, file_size } => {
+ FileConfig::RemoteVerifiedReadonly { reader, file_size } => {
read_chunks(w, reader, *file_size, offset, size)
}
- FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size } => {
+ FileConfig::RemoteUnverifiedReadonly { reader, file_size } => {
read_chunks(w, reader, *file_size, offset, size)
}
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ FileConfig::RemoteVerifiedNew { editor } => {
// Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
// request a read even if the file is open with O_WRONLY.
read_chunks(w, editor, editor.size(), offset, size)
@@ -318,7 +318,7 @@
_flags: u32,
) -> io::Result<usize> {
match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ FileConfig::RemoteVerifiedNew { editor } => {
let mut buf = vec![0; size as usize];
r.read_exact(&mut buf)?;
editor.write_at(&buf, offset)
@@ -336,7 +336,7 @@
valid: SetattrValid,
) -> io::Result<(libc::stat64, Duration)> {
match self.get_file_config(&inode)? {
- FileConfig::RemoteVerifiedNewFile { editor } => {
+ FileConfig::RemoteVerifiedNew { editor } => {
// Initialize the default stat.
let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
// `valid` indicates what fields in `attr` are valid. Update to return correctly.
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 9d36c3f..d583f92 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -224,7 +224,7 @@
let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
let authenticator = FakeAuthenticator::always_succeed();
- Ok(FileConfig::RemoteVerifiedReadonlyFile {
+ Ok(FileConfig::RemoteVerifiedReadonly {
reader: VerifiedFileReader::new(
&authenticator,
RemoteFileReader::new(service.clone(), remote_id),
@@ -242,7 +242,7 @@
file_size: u64,
) -> Result<FileConfig> {
let reader = RemoteFileReader::new(service, remote_id);
- Ok(FileConfig::RemoteUnverifiedReadonlyFile { reader, file_size })
+ Ok(FileConfig::RemoteUnverifiedReadonly { reader, file_size })
}
fn new_config_local_ro_file(
@@ -259,13 +259,13 @@
let _ = File::open(signature)?.read_to_end(&mut sig)?;
let reader =
VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree_reader)?;
- Ok(FileConfig::LocalVerifiedReadonlyFile { reader, file_size })
+ Ok(FileConfig::LocalVerifiedReadonly { reader, file_size })
}
fn new_config_local_ro_file_unverified(file_path: &Path) -> Result<FileConfig> {
let reader = LocalFileReader::new(File::open(file_path)?)?;
let file_size = reader.len();
- Ok(FileConfig::LocalUnverifiedReadonlyFile { reader, file_size })
+ Ok(FileConfig::LocalUnverifiedReadonly { reader, file_size })
}
fn new_config_remote_new_verified_file(
@@ -273,7 +273,7 @@
remote_id: i32,
) -> Result<FileConfig> {
let remote_file = RemoteFileEditor::new(service, remote_id);
- Ok(FileConfig::RemoteVerifiedNewFile { editor: VerifiedFileEditor::new(remote_file) })
+ Ok(FileConfig::RemoteVerifiedNew { editor: VerifiedFileEditor::new(remote_file) })
}
fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
diff --git a/compos/src/compos_key_service.rs b/compos/src/compos_key_service.rs
index 40d0f48..779b798 100644
--- a/compos/src/compos_key_service.rs
+++ b/compos/src/compos_key_service.rs
@@ -203,7 +203,7 @@
return Err(anyhow!("Key requires user authorization"));
}
- let signature = operation.finish(Some(&data), None).context("Signing failed")?;
+ let signature = operation.finish(Some(data), None).context("Signing failed")?;
// Operation has finished, we're no longer responsible for aborting it
ScopeGuard::into_inner(operation);
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 24e52f5..ae242de 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -82,7 +82,7 @@
} else {
vec![]
};
- let _pid = jail.run(&self.worker_bin, &inheritable_fds, &args)?;
+ let _pid = jail.run(&self.worker_bin, &inheritable_fds, args)?;
jail.wait()
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 8c5eb97..389dd23 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -658,7 +658,7 @@
fn as_ref(&self) -> &T {
match self {
Self::Borrowed(b) => b,
- Self::Owned(o) => &o,
+ Self::Owned(o) => o,
}
}
}
diff --git a/vmconfig/src/lib.rs b/vmconfig/src/lib.rs
index 4a5b3b1..890051e 100644
--- a/vmconfig/src/lib.rs
+++ b/vmconfig/src/lib.rs
@@ -146,7 +146,7 @@
let images = self
.paths
.iter()
- .map(|path| open_parcel_file(&path, self.writable))
+ .map(|path| open_parcel_file(path, self.writable))
.collect::<Result<Vec<_>, _>>()?;
Ok(AidlPartition { images, writable: self.writable, label: self.label.to_owned() })
}
diff --git a/zipfuse/src/main.rs b/zipfuse/src/main.rs
index 9b70d08..4ab934d 100644
--- a/zipfuse/src/main.rs
+++ b/zipfuse/src/main.rs
@@ -44,7 +44,7 @@
.short("o")
.takes_value(true)
.required(false)
- .help("Comma separated list of mount options")
+ .help("Comma separated list of mount options"),
)
.arg(Arg::with_name("ZIPFILE").required(true))
.arg(Arg::with_name("MOUNTPOINT").required(true))
@@ -650,7 +650,7 @@
let mnt_path = test_dir.join("mnt");
assert!(fs::create_dir(&mnt_path).is_ok());
- start_fuse(&zip_path, &mnt_path);
+ start_fuse(zip_path, &mnt_path);
// Give some time for the fuse to boot up
assert!(wait_for_mount(&mnt_path).is_ok());
@@ -669,7 +669,7 @@
let mut zip_file = File::create(&zip_path).unwrap();
zip_file.write_all(include_bytes!("../testdata/test.zip")).unwrap();
- run_fuse_and_check_test_zip(&test_dir.path(), &zip_path);
+ run_fuse_and_check_test_zip(test_dir.path(), &zip_path);
}
#[cfg(not(target_os = "android"))] // Android doesn't have the loopdev crate