Merge "Require unsafe blocks in unsafe functions"
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index 2416714..7ea1189 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -8,6 +8,8 @@
     defaults: ["vmbase_ffi_defaults"],
     srcs: ["src/main.rs"],
     edition: "2021",
+    // Require unsafe blocks inside unsafe functions.
+    flags: ["-Dunsafe_op_in_unsafe_fn"],
     features: [
         "legacy",
     ],
@@ -30,6 +32,7 @@
         "libuuid_nostd",
         "libvirtio_drivers",
         "libvmbase",
+        "libzerocopy_nostd",
         "libzeroize_nostd",
         "libspin_nostd",
     ],
diff --git a/pvmfw/avb/Android.bp b/pvmfw/avb/Android.bp
index 7ed4895..90f3971 100644
--- a/pvmfw/avb/Android.bp
+++ b/pvmfw/avb/Android.bp
@@ -7,6 +7,8 @@
     crate_name: "pvmfw_avb",
     srcs: ["src/lib.rs"],
     prefer_rlib: true,
+    // Require unsafe blocks inside unsafe functions.
+    flags: ["-Dunsafe_op_in_unsafe_fn"],
     rustlibs: [
         "libavb_bindgen_nostd",
         "libtinyvec_nostd",
diff --git a/pvmfw/avb/src/descriptor.rs b/pvmfw/avb/src/descriptor.rs
index c54d416..cd623ac 100644
--- a/pvmfw/avb/src/descriptor.rs
+++ b/pvmfw/avb/src/descriptor.rs
@@ -14,8 +14,6 @@
 
 //! Structs and functions relating to the descriptors.
 
-#![warn(unsafe_op_in_unsafe_fn)]
-
 use crate::error::{AvbIOError, AvbSlotVerifyError};
 use crate::partition::PartitionName;
 use crate::utils::{self, is_not_null, to_nonnull, to_usize, usize_checked_add};
diff --git a/pvmfw/src/config.rs b/pvmfw/src/config.rs
index f62a580..b90b136 100644
--- a/pvmfw/src/config.rs
+++ b/pvmfw/src/config.rs
@@ -19,10 +19,11 @@
 use core::mem;
 use core::ops::Range;
 use core::result;
+use zerocopy::{FromBytes, LayoutVerified};
 
 /// Configuration data header.
 #[repr(C, packed)]
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, FromBytes)]
 struct Header {
     /// Magic number; must be `Header::MAGIC`.
     magic: u32,
@@ -40,6 +41,8 @@
 pub enum Error {
     /// Reserved region can't fit configuration header.
     BufferTooSmall,
+    /// Header has the wrong alignment.
+    HeaderMisaligned,
     /// Header doesn't contain the expected magic value.
     InvalidMagic,
     /// Version of the header isn't supported.
@@ -58,6 +61,7 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             Self::BufferTooSmall => write!(f, "Reserved region is smaller than config header"),
+            Self::HeaderMisaligned => write!(f, "Reserved region is misaligned"),
             Self::InvalidMagic => write!(f, "Wrong magic number"),
             Self::UnsupportedVersion(x, y) => write!(f, "Version {x}.{y} not supported"),
             Self::InvalidFlags(v) => write!(f, "Flags value {v:#x} is incorrect or reserved"),
@@ -167,7 +171,7 @@
 }
 
 #[repr(packed)]
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, FromBytes)]
 struct HeaderEntry {
     offset: u32,
     size: u32,
@@ -187,7 +191,9 @@
     pub unsafe fn new(data: &'a mut [u8]) -> Result<Self> {
         let header = data.get(..Header::PADDED_SIZE).ok_or(Error::BufferTooSmall)?;
 
-        let header = &*(header.as_ptr() as *const Header);
+        let (header, _) =
+            LayoutVerified::<_, Header>::new_from_prefix(header).ok_or(Error::HeaderMisaligned)?;
+        let header = header.into_ref();
 
         if header.magic != Header::MAGIC {
             return Err(Error::InvalidMagic);
@@ -206,11 +212,13 @@
             header.get_body_range(Entry::Bcc)?.ok_or(Error::MissingEntry(Entry::Bcc))?;
         let dp_range = header.get_body_range(Entry::DebugPolicy)?;
 
+        let body_size = header.body_size();
+        let total_size = header.total_size();
         let body = data
             .get_mut(Header::PADDED_SIZE..)
             .ok_or(Error::BufferTooSmall)?
-            .get_mut(..header.body_size())
-            .ok_or_else(|| Error::InvalidSize(header.total_size()))?;
+            .get_mut(..body_size)
+            .ok_or(Error::InvalidSize(total_size))?;
 
         Ok(Self { body, bcc_range, dp_range })
     }
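In `Config::new`, the raw `&*(header.as_ptr() as *const Header)` cast is replaced by zerocopy's `LayoutVerified::new_from_prefix`, which checks the slice's length and alignment at runtime and relies on the `FromBytes` derive to guarantee that any byte pattern is a valid `Header`, so this part of the parse no longer needs `unsafe`. A minimal standalone sketch of the same pattern, with a made-up `Record` type instead of the pvmfw `Header` (assumes zerocopy's derive support is enabled):

```rust
use zerocopy::{FromBytes, LayoutVerified};

#[repr(C, packed)]
#[derive(FromBytes)]
struct Record {
    tag: u32,
    len: u32,
}

fn parse(bytes: &[u8]) -> Option<(&Record, &[u8])> {
    // new_from_prefix returns None if the slice is too short or misaligned
    // for Record; FromBytes makes the reinterpretation safe without a cast.
    let (record, rest) = LayoutVerified::<_, Record>::new_from_prefix(bytes)?;
    Some((record.into_ref(), rest))
}
```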
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
index 0785a7a..d607bee 100644
--- a/pvmfw/src/crypto.rs
+++ b/pvmfw/src/crypto.rs
@@ -248,13 +248,14 @@
 ///
 /// # Safety
 ///
-/// The caller needs to ensure that the pointer points to a valid C string and that the C lifetime
-/// of the string is compatible with a static Rust lifetime.
+/// The caller needs to ensure that the pointer is null or points to a valid C string and that the
+/// C lifetime of the string is compatible with a static Rust lifetime.
 unsafe fn as_static_cstr(p: *const c_char) -> Option<&'static CStr> {
     if p.is_null() {
         None
     } else {
-        Some(CStr::from_ptr(p))
+        // Safety: Safe given the requirements of this function.
+        Some(unsafe { CStr::from_ptr(p) })
     }
 }
 
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 5dd1d2d..398c8df 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -317,7 +317,9 @@
     // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
     let size = helpers::align_up(base, helpers::SIZE_2MB).unwrap() - base;
 
-    slice::from_raw_parts_mut(base as *mut u8, size)
+    // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
+    // objects.
+    unsafe { slice::from_raw_parts_mut(base as *mut u8, size) }
 }
 
 enum AppendedConfigType {
@@ -336,8 +338,13 @@
 impl<'a> AppendedPayload<'a> {
     /// SAFETY - 'data' should respect the alignment of config::Header.
     unsafe fn new(data: &'a mut [u8]) -> Option<Self> {
-        match Self::guess_config_type(data) {
-            AppendedConfigType::Valid => Some(Self::Config(config::Config::new(data).unwrap())),
+        // Safety: This fn has the same constraint as us.
+        match unsafe { Self::guess_config_type(data) } {
+            AppendedConfigType::Valid => {
+                // Safety: This fn has the same constraint as us.
+                let config = unsafe { config::Config::new(data) };
+                Some(Self::Config(config.unwrap()))
+            }
             AppendedConfigType::NotFound if cfg!(feature = "legacy") => {
                 const BCC_SIZE: usize = helpers::SIZE_4KB;
                 warn!("Assuming the appended data at {:?} to be a raw BCC", data.as_ptr());
@@ -347,11 +354,14 @@
         }
     }
 
+    /// SAFETY - 'data' should respect the alignment of config::Header.
     unsafe fn guess_config_type(data: &mut [u8]) -> AppendedConfigType {
         // This function is necessary to prevent the borrow checker from getting confused
         // about the ownership of data in new(); see https://users.rust-lang.org/t/78467.
         let addr = data.as_ptr();
-        match config::Config::new(data) {
+
+        // Safety: This fn has the same constraint as us.
+        match unsafe { config::Config::new(data) } {
             Err(config::Error::InvalidMagic) => {
                 warn!("No configuration data found at {addr:?}");
                 AppendedConfigType::NotFound
diff --git a/pvmfw/src/heap.rs b/pvmfw/src/heap.rs
index eea2e98..151049e 100644
--- a/pvmfw/src/heap.rs
+++ b/pvmfw/src/heap.rs
@@ -27,15 +27,22 @@
 
 use buddy_system_allocator::LockedHeap;
 
-#[global_allocator]
-static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
-
 /// 128 KiB
 const HEAP_SIZE: usize = 0x20000;
 static mut HEAP: [u8; HEAP_SIZE] = [0; HEAP_SIZE];
 
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
+
+/// SAFETY: Must be called no more than once.
 pub unsafe fn init() {
-    HEAP_ALLOCATOR.lock().init(HEAP.as_mut_ptr() as usize, HEAP.len());
+    // SAFETY: Nothing else accesses this memory, and we hand it over to the heap to manage and
+    // never touch it again. The heap is locked, so there cannot be any races.
+    let (start, size) = unsafe { (HEAP.as_mut_ptr() as usize, HEAP.len()) };
+
+    let mut heap = HEAP_ALLOCATOR.lock();
+    // SAFETY: We are supplying a valid memory range, and we only do this once.
+    unsafe { heap.init(start, size) };
 }
 
 /// Allocate an aligned but uninitialized slice of heap.
@@ -53,7 +60,7 @@
 
 #[no_mangle]
 unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
-    malloc_(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
+    allocate(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
 }
 
 #[no_mangle]
@@ -61,31 +68,69 @@
     let Some(size) = nmemb.checked_mul(size) else {
         return ptr::null_mut()
     };
-    malloc_(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
+    allocate(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
 }
 
 #[no_mangle]
+/// SAFETY: ptr must be null or point to a currently-allocated block returned by allocate (either
+/// directly or via malloc or calloc). Note that this function is called directly from C, so we have
+/// to trust that the C code is doing the right thing; there are checks below which will catch some
+/// errors.
 unsafe extern "C" fn free(ptr: *mut c_void) {
-    if let Some(ptr) = NonNull::new(ptr).map(|p| p.cast::<usize>().as_ptr().offset(-1)) {
-        if let Some(size) = NonZeroUsize::new(*ptr) {
-            if let Some(layout) = malloc_layout(size) {
-                HEAP_ALLOCATOR.dealloc(ptr as *mut u8, layout);
-            }
+    let Some(ptr) = NonNull::new(ptr) else { return };
+    // SAFETY: The contents of the HEAP slice may change, but the address range never does.
+    let heap_range = unsafe { HEAP.as_ptr_range() };
+    assert!(
+        heap_range.contains(&(ptr.as_ptr() as *const u8)),
+        "free() called on a pointer that is not part of the HEAP: {ptr:?}"
+    );
+    let (ptr, size) = unsafe {
+        // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
+        // usize.
+        let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
+        (ptr, *ptr)
+    };
+    let size = NonZeroUsize::new(size).unwrap();
+    let layout = malloc_layout(size).unwrap();
+    // SAFETY: If our precondition is satisfied, then this is a valid currently-allocated block.
+    unsafe { HEAP_ALLOCATOR.dealloc(ptr as *mut u8, layout) }
+}
+
+/// Allocate a block of memory suitable to return from `malloc()` etc. Returns a valid pointer
+/// to a suitably aligned region of size bytes, optionally zeroed (and otherwise uninitialized), or
+/// None if size is 0 or allocation fails. The block can be freed by passing the returned pointer to
+/// `free()`.
+fn allocate(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
+    let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
+    let layout = malloc_layout(size)?;
+    // SAFETY: layout is known to have non-zero size.
+    let ptr = unsafe {
+        if zeroed {
+            HEAP_ALLOCATOR.alloc_zeroed(layout)
+        } else {
+            HEAP_ALLOCATOR.alloc(layout)
         }
+    };
+    let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
+    // SAFETY: ptr points to a newly allocated block of memory which is properly aligned
+    // for a usize and is big enough to hold a usize as well as the requested number of
+    // bytes.
+    unsafe {
+        *ptr = size.get();
+        NonNull::new(ptr.offset(1))
     }
 }
 
-unsafe fn malloc_(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
-    let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
-    let layout = malloc_layout(size)?;
-    let ptr =
-        if zeroed { HEAP_ALLOCATOR.alloc_zeroed(layout) } else { HEAP_ALLOCATOR.alloc(layout) };
-    let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
-    *ptr = size.get();
-    NonNull::new(ptr.offset(1))
+fn malloc_layout(size: NonZeroUsize) -> Option<Layout> {
+    // We want at least 8 byte alignment, and we need to be able to store a usize.
+    const ALIGN: usize = const_max_size(mem::size_of::<usize>(), mem::size_of::<u64>());
+    Layout::from_size_align(size.get(), ALIGN).ok()
 }
 
-fn malloc_layout(size: NonZeroUsize) -> Option<Layout> {
-    const ALIGN: usize = mem::size_of::<u64>();
-    Layout::from_size_align(size.get(), ALIGN).ok()
+const fn const_max_size(a: usize, b: usize) -> usize {
+    if a > b {
+        a
+    } else {
+        b
+    }
 }
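The rewritten `allocate`/`free` pair implements a size-prefix scheme: `allocate` requests `size + size_of::<usize>()` bytes from the heap, writes the total size into the leading `usize`, and returns the address just past it; `free` steps back one `usize` to recover the size and rebuild the `Layout` for `dealloc`. A rough standalone sketch of the same trick against `std::alloc` rather than the pvmfw buddy allocator (names are illustrative, not pvmfw APIs):

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::mem;
use std::ptr::NonNull;

// At least 8-byte alignment; this also satisfies the usize header.
const ALIGN: usize = mem::size_of::<u64>();

fn prefixed_alloc(payload: usize) -> Option<NonNull<u8>> {
    let total = payload.checked_add(mem::size_of::<usize>())?;
    let layout = Layout::from_size_align(total, ALIGN).ok()?;
    // SAFETY: layout has a non-zero size.
    let header = NonNull::new(unsafe { alloc(layout) })?.cast::<usize>();
    // SAFETY: The block is aligned for, and large enough to hold, a usize.
    unsafe { header.as_ptr().write(total) };
    // Hand the caller the address just past the size header.
    NonNull::new(unsafe { header.as_ptr().add(1) }.cast::<u8>())
}

/// # Safety
///
/// `p` must come from `prefixed_alloc` and must not be freed twice.
unsafe fn prefixed_free(p: NonNull<u8>) {
    // SAFETY: Step back over the header to recover the total size.
    let header = unsafe { p.cast::<usize>().as_ptr().offset(-1) };
    let total = unsafe { *header };
    let layout = Layout::from_size_align(total, ALIGN).unwrap();
    // SAFETY: Same block and layout as the original allocation.
    unsafe { dealloc(header.cast::<u8>(), layout) };
}
```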
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 967d1cf..7862702 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -10,6 +10,8 @@
     srcs: ["src/*.rs"],
     include_dirs: ["include"],
     prefer_rlib: true,
+    // Require unsafe blocks inside unsafe functions.
+    flags: ["-Dunsafe_op_in_unsafe_fn"],
     rustlibs: [
         "android.system.virtualization.payload-rust",
         "libandroid_logger",
diff --git a/vm_payload/src/api.rs b/vm_payload/src/api.rs
index 4b565e0..87e29a6 100644
--- a/vm_payload/src/api.rs
+++ b/vm_payload/src/api.rs
@@ -14,9 +14,6 @@
 
 //! This module handles the interaction with virtual machine payload service.
 
-// We're implementing unsafe functions, but we still want warnings on unsafe usage within them.
-#![warn(unsafe_op_in_unsafe_fn)]
-
 use android_system_virtualization_payload::aidl::android::system::virtualization::payload::IVmPayloadService::{
     ENCRYPTEDSTORE_MOUNTPOINT, IVmPayloadService, VM_PAYLOAD_SERVICE_SOCKET_NAME, VM_APK_CONTENTS_PATH};
 use anyhow::{ensure, bail, Context, Result};