pvmfw: Stop confusing MEM_SHARE granule & DMA size

The VirtIO HAL assumes that shared allocations are aligned to a
multiple of the DMA alignment required by virtio devices. That
assumption is wrong for two reasons:

    - the allocator isn't forced to align the region it returns to
      the memory sharing granule (e.g. if 2 regions are requested
      that fit in a single granule, it should be allowed to allocate
      only one granule);
    - nothing guarantees the granule to be a multiple of the DMA
      alignment.

Therefore, modify the {de,}alloc_shared API to more closely follow
the standard Rust allocation APIs (e.g. GlobalAlloc or LockedHeap) by
taking a Layout, allowing callers to pass the alignment they require.
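
For illustration only (this snippet is not part of the patch; `len`
is a hypothetical buffer size), a caller now spells out the layout it
needs:

    use core::alloc::Layout;

    // Hypothetical caller requesting a PAGE_SIZE-aligned buffer of
    // `len` bytes shared with the host.
    let layout = Layout::from_size_align(len, PAGE_SIZE).unwrap();
    let buf = alloc_shared(layout)?;
    // ... use the shared buffer ...
    // SAFETY - `buf` came from `alloc_shared` with this same layout
    // and hasn't been deallocated yet.
    unsafe { dealloc_shared(buf, layout)? };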

Use that in the virtio_drivers HAL to request:

    - PAGE_SIZE-aligned regions for DMA;
    - size_of::<u128>()-aligned regions for bounce buffers
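
A sketch of those two requests (the HAL change itself is in another
file not shown in this diff, so the identifiers here are
illustrative):

    use core::alloc::Layout;
    use core::mem::size_of;

    // Assumed shape of the HAL's requests, not the literal HAL code.
    let dma_layout =
        Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
    let bounce_layout =
        Layout::from_size_align(size, size_of::<u128>()).unwrap();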

Keep the memory.rs code functionally unchanged for now, so that
allocations are still granule-aligned after this patch.
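
For instance, assuming a 4 KiB granule, the align_to()/pad_to_align()
rounding introduced in memory.rs produces the same granule-sized,
granule-aligned layout as the removed shared_buffer_layout():

    use core::alloc::Layout;

    // Assuming a 4 KiB memory protection granule.
    let req = Layout::from_size_align(16, 16).unwrap();
    let padded = req.align_to(4096).unwrap().pad_to_align();
    assert_eq!(padded.align(), 4096);
    assert_eq!(padded.size(), 4096);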

Bug: 280644106
Test: atest MicrodroidTests
Change-Id: I2b22e2095cde48e59e7303efeed1e3c09ee5ad5b
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 16c1a37..7e8423a 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -16,7 +16,7 @@
 
 #![deny(unsafe_op_in_unsafe_fn)]
 
-use crate::helpers::{self, align_down, align_up, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
+use crate::helpers::{self, align_down, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
 use crate::mmu;
 use alloc::alloc::alloc_zeroed;
 use alloc::alloc::dealloc;
@@ -346,13 +346,12 @@
 /// host. Returns a pointer to the buffer.
 ///
 /// It will be aligned to the memory sharing granule size supported by the hypervisor.
-pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
-    let layout = shared_buffer_layout(size)?;
-    let granule = layout.align();
-
+pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
+    assert_ne!(layout.size(), 0);
+    let granule = get_hypervisor().memory_protection_granule()?;
+    let layout = layout.align_to(granule).unwrap().pad_to_align();
     if let Some(shared_pool) = SHARED_POOL.get() {
-        // Safe because `shared_buffer_layout` panics if the size is 0, so the
-        // layout must have a non-zero size.
+        // SAFETY - layout has a non-zero size.
         let buffer = unsafe { shared_pool.alloc_zeroed(layout) };
 
         let Some(buffer) = NonNull::new(buffer) else {
@@ -363,8 +362,7 @@
         return Ok(buffer);
     }
 
-    // Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
-    // non-zero size.
+    // SAFETY - layout has a non-zero size.
     let buffer = unsafe { alloc_zeroed(layout) };
 
     let Some(buffer) = NonNull::new(buffer) else {
@@ -388,10 +386,9 @@
 ///
-/// The memory must have been allocated by `alloc_shared` with the same size, and not yet
+/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
 /// deallocated.
-pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, size: usize) -> hyp::Result<()> {
-    let layout = shared_buffer_layout(size)?;
-    let granule = layout.align();
-
+pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
+    let granule = get_hypervisor().memory_protection_granule()?;
+    let layout = layout.align_to(granule).unwrap().pad_to_align();
     if let Some(shared_pool) = SHARED_POOL.get() {
         // Safe because the memory was allocated by `alloc_shared` above using
         // the same allocator, and the layout is the same as was used then.
@@ -411,20 +408,6 @@
     Ok(())
 }
 
-/// Returns the layout to use for allocating a buffer of at least the given size shared with the
-/// host.
-///
-/// It will be aligned to the memory sharing granule size supported by the hypervisor.
-///
-/// Panics if `size` is 0.
-fn shared_buffer_layout(size: usize) -> hyp::Result<Layout> {
-    assert_ne!(size, 0);
-    let granule = get_hypervisor().memory_protection_granule()?;
-    let allocated_size =
-        align_up(size, granule).expect("Memory protection granule was not a power of two");
-    Ok(Layout::from_size_align(allocated_size, granule).unwrap())
-}
-
 /// Returns an iterator which yields the base address of each 4 KiB page within the given range.
 fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
     (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)