pvmfw: Pass BCC to next stage through DT

Generate the next-stage BCC in a heap-allocated, page-aligned buffer
that is intentionally leaked so that it outlives the execution of pvmfw
and can be accessed by the next stage. Flush the corresponding cache
lines so that the data isn't destroyed when the next stage invalidates
the caches and isn't missed if the region is accessed with the caches
disabled.
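
For context, the caller side is expected to look roughly like this
sketch; PAGE_SIZE, make_leaked_bcc_buffer and flush_region are
illustrative names (not taken from this change), with flush_region
standing in for whatever cache-maintenance helper pvmfw provides:

    use alloc::boxed::Box;

    const PAGE_SIZE: usize = 4096; // assumed 4 KiB pages

    fn make_leaked_bcc_buffer() -> Option<&'static mut [u8]> {
        let bcc = crate::heap::aligned_boxed_slice(PAGE_SIZE, PAGE_SIZE)?;
        // Never freed: leaking keeps the region valid after pvmfw exits.
        let bcc = Box::leak(bcc);
        // Hypothetical helper: clean the cache lines covering the buffer
        // so the data reaches memory before the next stage runs, even if
        // it invalidates the caches or runs with them disabled.
        flush_region(bcc.as_mut_ptr() as usize, bcc.len());
        Some(bcc)
    }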

Pass the size and location of the region through a pKVM-standard
device tree node.
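
Concretely, the node is expected to take roughly the following shape
(an illustrative device tree fragment; the reserved-memory placement
and the "google,open-dice" compatible follow the pKVM/AVF convention,
and the reg value is a placeholder patched in at runtime):

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        dice {
            compatible = "google,open-dice";
            no-map;
            /* Placeholder <address size>, filled in by pvmfw. */
            reg = <0x0 0x0 0x0 0x1000>;
        };
    };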

Bug: 256827715
Test: atest MicrodroidHostTests
Change-Id: I5931054f74063eac3b3b21a6bcbe4881af2e1e8e
diff --git a/pvmfw/src/heap.rs b/pvmfw/src/heap.rs
index eab3bc4..e412f69 100644
--- a/pvmfw/src/heap.rs
+++ b/pvmfw/src/heap.rs
@@ -14,8 +14,11 @@
 
 //! Heap implementation.
 
+use alloc::alloc::alloc;
+use alloc::alloc::Layout;
+use alloc::boxed::Box;
+
 use core::alloc::GlobalAlloc as _;
-use core::alloc::Layout;
 use core::ffi::c_void;
 use core::mem;
 use core::num::NonZeroUsize;
@@ -33,6 +36,19 @@
     HEAP_ALLOCATOR.lock().init(HEAP.as_mut_ptr() as usize, HEAP.len());
 }
 
+/// Allocate an aligned but uninitialized slice of heap.
+pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
+    let size = NonZeroUsize::new(size)?.get();
+    let layout = Layout::from_size_align(size, align).ok()?;
+    // SAFETY - `size` is non-zero so `layout` is valid for `alloc`; `ptr` is null-checked below.
+    let ptr = unsafe { alloc(layout) };
+    let ptr = NonNull::new(ptr)?.as_ptr();
+    let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);
+
+    // SAFETY - The memory was allocated using the proper layout by our global_allocator.
+    Some(unsafe { Box::from_raw(slice_ptr) })
+}
+
 #[no_mangle]
 unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
     malloc_(size).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())