Update layout/PageTable memory range to Range<VirtualAddress>

This CL updates the return type of the layout functions and the
parameter type of the PageTable memory-mapping functions from
Range<usize> to Range<VirtualAddress>, making it explicit that
the ranges used here are virtual memory ranges.

Test: atest vmbase_example.integration_test rialto_test
Test: m pvmfw_img
Bug: 284462758
Change-Id: I19d4859a03edffedb00ab2831f43929befcb98d8
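
For readers skimming the diff, here is a minimal, self-contained sketch
of the newtype pattern this CL adopts. VirtualAddress below stands in
for aarch64_paging::paging::VirtualAddress (a public wrapper around a
usize); the layout function, addresses, and alignment constant are
placeholders rather than real pvmfw values:

  use std::ops::{Range, Sub};

  /// Stand-in for aarch64_paging::paging::VirtualAddress: a newtype
  /// that tags a plain usize as a virtual address.
  #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
  pub struct VirtualAddress(pub usize);

  /// Subtracting two addresses yields a length in bytes, as in
  /// aarch64_paging. This is why the diff replaces scratch.len() with
  /// scratch.end - scratch.start: Range<VirtualAddress> has no len(),
  /// because VirtualAddress does not implement Step.
  impl Sub for VirtualAddress {
      type Output = usize;
      fn sub(self, other: Self) -> usize {
          self.0 - other.0
      }
  }

  /// Before: nothing in the signature says which address space this is.
  fn scratch_range_old() -> Range<usize> {
      0x7fc0_0000..0x7fc1_0000 // placeholder addresses
  }

  /// After: the signature states that the range is virtual memory.
  fn scratch_range_new() -> Range<VirtualAddress> {
      VirtualAddress(0x7fc0_0000)..VirtualAddress(0x7fc1_0000)
  }

  fn main() {
      let scratch = scratch_range_new();
      assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
      // The underlying usize stays reachable through the .0 field, as in
      // the updated assertions and inline-asm operands in entry.rs.
      assert_eq!(scratch.start.0 % 16, 0); // 16 stands in for ASM_STP_ALIGN
      // Range<usize> keeps its len(); the two size computations agree.
      let legacy = scratch_range_old();
      assert_eq!(legacy.len(), scratch.end - scratch.start);
  }
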
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 6f96fc0..3d2fea8 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -278,19 +278,19 @@
 
     let scratch = layout::scratch_range();
 
-    assert_ne!(scratch.len(), 0, "scratch memory is empty.");
-    assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
-    assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+    assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
+    assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+    assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
 
-    assert!(bcc.is_within(&scratch));
+    assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
     assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
     assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
 
     let stack = memory::stack_range();
 
-    assert_ne!(stack.len(), 0, "stack region is empty.");
-    assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
-    assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+    assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
+    assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+    assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
 
     // Zero all memory that could hold secrets and that can't be safely written to from Rust.
     // Disable the exception vector, caches and page table and then jump to the payload at the
@@ -375,11 +375,11 @@
             sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
             bcc = in(reg) u64::try_from(bcc.start).unwrap(),
             bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
-            cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
-            scratch = in(reg) u64::try_from(scratch.start).unwrap(),
-            scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
-            stack = in(reg) u64::try_from(stack.start).unwrap(),
-            stack_end = in(reg) u64::try_from(stack.end).unwrap(),
+            cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
+            scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
+            scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
+            stack = in(reg) u64::try_from(stack.start.0).unwrap(),
+            stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
             dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
             in("x0") fdt_address,
             in("x30") payload_start,
@@ -396,7 +396,7 @@
     let range = memory::appended_payload_range();
     // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
     // objects.
-    unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
+    unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
 }
 
 enum AppendedConfigType {
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 11fcd7c..5930ec9 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,25 +17,27 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
 use crate::helpers::PVMFW_PAGE_SIZE;
+use aarch64_paging::paging::VirtualAddress;
 use aarch64_paging::MapError;
+use core::ops::Range;
 use core::result;
 use log::error;
 use vmbase::{
     layout,
-    memory::{MemoryRange, PageTable, SIZE_2MB, SIZE_4KB},
+    memory::{PageTable, SIZE_2MB, SIZE_4KB},
     util::align_up,
 };
 
 /// Returns memory range reserved for the appended payload.
-pub fn appended_payload_range() -> MemoryRange {
+pub fn appended_payload_range() -> Range<VirtualAddress> {
     let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
     // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
     let end = align_up(start, SIZE_2MB).unwrap();
-    start..end
+    VirtualAddress(start)..VirtualAddress(end)
 }
 
 /// Region allocated for the stack.
-pub fn stack_range() -> MemoryRange {
+pub fn stack_range() -> Range<VirtualAddress> {
     const STACK_PAGES: usize = 8;
 
     layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
@@ -46,12 +48,12 @@
 
     // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
     // so dirty state management can be omitted.
-    page_table.map_data(&layout::scratch_range())?;
-    page_table.map_data(&stack_range())?;
-    page_table.map_code(&layout::text_range())?;
-    page_table.map_rodata(&layout::rodata_range())?;
-    page_table.map_data_dbm(&appended_payload_range())?;
-    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
+    page_table.map_data(&layout::scratch_range().into())?;
+    page_table.map_data(&stack_range().into())?;
+    page_table.map_code(&layout::text_range().into())?;
+    page_table.map_rodata(&layout::rodata_range().into())?;
+    page_table.map_data_dbm(&appended_payload_range().into())?;
+    if let Err(e) = page_table.map_device(&layout::console_uart_range().into()) {
         error!("Failed to remap the UART as a dynamic page table entry: {e}");
         return Err(e);
     }
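
The .into() calls added in init_page_table() convert each
Range<VirtualAddress> at the call site; the signatures of the PageTable
mapping functions are not part of this diff, so the sketch below only
models the pattern. MemoryRegion and map_data here are hypothetical
stand-ins, loosely modeled on aarch64_paging::paging::MemoryRegion
(which can be built From<Range<VirtualAddress>>):

  use std::ops::Range;

  #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
  pub struct VirtualAddress(pub usize);

  /// Hypothetical stand-in for the region type the mapping functions
  /// accept, modeled on aarch64_paging::paging::MemoryRegion.
  #[derive(Clone, Debug)]
  pub struct MemoryRegion(Range<VirtualAddress>);

  impl From<Range<VirtualAddress>> for MemoryRegion {
      fn from(range: Range<VirtualAddress>) -> Self {
          Self(range)
      }
  }

  /// Hypothetical mapping function with the call shape used above.
  fn map_data(region: &MemoryRegion) -> Result<(), ()> {
      println!("map_data: {:#x}..{:#x}", region.0.start.0, region.0.end.0);
      Ok(())
  }

  fn scratch_range() -> Range<VirtualAddress> {
      VirtualAddress(0x7fc0_0000)..VirtualAddress(0x7fc1_0000) // placeholders
  }

  fn main() -> Result<(), ()> {
      // `.into()` resolves to From<Range<VirtualAddress>> for MemoryRegion,
      // mirroring page_table.map_data(&layout::scratch_range().into())?.
      map_data(&scratch_range().into())?;
      Ok(())
  }
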