Update layout/PageTable memory ranges to Range<VirtualAddress>

This CL updates the return type of the layout functions and the
parameter type of the PageTable memory-mapping functions from
Range<usize> to Range<VirtualAddress>. This makes it explicit
that the ranges used here are virtual memory ranges.
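
For illustration, a minimal sketch of the new shape, assuming the
aarch64_paging crate as used by the code below (example_range() is a
hypothetical stand-in for the layout functions this CL touches):

    // Sketch only, not part of this CL.
    use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
    use core::ops::Range;

    // Before: fn example_range() -> Range<usize>
    fn example_range() -> Range<VirtualAddress> {
        // VirtualAddress is a thin wrapper around a usize address.
        VirtualAddress(0x4000_0000)..VirtualAddress(0x4020_0000)
    }

    fn main() {
        let r = example_range();
        // Subtracting two VirtualAddress values yields a usize length.
        assert_eq!(r.end - r.start, 0x20_0000);
        // Call sites convert with the crate's From impl into the
        // MemoryRegion now taken by the PageTable mapping functions,
        // e.g. page_table.map_code(&layout::text_range().into())?;
        let _region: MemoryRegion = r.into();
    }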

Test: atest vmbase_example.integration_test rialto_test
Test: m pvmfw_img
Bug: 284462758
Change-Id: I19d4859a03edffedb00ab2831f43929befcb98d8
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 6f96fc0..3d2fea8 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -278,19 +278,19 @@
 
     let scratch = layout::scratch_range();
 
-    assert_ne!(scratch.len(), 0, "scratch memory is empty.");
-    assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
-    assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+    assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
+    assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+    assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
 
-    assert!(bcc.is_within(&scratch));
+    assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
     assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
     assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
 
     let stack = memory::stack_range();
 
-    assert_ne!(stack.len(), 0, "stack region is empty.");
-    assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
-    assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+    assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
+    assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+    assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
 
     // Zero all memory that could hold secrets and that can't be safely written to from Rust.
     // Disable the exception vector, caches and page table and then jump to the payload at the
@@ -375,11 +375,11 @@
             sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
             bcc = in(reg) u64::try_from(bcc.start).unwrap(),
             bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
-            cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
-            scratch = in(reg) u64::try_from(scratch.start).unwrap(),
-            scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
-            stack = in(reg) u64::try_from(stack.start).unwrap(),
-            stack_end = in(reg) u64::try_from(stack.end).unwrap(),
+            cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
+            scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
+            scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
+            stack = in(reg) u64::try_from(stack.start.0).unwrap(),
+            stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
             dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
             in("x0") fdt_address,
             in("x30") payload_start,
@@ -396,7 +396,7 @@
     let range = memory::appended_payload_range();
     // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
     // objects.
-    unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
+    unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
 }
 
 enum AppendedConfigType {
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 11fcd7c..5930ec9 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,25 +17,27 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
 use crate::helpers::PVMFW_PAGE_SIZE;
+use aarch64_paging::paging::VirtualAddress;
 use aarch64_paging::MapError;
+use core::ops::Range;
 use core::result;
 use log::error;
 use vmbase::{
     layout,
-    memory::{MemoryRange, PageTable, SIZE_2MB, SIZE_4KB},
+    memory::{PageTable, SIZE_2MB, SIZE_4KB},
     util::align_up,
 };
 
 /// Returns memory range reserved for the appended payload.
-pub fn appended_payload_range() -> MemoryRange {
+pub fn appended_payload_range() -> Range<VirtualAddress> {
     let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
     // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
     let end = align_up(start, SIZE_2MB).unwrap();
-    start..end
+    VirtualAddress(start)..VirtualAddress(end)
 }
 
 /// Region allocated for the stack.
-pub fn stack_range() -> MemoryRange {
+pub fn stack_range() -> Range<VirtualAddress> {
     const STACK_PAGES: usize = 8;
 
     layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
@@ -46,12 +48,12 @@
 
     // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
     // so dirty state management can be omitted.
-    page_table.map_data(&layout::scratch_range())?;
-    page_table.map_data(&stack_range())?;
-    page_table.map_code(&layout::text_range())?;
-    page_table.map_rodata(&layout::rodata_range())?;
-    page_table.map_data_dbm(&appended_payload_range())?;
-    if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
+    page_table.map_data(&layout::scratch_range().into())?;
+    page_table.map_data(&stack_range().into())?;
+    page_table.map_code(&layout::text_range().into())?;
+    page_table.map_rodata(&layout::rodata_range().into())?;
+    page_table.map_data_dbm(&appended_payload_range().into())?;
+    if let Err(e) = page_table.map_device(&layout::console_uart_range().into()) {
         error!("Failed to remap the UART as a dynamic page table entry: {e}");
         return Err(e);
     }
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 61c985e..5e693c8 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -43,11 +43,11 @@
 fn new_page_table() -> Result<PageTable> {
     let mut page_table = PageTable::default();
 
-    page_table.map_data(&layout::scratch_range())?;
-    page_table.map_data(&layout::stack_range(40 * PAGE_SIZE))?;
-    page_table.map_code(&layout::text_range())?;
-    page_table.map_rodata(&layout::rodata_range())?;
-    page_table.map_device(&layout::console_uart_range())?;
+    page_table.map_data(&layout::scratch_range().into())?;
+    page_table.map_data(&layout::stack_range(40 * PAGE_SIZE).into())?;
+    page_table.map_code(&layout::text_range().into())?;
+    page_table.map_rodata(&layout::rodata_range().into())?;
+    page_table.map_device(&layout::console_uart_range().into())?;
 
     Ok(page_table)
 }
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index 1954a90..4d14b1c 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -23,44 +23,10 @@
 /// The first 1 GiB of memory is used for MMIO.
 pub const DEVICE_REGION: MemoryRegion = MemoryRegion::new(0, 0x40000000);
 
-fn into_va_range(r: Range<usize>) -> Range<VirtualAddress> {
-    VirtualAddress(r.start)..VirtualAddress(r.end)
-}
-
-/// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<VirtualAddress> {
-    into_va_range(layout::dtb_range())
-}
-
-/// Executable code.
-pub fn text_range() -> Range<VirtualAddress> {
-    into_va_range(layout::text_range())
-}
-
-/// Read-only data.
-pub fn rodata_range() -> Range<VirtualAddress> {
-    into_va_range(layout::rodata_range())
-}
-
-/// Initialised writable data.
-pub fn data_range() -> Range<VirtualAddress> {
-    into_va_range(layout::data_range())
-}
-
-/// Zero-initialized writable data.
-pub fn bss_range() -> Range<VirtualAddress> {
-    into_va_range(layout::bss_range())
-}
-
 /// Writable data region for the stack.
 pub fn boot_stack_range() -> Range<VirtualAddress> {
     const PAGE_SIZE: usize = 4 << 10;
-    into_va_range(layout::stack_range(40 * PAGE_SIZE))
-}
-
-/// Writable data region for allocations.
-pub fn scratch_range() -> Range<VirtualAddress> {
-    into_va_range(layout::scratch_range())
+    layout::stack_range(40 * PAGE_SIZE)
 }
 
 fn data_load_address() -> VirtualAddress {
@@ -72,14 +38,14 @@
 }
 
 pub fn print_addresses() {
-    let dtb = dtb_range();
+    let dtb = layout::dtb_range();
     info!("dtb:        {}..{} ({} bytes)", dtb.start, dtb.end, dtb.end - dtb.start);
-    let text = text_range();
+    let text = layout::text_range();
     info!("text:       {}..{} ({} bytes)", text.start, text.end, text.end - text.start);
-    let rodata = rodata_range();
+    let rodata = layout::rodata_range();
     info!("rodata:     {}..{} ({} bytes)", rodata.start, rodata.end, rodata.end - rodata.start);
     info!("binary end: {}", binary_end());
-    let data = data_range();
+    let data = layout::data_range();
     info!(
         "data:       {}..{} ({} bytes, loaded at {})",
         data.start,
@@ -87,7 +53,7 @@
         data.end - data.start,
         data_load_address(),
     );
-    let bss = bss_range();
+    let bss = layout::bss_range();
     info!("bss:        {}..{} ({} bytes)", bss.start, bss.end, bss.end - bss.start);
     let boot_stack = boot_stack_range();
     info!(
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 021daa4..cc26036 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -23,17 +23,19 @@
 
 extern crate alloc;
 
-use crate::layout::{
-    bionic_tls, boot_stack_range, dtb_range, print_addresses, rodata_range, scratch_range,
-    text_range, DEVICE_REGION,
-};
+use crate::layout::{bionic_tls, boot_stack_range, print_addresses, DEVICE_REGION};
 use crate::pci::{check_pci, get_bar_region};
 use aarch64_paging::{idmap::IdMap, paging::Attributes};
 use alloc::{vec, vec::Vec};
 use fdtpci::PciInfo;
 use libfdt::Fdt;
 use log::{debug, error, info, trace, warn, LevelFilter};
-use vmbase::{configure_heap, cstr, layout::stack_chk_guard, logger, main, memory::SIZE_64KB};
+use vmbase::{
+    configure_heap, cstr,
+    layout::{dtb_range, rodata_range, scratch_range, stack_chk_guard, text_range},
+    logger, main,
+    memory::SIZE_64KB,
+};
 
 static INITIALISED_DATA: [u32; 4] = [1, 2, 3, 4];
 static mut ZEROED_DATA: [u32; 10] = [0; 10];
diff --git a/vmbase/src/layout/mod.rs b/vmbase/src/layout/mod.rs
index bca5115..ffa29e7 100644
--- a/vmbase/src/layout/mod.rs
+++ b/vmbase/src/layout/mod.rs
@@ -18,6 +18,7 @@
 
 use crate::console::BASE_ADDRESS;
 use crate::linker::__stack_chk_guard;
+use aarch64_paging::paging::VirtualAddress;
 use core::ops::Range;
 use core::ptr::addr_of;
 
@@ -34,61 +35,61 @@
     }};
 }
 
-/// Get the address range between a pair of linker-defined symbols.
+/// Gets the virtual address range between a pair of linker-defined symbols.
 #[macro_export]
 macro_rules! linker_region {
     ($begin:ident,$end:ident) => {{
         let start = linker_addr!($begin);
         let end = linker_addr!($end);
 
-        start..end
+        VirtualAddress(start)..VirtualAddress(end)
     }};
 }
 
 /// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<usize> {
+pub fn dtb_range() -> Range<VirtualAddress> {
     linker_region!(dtb_begin, dtb_end)
 }
 
 /// Executable code.
-pub fn text_range() -> Range<usize> {
+pub fn text_range() -> Range<VirtualAddress> {
     linker_region!(text_begin, text_end)
 }
 
 /// Read-only data.
-pub fn rodata_range() -> Range<usize> {
+pub fn rodata_range() -> Range<VirtualAddress> {
     linker_region!(rodata_begin, rodata_end)
 }
 
 /// Initialised writable data.
-pub fn data_range() -> Range<usize> {
+pub fn data_range() -> Range<VirtualAddress> {
     linker_region!(data_begin, data_end)
 }
 
 /// Zero-initialized writable data.
-pub fn bss_range() -> Range<usize> {
+pub fn bss_range() -> Range<VirtualAddress> {
     linker_region!(bss_begin, bss_end)
 }
 
 /// Writable data region for the stack.
-pub fn stack_range(stack_size: usize) -> Range<usize> {
+pub fn stack_range(stack_size: usize) -> Range<VirtualAddress> {
     let end = linker_addr!(init_stack_pointer);
     let start = end.checked_sub(stack_size).unwrap();
     assert!(start >= linker_addr!(stack_limit));
 
-    start..end
+    VirtualAddress(start)..VirtualAddress(end)
 }
 
 /// All writable sections, excluding the stack.
-pub fn scratch_range() -> Range<usize> {
+pub fn scratch_range() -> Range<VirtualAddress> {
     linker_region!(eh_stack_limit, bss_end)
 }
 
 /// UART console range.
-pub fn console_uart_range() -> Range<usize> {
+pub fn console_uart_range() -> Range<VirtualAddress> {
     const CONSOLE_LEN: usize = 1; // `uart::Uart` only uses one u8 register.
 
-    BASE_ADDRESS..(BASE_ADDRESS + CONSOLE_LEN)
+    VirtualAddress(BASE_ADDRESS)..VirtualAddress(BASE_ADDRESS + CONSOLE_LEN)
 }
 
 /// Read-write data (original).
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index 3943b03..e067e96 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -18,7 +18,7 @@
 use aarch64_paging::idmap::IdMap;
 use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
 use aarch64_paging::MapError;
-use core::{ops::Range, result};
+use core::result;
 
 /// Software bit used to indicate a device that should be lazily mapped.
 pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
@@ -88,50 +88,44 @@
 
     /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
     /// nGnRE device memory.
-    pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, DEVICE_LAZY)
+    pub fn map_device_lazy(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, DEVICE_LAZY)
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as valid
     /// nGnRE device memory.
-    pub fn map_device(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, DEVICE)
+    pub fn map_device(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, DEVICE)
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as non-executable
     /// and writable normal memory.
-    pub fn map_data(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, DATA)
+    pub fn map_data(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, DATA)
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as non-executable,
     /// read-only and writable-clean normal memory.
-    pub fn map_data_dbm(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, DATA_DBM)
+    pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, DATA_DBM)
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as read-only
     /// normal memory.
-    pub fn map_code(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, CODE)
+    pub fn map_code(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, CODE)
     }
 
     /// Maps the given range of virtual addresses to the physical addresses as non-executable
     /// and read-only normal memory.
-    pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<()> {
-        self.map_range(range, RODATA)
-    }
-
-    /// Maps the given range of virtual addresses to the physical addresses with the given
-    /// attributes.
-    fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<()> {
-        self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
+    pub fn map_rodata(&mut self, range: &MemoryRegion) -> Result<()> {
+        self.idmap.map_range(range, RODATA)
     }
 
     /// Applies the provided updater function to a number of PTEs corresponding to a given memory
     /// range.
-    pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<()> {
-        self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
+    pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
+        self.idmap.modify_range(range, f)
     }
 }
 
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 4a75b97..c8b7d35 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -20,7 +20,7 @@
 use super::util::{page_4kb_of, virt_to_phys};
 use crate::dsb;
 use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
 use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
 use alloc::boxed::Box;
 use alloc::vec::Vec;
@@ -44,6 +44,11 @@
 
 /// Memory range.
 pub type MemoryRange = Range<usize>;
+
+fn get_va_range(range: &MemoryRange) -> VaRange {
+    VaRange::new(range.start, range.end)
+}
+
 type Result<T> = result::Result<T, MemoryTrackerError>;
 
 #[derive(Clone, Copy, Debug, Default, PartialEq)]
@@ -82,7 +87,7 @@
         mut page_table: PageTable,
         total: MemoryRange,
         mmio_range: MemoryRange,
-        payload_range: Option<MemoryRange>,
+        payload_range: Option<Range<VirtualAddress>>,
     ) -> Self {
         assert!(
             !total.overlaps(&mmio_range),
@@ -106,7 +111,7 @@
             regions: ArrayVec::new(),
             mmio_regions: ArrayVec::new(),
             mmio_range,
-            payload_range,
+            payload_range: payload_range.map(|r| r.start.0..r.end.0),
         }
     }
 
@@ -132,7 +137,7 @@
     pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
         let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
         self.check(&region)?;
-        self.page_table.map_rodata(range).map_err(|e| {
+        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
             error!("Error during range allocation: {e}");
             MemoryTrackerError::FailedToMap
         })?;
@@ -143,7 +148,7 @@
     pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
         let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
         self.check(&region)?;
-        self.page_table.map_data_dbm(range).map_err(|e| {
+        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
             error!("Error during mutable range allocation: {e}");
             MemoryTrackerError::FailedToMap
         })?;
@@ -173,7 +178,7 @@
             return Err(MemoryTrackerError::Full);
         }
 
-        self.page_table.map_device_lazy(&range).map_err(|e| {
+        self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
             error!("Error during MMIO device mapping: {e}");
             MemoryTrackerError::FailedToMap
         })?;
@@ -215,7 +220,7 @@
     pub fn mmio_unmap_all(&mut self) -> Result<()> {
         for range in &self.mmio_regions {
             self.page_table
-                .modify_range(range, &mmio_guard_unmap_page)
+                .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
                 .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
         }
         Ok(())
@@ -266,11 +271,12 @@
     /// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
     /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
     pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
-        let page_range = page_4kb_of(addr)..page_4kb_of(addr) + MMIO_GUARD_GRANULE_SIZE;
+        let page_start = VirtualAddress(page_4kb_of(addr));
+        let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
         self.page_table
             .modify_range(&page_range, &verify_lazy_mapped_block)
             .map_err(|_| MemoryTrackerError::InvalidPte)?;
-        get_hypervisor().mmio_guard_map(page_range.start)?;
+        get_hypervisor().mmio_guard_map(page_start.0)?;
         // Maps a single device page, breaking up block mappings if necessary.
         self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
     }
@@ -286,7 +292,7 @@
         // Now flush writable-dirty pages in those regions.
         for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
             self.page_table
-                .modify_range(range, &flush_dirty_range)
+                .modify_range(&get_va_range(range), &flush_dirty_range)
                 .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
         }
         Ok(())
@@ -296,8 +302,9 @@
     /// In general, this should be called from the exception handler when hardware dirty
     /// state management is disabled or unavailable.
     pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
+        let addr = VirtualAddress(addr);
         self.page_table
-            .modify_range(&(addr..addr + 1), &mark_dirty_block)
+            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
             .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
     }
 }