vmbase: Move stack to end of writable_data

Instead of imposing an arbitrary size for the stack, allow clients to
query a validated address range of a given size with stack_range(),
which places the stack at the largest address possible and lets it
extend downwards (as is conventional on AArch64).
Keep allocating 40 pages of stack in vmbase_example, rialto, and pvmfw.
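
A minimal sketch of the placement logic (illustration only: the helper
name stack_range_in, its signature and the check shown here are
assumptions, not the actual vmbase implementation):

  use core::ops::Range;

  /// Hypothetical helper: carve a `size`-byte stack out of the top of
  /// the writable_data region, so it starts at the largest address
  /// possible and grows downwards from there.
  fn stack_range_in(writable_data: &Range<usize>, size: usize) -> Range<usize> {
      let begin = writable_data.end.checked_sub(size).unwrap();
      // Validate that the requested stack fits within writable_data.
      assert!(begin >= writable_data.start);
      begin..writable_data.end
  }

Clients such as pvmfw then map the returned range with map_data(), as
in the mmu.rs hunk below.
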
Bug: 279209532
Bug: 270684188
Test: atest vmbase_example.integration_test
Test: atest rialto_test
Test: atest MicrodroidTests
Change-Id: If205ccd4fa408e32e5533b880a85f4cccbd3f005
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index 8c05217..1f0a764 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -22,6 +22,7 @@
pub const SIZE_4MB: usize = 4 << 20;
pub const GUEST_PAGE_SIZE: usize = SIZE_4KB;
+pub const PVMFW_PAGE_SIZE: usize = SIZE_4KB;
/// Read a value from a system register.
#[macro_export]
diff --git a/pvmfw/src/mmu.rs b/pvmfw/src/mmu.rs
index 455edad..ee99710 100644
--- a/pvmfw/src/mmu.rs
+++ b/pvmfw/src/mmu.rs
@@ -15,6 +15,7 @@
//! Memory management.
use crate::helpers;
+use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::Attributes;
use aarch64_paging::paging::MemoryRegion;
@@ -44,6 +45,13 @@
start..end
}
+/// Region allocated for the stack.
+fn stack_range() -> Range<usize> {
+ const STACK_PAGES: usize = 40;
+
+ layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
+}
+
impl PageTable {
const ASID: usize = 1;
const ROOT_LEVEL: usize = 1;
@@ -54,7 +62,7 @@
page_table.map_code(&layout::text_range())?;
page_table.map_data(&layout::scratch_range())?;
- page_table.map_data(&layout::boot_stack_range())?;
+ page_table.map_data(&stack_range())?;
page_table.map_rodata(&layout::rodata_range())?;
page_table.map_data(&appended_payload_range())?;