Merge "pvmfw: Use zerocopy in gpt.rs and instance.rs"
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 8c7aeeb..3a09475 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -221,8 +221,11 @@
// compos.info.signature since it's only generated by CompOS.
// TODO(b/211458160): Remove cache-info.xml once we can plumb timestamp and isFactory of
// APEXes to the VM.
- return runner.run("cd " + path + "; find -type f -exec sha256sum {} \\;"
- + "| grep -v cache-info.xml | grep -v compos.info"
- + "| sort -k2");
+ return runner.run(
+ "cd "
+ + path
+ + " && find -type f -exec sha256sum {} \\;"
+ + "| grep -v cache-info.xml | grep -v compos.info"
+ + "| sort -k2");
}
}
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 29f8970..871db94 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -146,6 +146,7 @@
capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER SYS_ADMIN
service ueventd /system/bin/ueventd
+ user root
class core
critical
seclabel u:r:ueventd:s0
@@ -161,6 +162,7 @@
setenv HOSTNAME console
service init_debug_policy /system/bin/init_debug_policy
+ user root
oneshot
disabled
stdio_to_kmsg
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index 7ea1189..0ae2203 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -17,6 +17,8 @@
"libaarch64_paging",
"libbssl_ffi_nostd",
"libbuddy_system_allocator",
+ "libciborium_nostd",
+ "libciborium_io_nostd",
"libdiced_open_dice_nostd",
"libfdtpci",
"libhyp",
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index a773f1a..1c22861 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -37,6 +37,7 @@
mod virtio;
use alloc::boxed::Box;
+use alloc::string::ToString;
use crate::dice::PartialInputs;
use crate::entry::RebootReason;
@@ -46,6 +47,7 @@
use crate::instance::get_or_generate_instance_salt;
use crate::memory::MemoryTracker;
use crate::virtio::pci;
+use ciborium::{de::from_reader, value::Value};
use diced_open_dice::bcc_handover_main_flow;
use diced_open_dice::bcc_handover_parse;
use diced_open_dice::DiceArtifacts;
@@ -58,6 +60,19 @@
const NEXT_BCC_SIZE: usize = GUEST_PAGE_SIZE;
+type CiboriumError = ciborium::de::Error<ciborium_io::EndOfFile>;
+
+/// Decodes the provided CBOR-encoded bytes into a ciborium::Value, returning
+/// an error if the input is not valid CBOR or has trailing data.
+fn value_from_bytes(mut bytes: &[u8]) -> Result<Value, CiboriumError> {
+ let value = from_reader(&mut bytes)?;
+ // Ciborium tries to read one Value, but doesn't care if there is trailing data. We do.
+ if !bytes.is_empty() {
+ return Err(CiboriumError::Semantic(Some(0), "unexpected trailing data".to_string()));
+ }
+ Ok(value)
+}
+
fn main(
fdt: &mut Fdt,
signed_kernel: &[u8],
@@ -81,6 +96,18 @@
})?;
trace!("BCC: {bcc_handover:x?}");
+ // Minimal BCC verification - check the BCC exists & is valid CBOR.
+ // TODO(alanstokes): Do something more useful.
+ if let Some(bytes) = bcc_handover.bcc() {
+ let _ = value_from_bytes(bytes).map_err(|e| {
+ error!("Invalid BCC: {e:?}");
+ RebootReason::InvalidBcc
+ })?;
+ } else {
+ error!("Missing BCC");
+ return Err(RebootReason::InvalidBcc);
+ }
+
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
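
The trailing-data check in value_from_bytes() is the easiest part of this hunk to regress, so a minimal sketch of how it could be exercised follows. This hypothetical test is not part of the change; it assumes only the value_from_bytes() helper added above and the fact that 0x05 is the canonical CBOR encoding of the unsigned integer 5.

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn value_from_bytes_requires_all_input_to_be_consumed() {
        // A single well-formed CBOR item decodes successfully.
        assert!(value_from_bytes(&[0x05]).is_ok());
        // The same item followed by a stray byte is rejected, because
        // value_from_bytes() insists on consuming the whole input.
        assert!(value_from_bytes(&[0x05, 0x06]).is_err());
        // An empty input fails too; ciborium reports end-of-file.
        assert!(value_from_bytes(&[]).is_err());
    }
}
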
diff --git a/pvmfw/src/virtio/hal.rs b/pvmfw/src/virtio/hal.rs
index 5f70b33..7598a55 100644
--- a/pvmfw/src/virtio/hal.rs
+++ b/pvmfw/src/virtio/hal.rs
@@ -9,7 +9,21 @@
pub struct HalImpl;
-impl Hal for HalImpl {
+/// Implements the `Hal` trait for `HalImpl`.
+///
+/// # Safety
+///
+/// Callers of this implementation must follow the safety requirements documented for the unsafe
+/// methods.
+unsafe impl Hal for HalImpl {
+ /// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
+ ///
+ /// # Implementation Safety
+ ///
+ /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
+ /// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
+ /// block of memory using `alloc_shared` and returning a non-null pointer to it that is
+ /// aligned to `PAGE_SIZE`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let size = pages * PAGE_SIZE;
@@ -19,7 +33,7 @@
(paddr, vaddr)
}
- fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
+ unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let size = pages * PAGE_SIZE;
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
@@ -30,7 +44,13 @@
0
}
- fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
+ /// Converts a physical address used for MMIO to a virtual address which the driver can access.
+ ///
+ /// # Implementation Safety
+ ///
+ /// `mmio_phys_to_virt` satisfies the trait's safety requirement by checking that the mapped
+ /// memory region is within the PCI MMIO range.
+ unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
// Check that the region is within the PCI MMIO range that we read from the device tree. If
// not, the host is probably trying to do something malicious.
@@ -48,7 +68,7 @@
phys_to_virt(paddr)
}
- fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
+ unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
let size = buffer.len();
// TODO: Copy to a pre-shared region rather than allocating and sharing each time.
@@ -63,7 +83,7 @@
virt_to_phys(copy)
}
- fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
+ unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
let vaddr = phys_to_virt(paddr);
let size = buffer.len();
if direction == BufferDirection::DeviceToDriver {
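
The impl block and most of its methods gain unsafe because the Hal trait they implement (from the virtio-drivers crate) now carries those qualifiers. The sketch below shows the trait shape implied by the signatures in this change; it is illustrative only, not the crate's verbatim definition, and the import paths are assumptions.

use core::ptr::NonNull;
use virtio_drivers::{BufferDirection, PhysAddr};

// Trait shape implied by the method signatures above (a sketch, not the
// definition from the virtio-drivers crate).
pub unsafe trait Hal {
    // Safe to call; the implementation promises a unique, page-aligned,
    // non-null DMA buffer, as documented on dma_alloc above.
    fn dma_alloc(pages: usize, direction: BufferDirection) -> (PhysAddr, NonNull<u8>);
    // The remaining methods are unsafe: callers must pass addresses and
    // buffers obtained from the matching calls above.
    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32;
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8>;
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr;
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection);
}
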
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 99e07b6..3398d50 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -28,9 +28,10 @@
paging::{Attributes, MemoryRegion},
};
use buddy_system_allocator::LockedHeap;
+use core::ops::Range;
use hyp::get_hypervisor;
use log::{debug, error, info};
-use vmbase::{main, power::reboot};
+use vmbase::{layout, main, power::reboot};
const SZ_1K: usize = 1024;
const SZ_64K: usize = 64 * SZ_1K;
@@ -63,16 +64,8 @@
static mut HEAP: [u8; SZ_64K] = [0; SZ_64K];
-unsafe fn kimg_ptr(sym: &u8) -> *const u8 {
- sym as *const u8
-}
-
-unsafe fn kimg_addr(sym: &u8) -> usize {
- kimg_ptr(sym) as usize
-}
-
-unsafe fn kimg_region(begin: &u8, end: &u8) -> MemoryRegion {
- MemoryRegion::new(kimg_addr(begin), kimg_addr(end))
+fn into_memreg(r: &Range<usize>) -> MemoryRegion {
+ MemoryRegion::new(r.start, r.end)
}
fn init_heap() {
@@ -86,11 +79,9 @@
fn init_kernel_pgt(pgt: &mut IdMap) -> Result<()> {
// The first 1 GiB of address space is used by crosvm for MMIO.
let reg_dev = MemoryRegion::new(0, SZ_1G);
- // SAFETY: Taking addresses of kernel image sections to set up page table
- // mappings. Not taking ownerhip of the memory.
- let reg_text = unsafe { kimg_region(&text_begin, &text_end) };
- let reg_rodata = unsafe { kimg_region(&rodata_begin, &rodata_end) };
- let reg_data = unsafe { kimg_region(&data_begin, &boot_stack_end) };
+ let reg_text = into_memreg(&layout::text_range());
+ let reg_rodata = into_memreg(&layout::rodata_range());
+ let reg_data = into_memreg(&layout::writable_region());
debug!("Preparing kernel page table.");
debug!(" dev: {}-{}", reg_dev.start(), reg_dev.end());
@@ -143,13 +134,4 @@
}
}
-extern "C" {
- static text_begin: u8;
- static text_end: u8;
- static rodata_begin: u8;
- static rodata_end: u8;
- static data_begin: u8;
- static boot_stack_end: u8;
-}
-
main!(main);
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 117cbc8..41a3ff4 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -98,7 +98,7 @@
struct HalImpl;
-impl Hal for HalImpl {
+unsafe impl Hal for HalImpl {
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
@@ -110,7 +110,7 @@
(paddr, vaddr)
}
- fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
+ unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
@@ -121,17 +121,17 @@
0
}
- fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
+ unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
- fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
+ unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
let vaddr = buffer.cast();
// Nothing to do, as the host already has access to all memory.
virt_to_phys(vaddr)
}
- fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
+ unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
// Nothing to do, as the host already has access to all memory and we didn't copy the buffer
// anywhere else.
}
diff --git a/vmbase/sections.ld b/vmbase/sections.ld
index 87b909d..9ed2455 100644
--- a/vmbase/sections.ld
+++ b/vmbase/sections.ld
@@ -39,12 +39,10 @@
* Collect together the code. This is page aligned so it can be mapped
* as executable-only.
*/
- .init : ALIGN(4096) {
+ .text : ALIGN(4096) {
text_begin = .;
*(.init.entry)
*(.init.*)
- } >image
- .text : {
*(.text.*)
} >image
text_end = .;
diff --git a/vmbase/src/layout.rs b/vmbase/src/layout.rs
index b0a5173..b5ab449 100644
--- a/vmbase/src/layout.rs
+++ b/vmbase/src/layout.rs
@@ -14,53 +14,69 @@
//! Memory layout.
-use crate::linker;
use core::ops::Range;
use core::ptr::addr_of;
+/// Get an address from a linker-defined symbol.
+#[macro_export]
+macro_rules! linker_addr {
+ ($symbol:ident) => {{
+ unsafe { addr_of!($crate::linker::$symbol) as usize }
+ }};
+}
+
+/// Get the address range between a pair of linker-defined symbols.
+#[macro_export]
+macro_rules! linker_region {
+ ($begin:ident,$end:ident) => {{
+ let start = linker_addr!($begin);
+ let end = linker_addr!($end);
+
+ start..end
+ }};
+}
+
/// Memory reserved for the DTB.
pub fn dtb_range() -> Range<usize> {
- unsafe { (addr_of!(linker::dtb_begin) as usize)..(addr_of!(linker::dtb_end) as usize) }
+ linker_region!(dtb_begin, dtb_end)
}
/// Executable code.
pub fn text_range() -> Range<usize> {
- unsafe { (addr_of!(linker::text_begin) as usize)..(addr_of!(linker::text_end) as usize) }
+ linker_region!(text_begin, text_end)
}
/// Read-only data.
pub fn rodata_range() -> Range<usize> {
- unsafe { (addr_of!(linker::rodata_begin) as usize)..(addr_of!(linker::rodata_end) as usize) }
+ linker_region!(rodata_begin, rodata_end)
}
/// Initialised writable data.
pub fn data_range() -> Range<usize> {
- unsafe { (addr_of!(linker::data_begin) as usize)..(addr_of!(linker::data_end) as usize) }
+ linker_region!(data_begin, data_end)
}
/// Zero-initialised writable data.
pub fn bss_range() -> Range<usize> {
- unsafe { (addr_of!(linker::bss_begin) as usize)..(addr_of!(linker::bss_end) as usize) }
+ linker_region!(bss_begin, bss_end)
}
/// Writable data region for the stack.
pub fn boot_stack_range() -> Range<usize> {
- unsafe {
- (addr_of!(linker::boot_stack_begin) as usize)..(addr_of!(linker::boot_stack_end) as usize)
- }
+ linker_region!(boot_stack_begin, boot_stack_end)
}
/// Writable data, including the stack.
pub fn writable_region() -> Range<usize> {
- data_range().start..boot_stack_range().end
+ linker_region!(data_begin, boot_stack_end)
}
/// Read-write data (original).
pub fn data_load_address() -> usize {
- unsafe { addr_of!(linker::data_lma) as usize }
+ linker_addr!(data_lma)
}
/// End of the binary image.
pub fn binary_end() -> usize {
- unsafe { addr_of!(linker::bin_end) as usize }
+ linker_addr!(bin_end)
}
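
To make the new macros concrete, here is roughly what linker_region!(text_begin, text_end) expands to inside text_range(). The function name below is hypothetical, and the sketch assumes crate::linker still declares the extern text_begin/text_end symbols emitted by sections.ld.

use core::ops::Range;
use core::ptr::addr_of;

// Hypothetical, hand-expanded equivalent of text_range() after this change.
fn text_range_expanded() -> Range<usize> {
    // Only the addresses of the linker-provided symbols are taken; the
    // symbols themselves are never dereferenced.
    let start = unsafe { addr_of!(crate::linker::text_begin) as usize };
    let end = unsafe { addr_of!(crate::linker::text_end) as usize };
    start..end
}

Callers such as rialto's init_kernel_pgt() then deal only with Range<usize> values and convert them with into_memreg(), which is why the unsafe kimg_* helpers and the local extern block are no longer needed.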