Merge "[cbor] Separate cbor conversion functions in an independent lib" into main
diff --git a/pvmfw/src/config.rs b/pvmfw/src/config.rs
index 926f42b..78b6323 100644
--- a/pvmfw/src/config.rs
+++ b/pvmfw/src/config.rs
@@ -16,8 +16,10 @@
 
 use core::fmt;
 use core::mem;
+use core::num::NonZeroUsize;
 use core::ops::Range;
 use core::result;
+use core::slice;
 use log::{info, warn};
 use static_assertions::const_assert_eq;
 use vmbase::util::RangeExt;
@@ -53,6 +55,8 @@
     MissingEntry(Entry),
     /// Range described by entry does not fit within config data.
     EntryOutOfBounds(Entry, Range<usize>, Range<usize>),
+    /// Entries are out of order.
+    EntryOutOfOrder,
 }
 
 impl fmt::Display for Error {
@@ -70,6 +74,7 @@
                     "Entry {entry:?} out of bounds: {range:#x?} must be within range {limits:#x?}"
                 )
             }
+            Self::EntryOutOfOrder => write!(f, "Entries are out of order"),
         }
     }
 }
@@ -133,19 +138,6 @@
     size: u32,
 }
 
-impl HeaderEntry {
-    pub fn as_range(&self) -> Option<Range<usize>> {
-        let size = usize::try_from(self.size).unwrap();
-        if size != 0 {
-            let offset = self.offset.try_into().unwrap();
-            // Allow overflows here for the Range to properly describe the entry (validated later).
-            Some(offset..(offset + size))
-        } else {
-            None
-        }
-    }
-}
-
 #[repr(C, packed)]
 #[derive(Clone, Copy, Debug, Eq, FromZeroes, FromBytes, PartialEq)]
 pub struct Version {
@@ -161,10 +153,38 @@
     }
 }
 
+/// A range that is guaranteed to be non-empty.
+#[derive(Debug, Copy, Clone)]
+struct NonEmptyRange {
+    start: usize,
+    size: NonZeroUsize,
+}
+
+impl NonEmptyRange {
+    pub fn new(start: usize, size: usize) -> Option<Self> {
+        // Ensure end() is safe.
+        start.checked_add(size).unwrap();
+
+        Some(Self { start, size: NonZeroUsize::new(size)? })
+    }
+
+    fn end(&self) -> usize {
+        self.start + self.len()
+    }
+
+    fn len(&self) -> usize {
+        self.size.into()
+    }
+
+    fn as_range(&self) -> Range<usize> {
+        self.start..self.end()
+    }
+}
+
 #[derive(Debug)]
 pub struct Config<'a> {
     body: &'a mut [u8],
-    ranges: [Option<Range<usize>>; Entry::COUNT],
+    ranges: [Option<NonEmptyRange>; Entry::COUNT],
 }
 
 impl<'a> Config<'a> {
@@ -209,68 +229,64 @@
         // Validate that we won't get an invalid alignment in the following due to padding to u64.
         const_assert_eq!(mem::size_of::<HeaderEntry>() % mem::size_of::<u64>(), 0);
 
+        // Ensure entries are in the body.
         let limits = header.body_lowest_bound()?..total_size;
-        let ranges = [
-            // TODO: Find a way to do this programmatically even if the trait
-            // `core::marker::Copy` is not implemented for `core::ops::Range<usize>`.
-            Self::validated_body_range(Entry::Bcc, &header_entries, &limits)?,
-            Self::validated_body_range(Entry::DebugPolicy, &header_entries, &limits)?,
-            Self::validated_body_range(Entry::VmDtbo, &header_entries, &limits)?,
-        ];
+        let mut ranges: [Option<NonEmptyRange>; Entry::COUNT] = [None; Entry::COUNT];
+        let mut last_end = 0;
+        for entry in [Entry::Bcc, Entry::DebugPolicy, Entry::VmDtbo] {
+            let Some(header_entry) = header_entries.get(entry as usize) else { continue };
+            let entry_offset = header_entry.offset.try_into().unwrap();
+            let entry_size = header_entry.size.try_into().unwrap();
+            let Some(range) = NonEmptyRange::new(entry_offset, entry_size) else { continue };
+            let range = range.as_range();
+            if !range.is_within(&limits) {
+                return Err(Error::EntryOutOfBounds(entry, range, limits));
+            }
+
+            if last_end > range.start {
+                return Err(Error::EntryOutOfOrder);
+            }
+            last_end = range.end;
+
+            ranges[entry as usize] = NonEmptyRange::new(
+                entry_offset - limits.start, // is_within() above guarantees this cannot underflow.
+                entry_size,
+            );
+        }
+        // Fail early if the BCC entry is missing.
+        ranges[Entry::Bcc as usize].ok_or(Error::MissingEntry(Entry::Bcc))?;
 
         Ok(Self { body, ranges })
     }
 
     /// Get slice containing the platform BCC.
-    pub fn get_entries(&mut self) -> Result<(&mut [u8], Option<&mut [u8]>)> {
+    pub fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>) {
         // This assumes that the blobs are in-order w.r.t. the entries.
-        let bcc_range = self.get_entry_range(Entry::Bcc).ok_or(Error::MissingEntry(Entry::Bcc))?;
+        let bcc_range = self.get_entry_range(Entry::Bcc);
         let dp_range = self.get_entry_range(Entry::DebugPolicy);
         let vm_dtbo_range = self.get_entry_range(Entry::VmDtbo);
         // TODO(b/291191157): Provision device assignment with this.
         if let Some(vm_dtbo_range) = vm_dtbo_range {
             info!("Found VM DTBO at {:?}", vm_dtbo_range);
         }
-        let bcc_start = bcc_range.start;
-        let bcc_end = bcc_range.len();
-        let (_, rest) = self.body.split_at_mut(bcc_start);
-        let (bcc, rest) = rest.split_at_mut(bcc_end);
 
-        let dp = if let Some(dp_range) = dp_range {
-            let dp_start = dp_range.start.checked_sub(bcc_range.end).unwrap();
-            let dp_end = dp_range.len();
-            let (_, rest) = rest.split_at_mut(dp_start);
-            let (dp, _) = rest.split_at_mut(dp_end);
-            Some(dp)
-        } else {
-            None
-        };
-
-        Ok((bcc, dp))
-    }
-
-    pub fn get_entry_range(&self, entry: Entry) -> Option<Range<usize>> {
-        self.ranges[entry as usize].clone()
-    }
-
-    fn validated_body_range(
-        entry: Entry,
-        header_entries: &[HeaderEntry],
-        limits: &Range<usize>,
-    ) -> Result<Option<Range<usize>>> {
-        if let Some(header_entry) = header_entries.get(entry as usize) {
-            if let Some(r) = header_entry.as_range() {
-                return if r.start <= r.end && r.is_within(limits) {
-                    let start = r.start - limits.start;
-                    let end = r.end - limits.start;
-
-                    Ok(Some(start..end))
-                } else {
-                    Err(Error::EntryOutOfBounds(entry, r, limits.clone()))
-                };
-            }
+        // SAFETY: The ranges were validated at construction time to lie within the body and not
+        // to overlap, so the slices created below are in-bounds and disjoint.
+        unsafe {
+            let ptr = self.body.as_mut_ptr() as usize;
+            (
+                Self::from_raw_range_mut(ptr, bcc_range.unwrap()),
+                dp_range.map(|dp_range| Self::from_raw_range_mut(ptr, dp_range)),
+            )
         }
+    }
 
-        Ok(None)
+    fn get_entry_range(&self, entry: Entry) -> Option<NonEmptyRange> {
+        self.ranges[entry as usize]
+    }
+
+    unsafe fn from_raw_range_mut(ptr: usize, range: NonEmptyRange) -> &'a mut [u8] {
+        // SAFETY: The caller must ensure that the range is valid from ptr.
+        unsafe { slice::from_raw_parts_mut((ptr + range.start) as *mut u8, range.len()) }
     }
 }
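
A note on the pvmfw/src/config.rs change above: replacing Option<Range<usize>> with NonEmptyRange is what allows get_entries() to drop its Result. The constructor now rejects missing, out-of-bounds and out-of-order entries up front, so the stored ranges are guaranteed to be non-empty, in-bounds and disjoint. The following standalone sketch (illustration only, not pvmfw code, though the names mirror the patch) reproduces the key properties of the type:

use core::num::NonZeroUsize;

/// Illustrative copy of the patch's NonEmptyRange, kept self-contained so it runs as-is.
#[derive(Debug, Copy, Clone)]
struct NonEmptyRange {
    start: usize,
    size: NonZeroUsize,
}

impl NonEmptyRange {
    fn new(start: usize, size: usize) -> Option<Self> {
        // As in the patch: panic early if start + size would overflow, so end() stays safe.
        start.checked_add(size).unwrap();
        Some(Self { start, size: NonZeroUsize::new(size)? })
    }

    fn end(&self) -> usize {
        self.start + self.size.get()
    }
}

fn main() {
    // A zero-sized header entry yields no range, like the removed HeaderEntry::as_range().
    assert!(NonEmptyRange::new(0x100, 0).is_none());

    // A valid entry keeps start and size together; this one covers 0x100..0x120.
    let r = NonEmptyRange::new(0x100, 0x20).unwrap();
    assert_eq!(r.end(), 0x120);

    // Unlike Range<usize>, the type is Copy, so arrays can be built with [None; N],
    // which is what lets the patch write [None; Entry::COUNT].
    let ranges: [Option<NonEmptyRange>; 3] = [None; 3];
    assert!(ranges.iter().all(Option::is_none));
}

In the patch itself, the constructor additionally subtracts limits.start before storing each range, so the ranges index directly into the body slice that get_entries() later turns into mutable slices.
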
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 3efa61e..9c929a9 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -207,10 +207,7 @@
         RebootReason::InvalidConfig
     })?;
 
-    let (bcc_slice, debug_policy) = appended.get_entries().map_err(|e| {
-        error!("Failed to obtained the config entries: {e}");
-        RebootReason::InvalidConfig
-    })?;
+    let (bcc_slice, debug_policy) = appended.get_entries();
 
     // Up to this point, we were using the built-in static (from .rodata) page tables.
     MEMORY.lock().replace(MemoryTracker::new(
@@ -430,10 +427,10 @@
         }
     }
 
-    fn get_entries(&mut self) -> config::Result<(&mut [u8], Option<&mut [u8]>)> {
+    fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>) {
         match self {
             Self::Config(ref mut cfg) => cfg.get_entries(),
-            Self::LegacyBcc(ref mut bcc) => Ok((bcc, None)),
+            Self::LegacyBcc(ref mut bcc) => (bcc, None),
         }
     }
 }
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 684aa64..5283ffe 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -474,7 +474,7 @@
                 .into_iter()
                 .map(|x| VfioDevice {
                     sysfs_path: PathBuf::from(&x.sysfsPath),
-                    dtbo_node: x.dtboNode,
+                    dtbo_label: x.dtboLabel,
                 })
                 .collect::<Vec<_>>()
         } else {
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index b053d99..bb6066f 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -128,7 +128,7 @@
 #[derive(Clone, Debug)]
 pub struct VfioDevice {
     pub sysfs_path: PathBuf,
-    pub dtbo_node: String,
+    pub dtbo_label: String,
 }
 
 /// The lifecycle state which the payload in the VM has reported itself to be in.
@@ -716,7 +716,7 @@
     }
 
     if let Some(p) = path.to_str() {
-        Ok(format!("--vfio={p},iommu=viommu,dt-symbol={0}", device.dtbo_node))
+        Ok(format!("--vfio={p},iommu=viommu,dt-symbol={0}", device.dtbo_label))
     } else {
         bail!("invalid path {path:?}");
     }
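
For the crosvm.rs hunk above, only the struct field feeding dt-symbol is renamed; the flag format itself is unchanged. A minimal sketch of the resulting string, using made-up path and label values (hypothetical, not taken from the patch):

fn main() {
    // Hypothetical values for illustration; the real ones come from the device
    // assignment configuration described by assignable_devices.xsd.
    let device_path = "/sys/bus/platform/devices/example-device";
    let dtbo_label = "example_label";

    let flag = format!("--vfio={device_path},iommu=viommu,dt-symbol={dtbo_label}");
    assert_eq!(
        flag,
        "--vfio=/sys/bus/platform/devices/example-device,iommu=viommu,dt-symbol=example_label"
    );
}
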
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
index f3a7617..172dc59 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
@@ -25,7 +25,7 @@
 interface IVirtualizationServiceInternal {
     parcelable BoundDevice {
         String sysfsPath;
-        String dtboNode;
+        String dtboLabel;
     }
     /**
      * Removes the memlock rlimit of the calling process.
diff --git a/virtualizationservice/assignable_devices.xsd b/virtualizationservice/assignable_devices.xsd
index 8f43019..2fbc1c9 100644
--- a/virtualizationservice/assignable_devices.xsd
+++ b/virtualizationservice/assignable_devices.xsd
@@ -25,7 +25,7 @@
     </xs:element>
     <xs:complexType name="device">
         <xs:attribute name="kind" type="xs:string"/>
-        <xs:attribute name="dtbo_node" type="xs:string"/>
+        <xs:attribute name="dtbo_label" type="xs:string"/>
         <xs:attribute name="sysfs_path" type="xs:string"/>
     </xs:complexType>
 </xs:schema>
diff --git a/virtualizationservice/schema/current.txt b/virtualizationservice/schema/current.txt
index ef99294..6e3fbb6 100644
--- a/virtualizationservice/schema/current.txt
+++ b/virtualizationservice/schema/current.txt
@@ -3,10 +3,10 @@
 
   public class Device {
     ctor public Device();
-    method public String getDtbo_node();
+    method public String getDtbo_label();
     method public String getKind();
     method public String getSysfs_path();
-    method public void setDtbo_node(String);
+    method public void setDtbo_label(String);
     method public void setKind(String);
     method public void setSysfs_path(String);
   }
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index ed5c513..a19ecd2 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -209,7 +209,7 @@
             .into_iter()
             .filter_map(|x| {
                 if devices.contains(&x.sysfs_path) {
-                    Some(BoundDevice { sysfsPath: x.sysfs_path, dtboNode: x.dtbo_node })
+                    Some(BoundDevice { sysfsPath: x.sysfs_path, dtboLabel: x.dtbo_label })
                 } else {
                     None
                 }
@@ -222,7 +222,7 @@
 #[derive(Debug, Deserialize)]
 struct Device {
     kind: String,
-    dtbo_node: String,
+    dtbo_label: String,
     sysfs_path: String,
 }
 
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
index 618c165..2968ff9 100644
--- a/virtualizationservice/vfio_handler/src/aidl.rs
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -239,7 +239,7 @@
     if dt_table_header.magic.get() != DT_TABLE_MAGIC
         || dt_table_header.header_size.get() as usize != size_of::<DtTableHeader>()
     {
-        return Err(anyhow!("DtTableHeader is invalid")).or_service_specific_exception(-1)?;
+        return Err(anyhow!("DtTableHeader is invalid")).or_service_specific_exception(-1);
     }
     Ok(dt_table_header)
 }
@@ -250,7 +250,7 @@
     index: u32,
 ) -> binder::Result<DtTableEntry> {
     if index >= header.dt_entry_count.get() {
-        return Err(anyhow!("Invalid dtbo index {index}")).or_service_specific_exception(-1)?;
+        return Err(anyhow!("Invalid dtbo index {index}")).or_service_specific_exception(-1);
     }
     let Some(prev_dt_entry_total_size) = header.dt_entry_size.get().checked_mul(index) else {
         return Err(anyhow!("Unexpected arithmetic result"))