Merge "More javadoc for capabilities" into main
diff --git a/Android.bp b/Android.bp
index 696a963..03bea5a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -62,6 +62,27 @@
},
}
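+// Flag-aware cc_defaults: adds cflags based on AVF release build flags.
+// cc modules opt in via `defaults: ["avf_build_flags_cc"]`.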
+soong_config_module_type {
+ name: "avf_flag_aware_cc_defaults",
+ module_type: "cc_defaults",
+ config_namespace: "ANDROID",
+ bool_variables: [
+ "release_avf_enable_virt_cpufreq",
+ ],
+ properties: [
+ "cflags",
+ ],
+}
+
+avf_flag_aware_cc_defaults {
+ name: "avf_build_flags_cc",
+ soong_config_variables: {
+ release_avf_enable_virt_cpufreq: {
+ cflags: ["-DAVF_ENABLE_VIRT_CPUFREQ=1"],
+ },
+ },
+}
+
genrule_defaults {
name: "dts_to_dtb",
tools: ["dtc"],
diff --git a/apex/canned_fs_config b/apex/canned_fs_config
index a990901..5afd9d6 100644
--- a/apex/canned_fs_config
+++ b/apex/canned_fs_config
@@ -1,3 +1 @@
/bin/virtualizationservice 0 2000 0755 capabilities=0x1000001 # CAP_CHOWN, CAP_SYS_RESOURCE
-/bin/crosvm 0 3013 0755 capabilities=0x800000 # CAP_SYS_NICE
-/bin/virtmgr 0 3013 0755 capabilities=0x800000 # CAP_SYS_NICE
diff --git a/apex/empty-payload-apk/Android.bp b/apex/empty-payload-apk/Android.bp
index e78daec..72ec392 100644
--- a/apex/empty-payload-apk/Android.bp
+++ b/apex/empty-payload-apk/Android.bp
@@ -18,6 +18,7 @@
cc_library {
name: "MicrodroidEmptyPayloadJniLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["empty_binary.cpp"],
shared_libs: ["libvm_payload#current"],
installable: true,
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index ada3ffb..8edd899 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -375,6 +375,10 @@
}
}
+// FFI types like `c_long` vary on 32/64-bit, and the check is only needed on
+// 64-bit conversions. Fixing this lint makes the code less readable.
+#[allow(unknown_lints)]
+#[allow(clippy::unnecessary_fallible_conversions)]
fn try_into_fs_stat(st: Statvfs) -> Result<FsStat, std::num::TryFromIntError> {
Ok(FsStat {
blockSize: st.block_size().try_into()?,
diff --git a/authfs/tests/benchmarks/Android.bp b/authfs/tests/benchmarks/Android.bp
index 5ef01cc..5820cb4 100644
--- a/authfs/tests/benchmarks/Android.bp
+++ b/authfs/tests/benchmarks/Android.bp
@@ -29,6 +29,7 @@
cc_binary {
name: "measure_io",
+ defaults: ["avf_build_flags_cc"],
srcs: [
"src/measure_io.cpp",
],
diff --git a/compos/apex/composd.rc b/compos/apex/composd.rc
index aa4b575..55f3737 100644
--- a/compos/apex/composd.rc
+++ b/compos/apex/composd.rc
@@ -19,10 +19,7 @@
interface aidl android.system.composd
disabled
oneshot
- # Explicitly specify empty capabilities, otherwise composd will inherit all
- # the capabilities from init.
- # Note: whether a process can use capabilities is controlled by SELinux, so
- # inheriting all the capabilities from init is not a security issue.
- # However, for defense-in-depth and just for the sake of bookkeeping it's
- # better to explicitly state that composd doesn't need any capabilities.
+ # We need SYS_NICE so that the crosvm child process can use it
+ # (b/322197421). composd itself never uses it (and isn't allowed to by
+ # SELinux).
capabilities SYS_NICE
diff --git a/compos/compos_key_helper/Android.bp b/compos/compos_key_helper/Android.bp
index 6b4b61e..7d27525 100644
--- a/compos/compos_key_helper/Android.bp
+++ b/compos/compos_key_helper/Android.bp
@@ -5,6 +5,7 @@
cc_defaults {
name: "compos_key_defaults",
+ defaults: ["avf_build_flags_cc"],
apex_available: ["com.android.compos"],
shared_libs: [
diff --git a/compos/verify/native/Android.bp b/compos/verify/native/Android.bp
index 70cb2ab..ac2fcfb 100644
--- a/compos/verify/native/Android.bp
+++ b/compos/verify/native/Android.bp
@@ -25,6 +25,7 @@
cc_library_static {
name: "libcompos_verify_native_cpp",
+ defaults: ["avf_build_flags_cc"],
srcs: ["verify_native.cpp"],
static_libs: ["libcompos_key"],
shared_libs: [
diff --git a/demo_native/Android.bp b/demo_native/Android.bp
index 7b6967e..facb2bb 100644
--- a/demo_native/Android.bp
+++ b/demo_native/Android.bp
@@ -5,6 +5,7 @@
cc_binary {
name: "vm_demo_native",
+ defaults: ["avf_build_flags_cc"],
srcs: ["main.cpp"],
static_libs: [
"libbase",
diff --git a/javalib/api/test-current.txt b/javalib/api/test-current.txt
index 958005f..5aff93f 100644
--- a/javalib/api/test-current.txt
+++ b/javalib/api/test-current.txt
@@ -7,12 +7,14 @@
}
public final class VirtualMachineConfig {
+ method @FlaggedApi("RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM") @NonNull public java.util.List<java.lang.String> getExtraApks();
method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @Nullable public String getOs();
method @Nullable public String getPayloadConfigPath();
method public boolean isVmConsoleInputSupported();
}
public static final class VirtualMachineConfig.Builder {
+ method @FlaggedApi("RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM") @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder addExtraApk(@NonNull String);
method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder setOs(@NonNull String);
method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setPayloadConfigPath(@NonNull String);
method @FlaggedApi("RELEASE_AVF_ENABLE_VENDOR_MODULES") @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setVendorDiskImage(@NonNull java.io.File);
diff --git a/javalib/jni/Android.bp b/javalib/jni/Android.bp
index 6e2a129..24dece2 100644
--- a/javalib/jni/Android.bp
+++ b/javalib/jni/Android.bp
@@ -5,6 +5,7 @@
cc_library_shared {
name: "libvirtualizationservice_jni",
+ defaults: ["avf_build_flags_cc"],
srcs: [
"android_system_virtualmachine_VirtualizationService.cpp",
],
@@ -20,6 +21,7 @@
cc_library_shared {
name: "libvirtualmachine_jni",
+ defaults: ["avf_build_flags_cc"],
srcs: [
"android_system_virtualmachine_VirtualMachine.cpp",
],
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index 16f9631..5025e88 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -54,6 +54,8 @@
import android.annotation.WorkerThread;
import android.content.ComponentCallbacks2;
import android.content.Context;
+import android.content.pm.ApplicationInfo;
+import android.content.pm.PackageManager;
import android.content.res.Configuration;
import android.os.Binder;
import android.os.IBinder;
@@ -76,7 +78,6 @@
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -807,10 +808,30 @@
createVmInputPipes();
}
+ VirtualMachineConfig vmConfig = getConfig();
VirtualMachineAppConfig appConfig =
- getConfig().toVsConfig(mContext.getPackageManager());
+ vmConfig.toVsConfig(mContext.getPackageManager());
appConfig.name = mName;
+ if (!vmConfig.getExtraApks().isEmpty()) {
+ // Extra APKs were specified directly, rather than via a config file.
+ // We've already populated the file names for the extra APKs and IDSigs
+ // (via setupExtraApks), but we also need to open the APK files and add
+ // fds for them to the payload config.
+ // This isn't needed when the extra APKs are specified in a config file;
+ // in that case Virtualization Manager opens them itself.
+ List<ParcelFileDescriptor> extraApkFiles = new ArrayList<>(mExtraApks.size());
+ for (ExtraApkSpec extraApk : mExtraApks) {
+ try {
+ extraApkFiles.add(
+ ParcelFileDescriptor.open(extraApk.apk, MODE_READ_ONLY));
+ } catch (FileNotFoundException e) {
+ throw new VirtualMachineException("Failed to open extra APK", e);
+ }
+ }
+ appConfig.payload.getPayloadConfig().extraApks = extraApkFiles;
+ }
+
try {
createIdSigs(service, appConfig);
} catch (FileNotFoundException e) {
@@ -1239,6 +1260,46 @@
return result.toString();
}
+ /**
+ * Determines the extra APKs for the VM (from the payload config inside the application or
+ * from the package names in the VM config) and creates corresponding idsig file paths.
+ */
+ private static List<ExtraApkSpec> setupExtraApks(
+ @NonNull Context context, @NonNull VirtualMachineConfig config, @NonNull File vmDir)
+ throws VirtualMachineException {
+ String configPath = config.getPayloadConfigPath();
+ List<String> extraApks = config.getExtraApks();
+ if (configPath != null) {
+ return setupExtraApksFromConfigFile(context, vmDir, configPath);
+ } else if (!extraApks.isEmpty()) {
+ return setupExtraApksFromList(context, vmDir, extraApks);
+ } else {
+ return Collections.emptyList();
+ }
+ }
+
+ private static List<ExtraApkSpec> setupExtraApksFromConfigFile(
+ Context context, File vmDir, String configPath) throws VirtualMachineException {
+ try (ZipFile zipFile = new ZipFile(context.getPackageCodePath())) {
+ InputStream inputStream = zipFile.getInputStream(zipFile.getEntry(configPath));
+ List<String> apkList =
+ parseExtraApkListFromPayloadConfig(
+ new JsonReader(new InputStreamReader(inputStream)));
+
+ List<ExtraApkSpec> extraApks = new ArrayList<>(apkList.size());
+ for (int i = 0; i < apkList.size(); ++i) {
+ extraApks.add(
+ new ExtraApkSpec(
+ new File(apkList.get(i)),
+ new File(vmDir, EXTRA_IDSIG_FILE_PREFIX + i)));
+ }
+
+ return extraApks;
+ } catch (IOException e) {
+ throw new VirtualMachineException("Couldn't parse extra apks from the vm config", e);
+ }
+ }
+
private static List<String> parseExtraApkListFromPayloadConfig(JsonReader reader)
throws VirtualMachineException {
/*
@@ -1275,36 +1336,28 @@
}
}
- /**
- * Reads the payload config inside the application, parses extra APK information, and then
- * creates corresponding idsig file paths.
- */
- private static List<ExtraApkSpec> setupExtraApks(
- @NonNull Context context, @NonNull VirtualMachineConfig config, @NonNull File vmDir)
- throws VirtualMachineException {
- String configPath = config.getPayloadConfigPath();
- if (configPath == null) {
- return Collections.emptyList();
- }
- try (ZipFile zipFile = new ZipFile(context.getPackageCodePath())) {
- InputStream inputStream =
- zipFile.getInputStream(zipFile.getEntry(configPath));
- List<String> apkList =
- parseExtraApkListFromPayloadConfig(
- new JsonReader(new InputStreamReader(inputStream)));
-
- List<ExtraApkSpec> extraApks = new ArrayList<>();
- for (int i = 0; i < apkList.size(); ++i) {
- extraApks.add(
- new ExtraApkSpec(
- new File(apkList.get(i)),
- new File(vmDir, EXTRA_IDSIG_FILE_PREFIX + i)));
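+ /**
+ * Creates an ExtraApkSpec for each requested package name, pairing the package's APK path
+ * with an idsig file path under the VM directory.
+ */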
+ private static List<ExtraApkSpec> setupExtraApksFromList(
+ Context context, File vmDir, List<String> extraApkInfo) throws VirtualMachineException {
+ int count = extraApkInfo.size();
+ List<ExtraApkSpec> extraApks = new ArrayList<>(count);
+ for (int i = 0; i < count; i++) {
+ String packageName = extraApkInfo.get(i);
+ ApplicationInfo appInfo;
+ try {
+ appInfo =
+ context.getPackageManager()
+ .getApplicationInfo(
+ packageName, PackageManager.ApplicationInfoFlags.of(0));
+ } catch (PackageManager.NameNotFoundException e) {
+ throw new VirtualMachineException("Extra APK package not found", e);
}
- return Collections.unmodifiableList(extraApks);
- } catch (IOException e) {
- throw new VirtualMachineException("Couldn't parse extra apks from the vm config", e);
+ extraApks.add(
+ new ExtraApkSpec(
+ new File(appInfo.sourceDir),
+ new File(vmDir, EXTRA_IDSIG_FILE_PREFIX + i)));
}
+ return extraApks;
}
private void importInstanceFrom(@NonNull ParcelFileDescriptor instanceFd)
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index e8ef195..9688789 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -49,7 +49,10 @@
import java.io.OutputStream;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
+import java.util.List;
import java.util.Objects;
import java.util.zip.ZipFile;
@@ -67,7 +70,7 @@
private static String[] EMPTY_STRING_ARRAY = {};
// These define the schema of the config file persisted on disk.
- private static final int VERSION = 7;
+ private static final int VERSION = 8;
private static final String KEY_VERSION = "version";
private static final String KEY_PACKAGENAME = "packageName";
private static final String KEY_APKPATH = "apkPath";
@@ -82,6 +85,7 @@
private static final String KEY_VM_CONSOLE_INPUT_SUPPORTED = "vmConsoleInputSupported";
private static final String KEY_VENDOR_DISK_IMAGE_PATH = "vendorDiskImagePath";
private static final String KEY_OS = "os";
+ private static final String KEY_EXTRA_APKS = "extraApks";
/** @hide */
@Retention(RetentionPolicy.SOURCE)
@@ -140,6 +144,8 @@
/** Absolute path to the APK file containing the VM payload. */
@Nullable private final String mApkPath;
+ private final List<String> mExtraApks;
+
@DebugLevel private final int mDebugLevel;
/**
@@ -181,6 +187,7 @@
private VirtualMachineConfig(
@Nullable String packageName,
@Nullable String apkPath,
+ List<String> extraApks,
@Nullable String payloadConfigPath,
@Nullable String payloadBinaryName,
@DebugLevel int debugLevel,
@@ -195,6 +202,11 @@
// This is only called from Builder.build(); the builder handles parameter validation.
mPackageName = packageName;
mApkPath = apkPath;
+ mExtraApks =
+ extraApks.isEmpty()
+ ? Collections.emptyList()
+ : Collections.unmodifiableList(
+ Arrays.asList(extraApks.toArray(new String[0])));
mPayloadConfigPath = payloadConfigPath;
mPayloadBinaryName = payloadBinaryName;
mDebugLevel = debugLevel;
@@ -292,6 +304,13 @@
builder.setOs(os);
}
+ String[] extraApks = b.getStringArray(KEY_EXTRA_APKS);
+ if (extraApks != null) {
+ for (String extraApk : extraApks) {
+ builder.addExtraApk(extraApk);
+ }
+ }
+
return builder.build();
}
@@ -331,6 +350,10 @@
b.putString(KEY_VENDOR_DISK_IMAGE_PATH, mVendorDiskImage.getAbsolutePath());
}
b.putString(KEY_OS, mOs);
+ if (!mExtraApks.isEmpty()) {
+ String[] extraApks = mExtraApks.toArray(new String[0]);
+ b.putStringArray(KEY_EXTRA_APKS, extraApks);
+ }
b.writeToStream(output);
}
@@ -347,6 +370,19 @@
}
/**
+ * Returns the package names of any extra APKs that have been requested for the VM. They are
+ * returned in the order in which they were added via {@link Builder#addExtraApk}.
+ *
+ * @hide
+ */
+ @TestApi
+ @FlaggedApi("RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM")
+ @NonNull
+ public List<String> getExtraApks() {
+ return mExtraApks;
+ }
+
+ /**
* Returns the path within the APK to the payload config file that defines software aspects of
* the VM.
*
@@ -495,7 +531,8 @@
&& Objects.equals(this.mPayloadConfigPath, other.mPayloadConfigPath)
&& Objects.equals(this.mPayloadBinaryName, other.mPayloadBinaryName)
&& Objects.equals(this.mPackageName, other.mPackageName)
- && Objects.equals(this.mOs, other.mOs);
+ && Objects.equals(this.mOs, other.mOs)
+ && Objects.equals(this.mExtraApks, other.mExtraApks);
}
/**
@@ -623,6 +660,7 @@
@Nullable private final String mPackageName;
@Nullable private String mApkPath;
+ private final List<String> mExtraApks = new ArrayList<>();
@Nullable private String mPayloadConfigPath;
@Nullable private String mPayloadBinaryName;
@DebugLevel private int mDebugLevel = DEBUG_LEVEL_NONE;
@@ -683,6 +721,10 @@
throw new IllegalStateException(
"setPayloadConfigPath and setOs may not both be called");
}
+ if (!mExtraApks.isEmpty()) {
+ throw new IllegalStateException(
+ "setPayloadConfigPath and addExtraApk may not both be called");
+ }
} else {
if (mPayloadConfigPath != null) {
throw new IllegalStateException(
@@ -710,6 +752,7 @@
return new VirtualMachineConfig(
packageName,
apkPath,
+ mExtraApks,
mPayloadConfigPath,
mPayloadBinaryName,
mDebugLevel,
@@ -742,6 +785,21 @@
}
/**
+ * Specify the package name of an extra APK to be included in the VM. Each extra APK is
+ * mounted, in unzipped form, inside the VM, allowing access to the code and/or data within
+ * it. The VM entry point must be in the main APK.
+ *
+ * @hide
+ */
+ @TestApi
+ @FlaggedApi("RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM")
+ @NonNull
+ public Builder addExtraApk(@NonNull String packageName) {
+ mExtraApks.add(requireNonNull(packageName, "extra APK package name must not be null"));
+ return this;
+ }
+
+ /**
* Sets the path within the APK to the payload config file that defines software aspects of
* the VM. The file is a JSON file; see
* packages/modules/Virtualization/microdroid/payload/config/src/lib.rs for the format.
diff --git a/launcher/Android.bp b/launcher/Android.bp
index c6873ce..9835fc9 100644
--- a/launcher/Android.bp
+++ b/launcher/Android.bp
@@ -5,6 +5,7 @@
cc_binary {
name: "microdroid_launcher",
+ defaults: ["avf_build_flags_cc"],
srcs: ["main.cpp"],
shared_libs: [
"libbase",
diff --git a/libs/apkmanifest/Android.bp b/libs/apkmanifest/Android.bp
index e6fcbef..54c4f6c 100644
--- a/libs/apkmanifest/Android.bp
+++ b/libs/apkmanifest/Android.bp
@@ -4,6 +4,7 @@
cc_library_shared {
name: "libapkmanifest_native",
+ defaults: ["avf_build_flags_cc"],
srcs: ["native/*.cpp"],
shared_libs: [
"libandroidfw",
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index e818c68..cb7afda 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -23,6 +23,8 @@
use core::marker::PhantomData;
use core::{mem::size_of, ops::Range, slice::ChunksExact};
+use zerocopy::transmute;
+
/// Iterator over nodes sharing a same compatible string.
pub struct CompatibleIterator<'a> {
node: FdtNode<'a>,
@@ -132,12 +134,6 @@
}
}
-// Converts two cells into bytes of the same size
-fn two_cells_to_bytes(cells: [u32; 2]) -> [u8; 2 * size_of::<u32>()] {
- // SAFETY: the size of the two arrays are the same
- unsafe { core::mem::transmute::<[u32; 2], [u8; 2 * size_of::<u32>()]>(cells) }
-}
-
impl Reg<u64> {
const NUM_CELLS: usize = 2;
/// Converts addr and (optional) size to the format that is consumable by libfdt.
@@ -145,14 +141,10 @@
&self,
) -> ([u8; Self::NUM_CELLS * size_of::<u32>()], Option<[u8; Self::NUM_CELLS * size_of::<u32>()]>)
{
- let addr =
- two_cells_to_bytes([((self.addr >> 32) as u32).to_be(), (self.addr as u32).to_be()]);
- let size = if self.size.is_some() {
- let size = self.size.unwrap();
- Some(two_cells_to_bytes([((size >> 32) as u32).to_be(), (size as u32).to_be()]))
- } else {
- None
- };
+ let addr = transmute!([((self.addr >> 32) as u32).to_be(), (self.addr as u32).to_be()]);
+ let size =
+ self.size.map(|sz| transmute!([((sz >> 32) as u32).to_be(), (sz as u32).to_be()]));
+
(addr, size)
}
}
@@ -288,12 +280,8 @@
((self.size >> 32) as u32).to_be(),
(self.size as u32).to_be(),
];
- // SAFETY: the size of the two arrays are the same
- unsafe {
- core::mem::transmute::<[u32; Self::SIZE_CELLS], [u8; Self::SIZE_CELLS * size_of::<u32>()]>(
- buf,
- )
- }
+
+ transmute!(buf)
}
}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 8a4e251..0249d0d 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -18,141 +18,22 @@
#![no_std]
mod iterators;
+mod libfdt;
+mod result;
pub use iterators::{
AddressRange, CellIterator, CompatibleIterator, DescendantsIterator, MemRegIterator,
PropertyIterator, RangesIterator, Reg, RegIterator, SubnodeIterator,
};
+pub use result::{FdtError, Result};
-use core::cmp::max;
use core::ffi::{c_int, c_void, CStr};
-use core::fmt;
-use core::mem;
use core::ops::Range;
-use core::ptr;
-use core::result;
use cstr::cstr;
+use result::{fdt_err, fdt_err_expect_zero, fdt_err_or_option};
use zerocopy::AsBytes as _;
-/// Error type corresponding to libfdt error codes.
-#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum FdtError {
- /// FDT_ERR_NOTFOUND
- NotFound,
- /// FDT_ERR_EXISTS
- Exists,
- /// FDT_ERR_NOSPACE
- NoSpace,
- /// FDT_ERR_BADOFFSET
- BadOffset,
- /// FDT_ERR_BADPATH
- BadPath,
- /// FDT_ERR_BADPHANDLE
- BadPhandle,
- /// FDT_ERR_BADSTATE
- BadState,
- /// FDT_ERR_TRUNCATED
- Truncated,
- /// FDT_ERR_BADMAGIC
- BadMagic,
- /// FDT_ERR_BADVERSION
- BadVersion,
- /// FDT_ERR_BADSTRUCTURE
- BadStructure,
- /// FDT_ERR_BADLAYOUT
- BadLayout,
- /// FDT_ERR_INTERNAL
- Internal,
- /// FDT_ERR_BADNCELLS
- BadNCells,
- /// FDT_ERR_BADVALUE
- BadValue,
- /// FDT_ERR_BADOVERLAY
- BadOverlay,
- /// FDT_ERR_NOPHANDLES
- NoPhandles,
- /// FDT_ERR_BADFLAGS
- BadFlags,
- /// FDT_ERR_ALIGNMENT
- Alignment,
- /// Unexpected error code
- Unknown(i32),
-}
-
-impl fmt::Display for FdtError {
- /// Prints error messages from libfdt.h documentation.
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::NotFound => write!(f, "The requested node or property does not exist"),
- Self::Exists => write!(f, "Attempted to create an existing node or property"),
- Self::NoSpace => write!(f, "Insufficient buffer space to contain the expanded tree"),
- Self::BadOffset => write!(f, "Structure block offset is out-of-bounds or invalid"),
- Self::BadPath => write!(f, "Badly formatted path"),
- Self::BadPhandle => write!(f, "Invalid phandle length or value"),
- Self::BadState => write!(f, "Received incomplete device tree"),
- Self::Truncated => write!(f, "Device tree or sub-block is improperly terminated"),
- Self::BadMagic => write!(f, "Device tree header missing its magic number"),
- Self::BadVersion => write!(f, "Device tree has a version which can't be handled"),
- Self::BadStructure => write!(f, "Device tree has a corrupt structure block"),
- Self::BadLayout => write!(f, "Device tree sub-blocks in unsupported order"),
- Self::Internal => write!(f, "libfdt has failed an internal assertion"),
- Self::BadNCells => write!(f, "Bad format or value of #address-cells or #size-cells"),
- Self::BadValue => write!(f, "Unexpected property value"),
- Self::BadOverlay => write!(f, "Overlay cannot be applied"),
- Self::NoPhandles => write!(f, "Device tree doesn't have any phandle available anymore"),
- Self::BadFlags => write!(f, "Invalid flag or invalid combination of flags"),
- Self::Alignment => write!(f, "Device tree base address is not 8-byte aligned"),
- Self::Unknown(e) => write!(f, "Unknown libfdt error '{e}'"),
- }
- }
-}
-
-/// Result type with FdtError enum.
-pub type Result<T> = result::Result<T, FdtError>;
-
-fn fdt_err(val: c_int) -> Result<c_int> {
- if val >= 0 {
- Ok(val)
- } else {
- Err(match -val as _ {
- libfdt_bindgen::FDT_ERR_NOTFOUND => FdtError::NotFound,
- libfdt_bindgen::FDT_ERR_EXISTS => FdtError::Exists,
- libfdt_bindgen::FDT_ERR_NOSPACE => FdtError::NoSpace,
- libfdt_bindgen::FDT_ERR_BADOFFSET => FdtError::BadOffset,
- libfdt_bindgen::FDT_ERR_BADPATH => FdtError::BadPath,
- libfdt_bindgen::FDT_ERR_BADPHANDLE => FdtError::BadPhandle,
- libfdt_bindgen::FDT_ERR_BADSTATE => FdtError::BadState,
- libfdt_bindgen::FDT_ERR_TRUNCATED => FdtError::Truncated,
- libfdt_bindgen::FDT_ERR_BADMAGIC => FdtError::BadMagic,
- libfdt_bindgen::FDT_ERR_BADVERSION => FdtError::BadVersion,
- libfdt_bindgen::FDT_ERR_BADSTRUCTURE => FdtError::BadStructure,
- libfdt_bindgen::FDT_ERR_BADLAYOUT => FdtError::BadLayout,
- libfdt_bindgen::FDT_ERR_INTERNAL => FdtError::Internal,
- libfdt_bindgen::FDT_ERR_BADNCELLS => FdtError::BadNCells,
- libfdt_bindgen::FDT_ERR_BADVALUE => FdtError::BadValue,
- libfdt_bindgen::FDT_ERR_BADOVERLAY => FdtError::BadOverlay,
- libfdt_bindgen::FDT_ERR_NOPHANDLES => FdtError::NoPhandles,
- libfdt_bindgen::FDT_ERR_BADFLAGS => FdtError::BadFlags,
- libfdt_bindgen::FDT_ERR_ALIGNMENT => FdtError::Alignment,
- _ => FdtError::Unknown(val),
- })
- }
-}
-
-fn fdt_err_expect_zero(val: c_int) -> Result<()> {
- match fdt_err(val)? {
- 0 => Ok(()),
- _ => Err(FdtError::Unknown(val)),
- }
-}
-
-fn fdt_err_or_option(val: c_int) -> Result<Option<c_int>> {
- match fdt_err(val) {
- Ok(val) => Ok(Some(val)),
- Err(FdtError::NotFound) => Ok(None),
- Err(e) => Err(e),
- }
-}
+use crate::libfdt::{Libfdt, LibfdtMut};
/// Value of a #address-cells property.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -162,14 +43,14 @@
Triple = 3,
}
-impl TryFrom<c_int> for AddrCells {
+impl TryFrom<usize> for AddrCells {
type Error = FdtError;
- fn try_from(res: c_int) -> Result<Self> {
- match fdt_err(res)? {
- x if x == Self::Single as c_int => Ok(Self::Single),
- x if x == Self::Double as c_int => Ok(Self::Double),
- x if x == Self::Triple as c_int => Ok(Self::Triple),
+ fn try_from(value: usize) -> Result<Self> {
+ match value {
+ x if x == Self::Single as _ => Ok(Self::Single),
+ x if x == Self::Double as _ => Ok(Self::Double),
+ x if x == Self::Triple as _ => Ok(Self::Triple),
_ => Err(FdtError::BadNCells),
}
}
@@ -183,14 +64,14 @@
Double = 2,
}
-impl TryFrom<c_int> for SizeCells {
+impl TryFrom<usize> for SizeCells {
type Error = FdtError;
- fn try_from(res: c_int) -> Result<Self> {
- match fdt_err(res)? {
- x if x == Self::None as c_int => Ok(Self::None),
- x if x == Self::Single as c_int => Ok(Self::Single),
- x if x == Self::Double as c_int => Ok(Self::Double),
+ fn try_from(value: usize) -> Result<Self> {
+ match value {
+ x if x == Self::None as _ => Ok(Self::None),
+ x if x == Self::Single as _ => Ok(Self::Single),
+ x if x == Self::Double as _ => Ok(Self::Double),
_ => Err(FdtError::BadNCells),
}
}
@@ -201,6 +82,14 @@
#[derive(Debug)]
struct FdtPropertyStruct(libfdt_bindgen::fdt_property);
+impl AsRef<FdtPropertyStruct> for libfdt_bindgen::fdt_property {
+ fn as_ref(&self) -> &FdtPropertyStruct {
+ let ptr = self as *const _ as *const _;
+ // SAFETY: Types have the same layout (transparent) so the valid reference remains valid.
+ unsafe { &*ptr }
+ }
+}
+
impl FdtPropertyStruct {
fn from_offset(fdt: &Fdt, offset: c_int) -> Result<&Self> {
let mut len = 0;
@@ -212,7 +101,8 @@
return Err(FdtError::Internal); // shouldn't happen.
}
// SAFETY: prop is only returned when it points to valid libfdt_bindgen.
- Ok(unsafe { &*prop.cast::<FdtPropertyStruct>() })
+ let prop = unsafe { &*prop };
+ Ok(prop.as_ref())
}
fn name_offset(&self) -> c_int {
@@ -224,7 +114,7 @@
}
fn data_ptr(&self) -> *const c_void {
- self.0.data.as_ptr().cast::<_>()
+ self.0.data.as_ptr().cast()
}
}
@@ -257,7 +147,11 @@
// SAFETY: Accesses (read-only) are constrained to the DT totalsize.
unsafe { libfdt_bindgen::fdt_next_property_offset(self.fdt.as_ptr(), self.offset) };
- fdt_err_or_option(ret)?.map(|offset| Self::new(self.fdt, offset)).transpose()
+ if let Some(offset) = fdt_err_or_option(ret)? {
+ Ok(Some(Self::new(self.fdt, offset)?))
+ } else {
+ Ok(None)
+ }
}
}
@@ -269,31 +163,18 @@
}
impl<'a> FdtNode<'a> {
- /// Creates immutable node from a mutable node at the same offset.
- pub fn from_mut(other: &'a FdtNodeMut) -> Self {
- FdtNode { fdt: other.fdt, offset: other.offset }
- }
/// Returns parent node.
pub fn parent(&self) -> Result<Self> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
+ let offset = self.fdt.parent_offset(self.offset)?;
- Ok(Self { fdt: self.fdt, offset: fdt_err(ret)? })
+ Ok(Self { fdt: self.fdt, offset })
}
/// Returns supernode with depth. Note that root is at depth 0.
pub fn supernode_at_depth(&self, depth: usize) -> Result<Self> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_supernode_atdepth_offset(
- self.fdt.as_ptr(),
- self.offset,
- depth.try_into().unwrap(),
- ptr::null_mut(),
- )
- };
+ let offset = self.fdt.supernode_atdepth_offset(self.offset, depth)?;
- Ok(Self { fdt: self.fdt, offset: fdt_err(ret)? })
+ Ok(Self { fdt: self.fdt, offset })
}
/// Returns the standard (deprecated) device_type <string> property.
@@ -303,9 +184,7 @@
/// Returns the standard reg <prop-encoded-array> property.
pub fn reg(&self) -> Result<Option<RegIterator<'a>>> {
- let reg = cstr!("reg");
-
- if let Some(cells) = self.getprop_cells(reg)? {
+ if let Some(cells) = self.getprop_cells(cstr!("reg"))? {
let parent = self.parent()?;
let addr_cells = parent.address_cells()?;
@@ -319,8 +198,7 @@
/// Returns the standard ranges property.
pub fn ranges<A, P, S>(&self) -> Result<Option<RangesIterator<'a, A, P, S>>> {
- let ranges = cstr!("ranges");
- if let Some(cells) = self.getprop_cells(ranges)? {
+ if let Some(cells) = self.getprop_cells(cstr!("ranges"))? {
let parent = self.parent()?;
let addr_cells = self.address_cells()?;
let parent_addr_cells = parent.address_cells()?;
@@ -350,12 +228,11 @@
/// Returns the value of a given <string> property.
pub fn getprop_str(&self, name: &CStr) -> Result<Option<&CStr>> {
- let value = if let Some(bytes) = self.getprop(name)? {
- Some(CStr::from_bytes_with_nul(bytes).map_err(|_| FdtError::BadValue)?)
+ if let Some(bytes) = self.getprop(name)? {
+ Ok(Some(CStr::from_bytes_with_nul(bytes).map_err(|_| FdtError::BadValue)?))
} else {
- None
- };
- Ok(value)
+ Ok(None)
+ }
}
/// Returns the value of a given property as an array of cells.
@@ -369,22 +246,20 @@
/// Returns the value of a given <u32> property.
pub fn getprop_u32(&self, name: &CStr) -> Result<Option<u32>> {
- let value = if let Some(bytes) = self.getprop(name)? {
- Some(u32::from_be_bytes(bytes.try_into().map_err(|_| FdtError::BadValue)?))
+ if let Some(bytes) = self.getprop(name)? {
+ Ok(Some(u32::from_be_bytes(bytes.try_into().map_err(|_| FdtError::BadValue)?)))
} else {
- None
- };
- Ok(value)
+ Ok(None)
+ }
}
/// Returns the value of a given <u64> property.
pub fn getprop_u64(&self, name: &CStr) -> Result<Option<u64>> {
- let value = if let Some(bytes) = self.getprop(name)? {
- Some(u64::from_be_bytes(bytes.try_into().map_err(|_| FdtError::BadValue)?))
+ if let Some(bytes) = self.getprop(name)? {
+ Ok(Some(u64::from_be_bytes(bytes.try_into().map_err(|_| FdtError::BadValue)?)))
} else {
- None
- };
- Ok(value)
+ Ok(None)
+ }
}
/// Returns the value of a given property.
@@ -392,7 +267,7 @@
if let Some((prop, len)) = Self::getprop_internal(self.fdt, self.offset, name)? {
Ok(Some(self.fdt.get_from_ptr(prop, len)?))
} else {
- Ok(None) // property was not found
+ Ok(None)
}
}
@@ -436,16 +311,9 @@
/// Returns the compatible node of the given name that is next after this node.
pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_node_offset_by_compatible(
- self.fdt.as_ptr(),
- self.offset,
- compatible.as_ptr(),
- )
- };
+ let offset = self.fdt.node_offset_by_compatible(self.offset, compatible)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Returns the first range of `reg` in this node.
@@ -454,17 +322,11 @@
}
fn address_cells(&self) -> Result<AddrCells> {
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- unsafe { libfdt_bindgen::fdt_address_cells(self.fdt.as_ptr(), self.offset) }
- .try_into()
- .map_err(|_| FdtError::Internal)
+ self.fdt.address_cells(self.offset)?.try_into()
}
fn size_cells(&self) -> Result<SizeCells> {
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- unsafe { libfdt_bindgen::fdt_size_cells(self.fdt.as_ptr(), self.offset) }
- .try_into()
- .map_err(|_| FdtError::Internal)
+ self.fdt.size_cells(self.offset)?.try_into()
}
/// Returns an iterator of subnodes
@@ -473,17 +335,15 @@
}
fn first_subnode(&self) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_first_subnode(self.fdt.as_ptr(), self.offset) };
+ let offset = self.fdt.first_subnode(self.offset)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| FdtNode { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
fn next_subnode(&self) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_next_subnode(self.fdt.as_ptr(), self.offset) };
+ let offset = self.fdt.next_subnode(self.offset)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| FdtNode { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Returns an iterator of descendants
@@ -492,15 +352,11 @@
}
fn next_node(&self, depth: usize) -> Result<Option<(Self, usize)>> {
- let mut next_depth: c_int = depth.try_into().unwrap();
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_next_node(self.fdt.as_ptr(), self.offset, &mut next_depth)
- };
- let Ok(next_depth) = usize::try_from(next_depth) else {
- return Ok(None);
- };
- Ok(fdt_err_or_option(ret)?.map(|offset| (FdtNode { fdt: self.fdt, offset }, next_depth)))
+ if let Some((offset, depth)) = self.fdt.next_node(self.offset, depth)? {
+ Ok(Some((Self { fdt: self.fdt, offset }, depth)))
+ } else {
+ Ok(None)
+ }
}
/// Returns an iterator of properties
@@ -513,7 +369,11 @@
// SAFETY: Accesses (read-only) are constrained to the DT totalsize.
unsafe { libfdt_bindgen::fdt_first_property_offset(self.fdt.as_ptr(), self.offset) };
- fdt_err_or_option(ret)?.map(|offset| FdtProperty::new(self.fdt, offset)).transpose()
+ if let Some(offset) = fdt_err_or_option(ret)? {
+ Ok(Some(FdtProperty::new(self.fdt, offset)?))
+ } else {
+ Ok(None)
+ }
}
/// Returns the phandle
@@ -530,28 +390,17 @@
/// Returns the subnode of the given name. The name doesn't need to be nul-terminated.
pub fn subnode(&self, name: &CStr) -> Result<Option<Self>> {
- let offset = self.subnode_offset(name.to_bytes())?;
+ let name = name.to_bytes();
+ let offset = self.fdt.subnode_offset_namelen(self.offset, name)?;
+
Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Returns the subnode of the given name bytes
pub fn subnode_with_name_bytes(&self, name: &[u8]) -> Result<Option<Self>> {
- let offset = self.subnode_offset(name)?;
- Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
- }
+ let offset = self.fdt.subnode_offset_namelen(self.offset, name)?;
- fn subnode_offset(&self, name: &[u8]) -> Result<Option<c_int>> {
- let namelen = name.len().try_into().unwrap();
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- let ret = unsafe {
- libfdt_bindgen::fdt_subnode_offset_namelen(
- self.fdt.as_ptr(),
- self.offset,
- name.as_ptr().cast::<_>(),
- namelen,
- )
- };
- fdt_err_or_option(ret)
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
}
@@ -753,126 +602,76 @@
/// Adds new subnodes to the given node.
pub fn add_subnodes(&mut self, names: &[&CStr]) -> Result<()> {
for name in names {
- self.add_subnode_offset(name.to_bytes())?;
+ self.fdt.add_subnode_namelen(self.offset, name.to_bytes())?;
}
Ok(())
}
/// Adds a new subnode to the given node and return it as a FdtNodeMut on success.
pub fn add_subnode(&'a mut self, name: &CStr) -> Result<Self> {
- let offset = self.add_subnode_offset(name.to_bytes())?;
+ let name = name.to_bytes();
+ let offset = self.fdt.add_subnode_namelen(self.offset, name)?;
+
Ok(Self { fdt: self.fdt, offset })
}
/// Adds a new subnode to the given node with name and namelen, and returns it as a FdtNodeMut
/// on success.
pub fn add_subnode_with_namelen(&'a mut self, name: &CStr, namelen: usize) -> Result<Self> {
- let offset = { self.add_subnode_offset(&name.to_bytes()[..namelen])? };
- Ok(Self { fdt: self.fdt, offset })
- }
+ let name = &name.to_bytes()[..namelen];
+ let offset = self.fdt.add_subnode_namelen(self.offset, name)?;
- fn add_subnode_offset(&mut self, name: &[u8]) -> Result<c_int> {
- let namelen = name.len().try_into().unwrap();
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- let ret = unsafe {
- libfdt_bindgen::fdt_add_subnode_namelen(
- self.fdt.as_mut_ptr(),
- self.offset,
- name.as_ptr().cast::<_>(),
- namelen,
- )
- };
- fdt_err(ret)
+ Ok(Self { fdt: self.fdt, offset })
}
/// Returns the first subnode of this
pub fn first_subnode(&'a mut self) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_first_subnode(self.fdt.as_ptr(), self.offset) };
+ let offset = self.fdt.first_subnode(self.offset)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Returns the next subnode that shares the same parent with this
pub fn next_subnode(self) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_next_subnode(self.fdt.as_ptr(), self.offset) };
+ let offset = self.fdt.next_subnode(self.offset)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Deletes the current node and returns the next subnode
- pub fn delete_and_next_subnode(mut self) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_next_subnode(self.fdt.as_ptr(), self.offset) };
+ pub fn delete_and_next_subnode(self) -> Result<Option<Self>> {
+ let next_offset = self.fdt.next_subnode(self.offset)?;
- let next_offset = fdt_err_or_option(ret)?;
-
- if Some(self.offset) == next_offset {
- return Err(FdtError::Internal);
- }
-
- // SAFETY: nop_self() only touches bytes of the self and its properties and subnodes, and
- // doesn't alter any other blob in the tree. self.fdt and next_offset would remain valid.
- unsafe { self.nop_self()? };
-
- Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
- }
-
- fn next_node_offset(&self, depth: usize) -> Result<Option<(c_int, usize)>> {
- let mut next_depth: c_int = depth.try_into().or(Err(FdtError::BadValue))?;
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_next_node(self.fdt.as_ptr(), self.offset, &mut next_depth)
- };
- let Ok(next_depth) = usize::try_from(next_depth) else {
- return Ok(None);
- };
- Ok(fdt_err_or_option(ret)?.map(|offset| (offset, next_depth)))
+ self.delete_and_next(next_offset)
}
/// Returns the next node
pub fn next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
- Ok(self
- .next_node_offset(depth)?
- .map(|(offset, next_depth)| (FdtNodeMut { fdt: self.fdt, offset }, next_depth)))
+ let next = self.fdt.next_node(self.offset, depth)?;
+
+ Ok(next.map(|(offset, depth)| (Self { fdt: self.fdt, offset }, depth)))
}
/// Deletes this and returns the next node
- pub fn delete_and_next_node(mut self, depth: usize) -> Result<Option<(Self, usize)>> {
- // Skip all would-be-removed descendants.
- let mut iter = self.next_node_offset(depth)?;
- while let Some((descendant_offset, descendant_depth)) = iter {
- if descendant_depth <= depth {
- break;
- }
- let descendant = FdtNodeMut { fdt: self.fdt, offset: descendant_offset };
- iter = descendant.next_node_offset(descendant_depth)?;
+ pub fn delete_and_next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
+ let next_node = self.fdt.next_node_skip_subnodes(self.offset, depth)?;
+ if let Some((offset, depth)) = next_node {
+ let next_node = self.delete_and_next(Some(offset))?.unwrap();
+ Ok(Some((next_node, depth)))
+ } else {
+ Ok(None)
}
- // SAFETY: This consumes self, so invalid node wouldn't be used any further
- unsafe { self.nop_self()? };
- Ok(iter.map(|(offset, next_depth)| (FdtNodeMut { fdt: self.fdt, offset }, next_depth)))
}
fn parent(&'a self) -> Result<FdtNode<'a>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
-
- Ok(FdtNode { fdt: &*self.fdt, offset: fdt_err(ret)? })
+ self.as_node().parent()
}
/// Returns the compatible node of the given name that is next after this node.
pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_node_offset_by_compatible(
- self.fdt.as_ptr(),
- self.offset,
- compatible.as_ptr(),
- )
- };
+ let offset = self.fdt.node_offset_by_compatible(self.offset, compatible)?;
- Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ Ok(offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Deletes the node effectively by overwriting this node and its subtree with nop tags.
@@ -887,43 +686,25 @@
// node, and delete the current node, the Rust borrow checker kicks in. The next node has a
// mutable reference to DT, so we can't use current node (which also has a mutable reference to
// DT).
- pub fn delete_and_next_compatible(mut self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
- let ret = unsafe {
- libfdt_bindgen::fdt_node_offset_by_compatible(
- self.fdt.as_ptr(),
- self.offset,
- compatible.as_ptr(),
- )
- };
- let next_offset = fdt_err_or_option(ret)?;
+ pub fn delete_and_next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
+ let next_offset = self.fdt.node_offset_by_compatible(self.offset, compatible)?;
+ self.delete_and_next(next_offset)
+ }
+
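+ /// Deletes this node (via FDT_NOP) and returns the node at `next_offset`, if any.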
+ fn delete_and_next(self, next_offset: Option<c_int>) -> Result<Option<Self>> {
if Some(self.offset) == next_offset {
return Err(FdtError::Internal);
}
- // SAFETY: nop_self() only touches bytes of the self and its properties and subnodes, and
- // doesn't alter any other blob in the tree. self.fdt and next_offset would remain valid.
- unsafe { self.nop_self()? };
+ self.fdt.nop_node(self.offset)?;
Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
}
/// Deletes this node effectively from DT, by setting it with FDT_NOP
- pub fn nop(mut self) -> Result<()> {
- // SAFETY: This consumes self, so invalid node wouldn't be used any further
- unsafe { self.nop_self() }
- }
-
- /// Deletes this node effectively from DT, by setting it with FDT_NOP.
- /// This only changes bytes of the node and its properties and subnodes, and doesn't alter or
- /// move any other part of the tree.
- /// SAFETY: This node is no longer valid.
- unsafe fn nop_self(&mut self) -> Result<()> {
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
- let ret = unsafe { libfdt_bindgen::fdt_nop_node(self.fdt.as_mut_ptr(), self.offset) };
-
- fdt_err_expect_zero(ret)
+ pub fn nop(self) -> Result<()> {
+ self.fdt.nop_node(self.offset)
}
}
@@ -934,14 +715,31 @@
buffer: [u8],
}
+// SAFETY: Fdt calls check_full() before safely returning a &Self, making it impossible for trait
+// methods to be called on invalid device trees.
+unsafe impl Libfdt for Fdt {
+ fn as_fdt_slice(&self) -> &[u8] {
+ &self.buffer[..self.totalsize()]
+ }
+}
+
+// SAFETY: Fdt calls check_full() before safely returning a &Self, making it impossible for trait
+// methods to be called on invalid device trees.
+unsafe impl LibfdtMut for Fdt {
+ fn as_fdt_slice_mut(&mut self) -> &mut [u8] {
+ &mut self.buffer
+ }
+}
+
impl Fdt {
/// Wraps a slice containing a Flattened Device Tree.
///
/// Fails if the FDT does not pass validation.
pub fn from_slice(fdt: &[u8]) -> Result<&Self> {
- // SAFETY: The FDT will be validated before it is returned.
+ libfdt::check_full(fdt)?;
+ // SAFETY: The FDT was validated.
let fdt = unsafe { Self::unchecked_from_slice(fdt) };
- fdt.check_full()?;
+
Ok(fdt)
}
@@ -949,67 +747,58 @@
///
/// Fails if the FDT does not pass validation.
pub fn from_mut_slice(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY: The FDT will be validated before it is returned.
+ libfdt::check_full(fdt)?;
+ // SAFETY: The FDT was validated.
let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
- fdt.check_full()?;
+
Ok(fdt)
}
/// Creates an empty Flattened Device Tree with a mutable slice.
pub fn create_empty_tree(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY: fdt_create_empty_tree() only write within the specified length,
- // and returns error if buffer was insufficient.
- // There will be no memory write outside of the given fdt.
- let ret = unsafe {
- libfdt_bindgen::fdt_create_empty_tree(
- fdt.as_mut_ptr().cast::<c_void>(),
- fdt.len() as i32,
- )
- };
- fdt_err_expect_zero(ret)?;
+ libfdt::create_empty_tree(fdt)?;
- // SAFETY: The FDT will be validated before it is returned.
- let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
- fdt.check_full()?;
-
- Ok(fdt)
+ Self::from_mut_slice(fdt)
}
/// Wraps a slice containing a Flattened Device Tree.
///
/// # Safety
///
- /// The returned FDT might be invalid, only use on slices containing a valid DT.
+ /// It is undefined to call this function on a slice that does not contain a valid device tree.
pub unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
- // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
- // responsible for ensuring that it is actually a valid FDT.
- unsafe { mem::transmute::<&[u8], &Self>(fdt) }
+ let self_ptr = fdt as *const _ as *const _;
+ // SAFETY: The pointer is non-null, dereferenceable, and points to allocated memory.
+ unsafe { &*self_ptr }
}
/// Wraps a mutable slice containing a Flattened Device Tree.
///
/// # Safety
///
- /// The returned FDT might be invalid, only use on slices containing a valid DT.
+ /// It is undefined to call this function on a slice that does not contain a valid device tree.
pub unsafe fn unchecked_from_mut_slice(fdt: &mut [u8]) -> &mut Self {
- // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
- // responsible for ensuring that it is actually a valid FDT.
- unsafe { mem::transmute::<&mut [u8], &mut Self>(fdt) }
+ let self_mut_ptr = fdt as *mut _ as *mut _;
+ // SAFETY: The pointer is non-null, dereferenceable, and points to allocated memory.
+ unsafe { &mut *self_mut_ptr }
}
- /// Updates this FDT from a slice containing another FDT.
- pub fn copy_from_slice(&mut self, new_fdt: &[u8]) -> Result<()> {
- if self.buffer.len() < new_fdt.len() {
- Err(FdtError::NoSpace)
- } else {
- let totalsize = self.totalsize();
- self.buffer[..new_fdt.len()].clone_from_slice(new_fdt);
- // Zeroize the remaining part. We zeroize up to the size of the original DT because
- // zeroizing the entire buffer (max 2MB) is not necessary and may increase the VM boot
- // time.
- self.buffer[new_fdt.len()..max(new_fdt.len(), totalsize)].fill(0_u8);
- Ok(())
+ /// Updates this FDT from another FDT.
+ pub fn clone_from(&mut self, other: &Self) -> Result<()> {
+ let new_len = other.buffer.len();
+ if self.buffer.len() < new_len {
+ return Err(FdtError::NoSpace);
}
+
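+ // Zeroize only the remainder of the original DT: zeroizing the entire buffer (up to
+ // 2MB) is unnecessary and may increase the VM boot time.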
+ let zeroed_len = self.totalsize().checked_sub(new_len);
+ let (cloned, zeroed) = self.buffer.split_at_mut(new_len);
+
+ cloned.clone_from_slice(&other.buffer);
+ if let Some(len) = zeroed_len {
+ zeroed[..len].fill(0);
+ }
+
+ Ok(())
}
/// Unpacks the DT to cover the whole slice it is contained in.
@@ -1056,11 +845,8 @@
///
/// NOTE: This does not support individual "/memory@XXXX" banks.
pub fn memory(&self) -> Result<MemRegIterator> {
- let memory_node_name = cstr!("/memory");
- let memory_device_type = cstr!("memory");
-
- let node = self.node(memory_node_name)?.ok_or(FdtError::NotFound)?;
- if node.device_type()? != Some(memory_device_type) {
+ let node = self.node(cstr!("/memory"))?.ok_or(FdtError::NotFound)?;
+ if node.device_type()? != Some(cstr!("memory")) {
return Err(FdtError::BadValue);
}
node.reg()?.ok_or(FdtError::BadValue).map(MemRegIterator::new)
@@ -1098,7 +884,9 @@
/// Returns a tree node by its full path.
pub fn node(&self, path: &CStr) -> Result<Option<FdtNode>> {
- Ok(self.path_offset(path.to_bytes())?.map(|offset| FdtNode { fdt: self, offset }))
+ let offset = self.path_offset_namelen(path.to_bytes())?;
+
+ Ok(offset.map(|offset| FdtNode { fdt: self, offset }))
}
/// Iterate over nodes with a given compatible string.
@@ -1141,34 +929,26 @@
/// Returns a mutable tree node by its full path.
pub fn node_mut(&mut self, path: &CStr) -> Result<Option<FdtNodeMut>> {
- Ok(self.path_offset(path.to_bytes())?.map(|offset| FdtNodeMut { fdt: self, offset }))
+ let offset = self.path_offset_namelen(path.to_bytes())?;
+
+ Ok(offset.map(|offset| FdtNodeMut { fdt: self, offset }))
+ }
+
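+ /// Returns the next node (and its depth), skipping over the subtree rooted at `node`.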
+ fn next_node_skip_subnodes(&self, node: c_int, depth: usize) -> Result<Option<(c_int, usize)>> {
+ let mut iter = self.next_node(node, depth)?;
+ while let Some((offset, next_depth)) = iter {
+ if next_depth <= depth {
+ return Ok(Some((offset, next_depth)));
+ }
+ iter = self.next_node(offset, next_depth)?;
+ }
+
+ Ok(None)
}
/// Returns the device tree as a slice (may be smaller than the containing buffer).
pub fn as_slice(&self) -> &[u8] {
- &self.buffer[..self.totalsize()]
- }
-
- fn path_offset(&self, path: &[u8]) -> Result<Option<c_int>> {
- let len = path.len().try_into().map_err(|_| FdtError::BadPath)?;
- // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
- // function respects the passed number of characters.
- let ret = unsafe {
- // *_namelen functions don't include the trailing nul terminator in 'len'.
- libfdt_bindgen::fdt_path_offset_namelen(self.as_ptr(), path.as_ptr().cast::<_>(), len)
- };
-
- fdt_err_or_option(ret)
- }
-
- fn check_full(&self) -> Result<()> {
- // SAFETY: Only performs read accesses within the limits of the slice. If successful, this
- // call guarantees to other unsafe calls that the header contains a valid totalsize (w.r.t.
- // 'len' i.e. the self.fdt slice) that those C functions can use to perform bounds
- // checking. The library doesn't maintain an internal state (such as pointers) between
- // calls as it expects the client code to keep track of the objects (DT, nodes, ...).
- let ret = unsafe { libfdt_bindgen::fdt_check_full(self.as_ptr(), self.capacity()) };
- fdt_err_expect_zero(ret)
+ self.as_fdt_slice()
}
fn get_from_ptr(&self, ptr: *const c_void, len: usize) -> Result<&[u8]> {
@@ -1190,7 +970,7 @@
/// Returns a shared pointer to the device tree.
pub fn as_ptr(&self) -> *const c_void {
- self.buffer.as_ptr().cast::<_>()
+ self.buffer.as_ptr().cast()
}
fn as_mut_ptr(&mut self) -> *mut c_void {
@@ -1202,7 +982,7 @@
}
fn header(&self) -> &libfdt_bindgen::fdt_header {
- let p = self.as_ptr().cast::<_>();
+ let p = self.as_ptr().cast();
// SAFETY: A valid FDT (verified by constructor) must contain a valid fdt_header.
unsafe { &*p }
}
diff --git a/libs/libfdt/src/libfdt.rs b/libs/libfdt/src/libfdt.rs
new file mode 100644
index 0000000..7e3b65a
--- /dev/null
+++ b/libs/libfdt/src/libfdt.rs
@@ -0,0 +1,219 @@
+// Copyright 2024, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Low-level libfdt_bindgen wrapper, easy to integrate safely in higher-level APIs.
+//!
+//! These traits decouple the safe libfdt C function calls from the representation of the
+//! user-friendly higher-level types, allowing the traits to be shared between different types
+//! adapted to their use cases (e.g. alloc-based userspace or statically allocated no_std).
+
+use core::ffi::{c_int, CStr};
+use core::ptr;
+
+use crate::{fdt_err, fdt_err_expect_zero, fdt_err_or_option, FdtError, Result};
+
+// Function names are the C function names without the `fdt_` prefix.
+
+/// Safe wrapper around `fdt_create_empty_tree()` (C function).
+pub(crate) fn create_empty_tree(fdt: &mut [u8]) -> Result<()> {
+ let len = fdt.len().try_into().unwrap();
+ let fdt = fdt.as_mut_ptr().cast();
+ // SAFETY: fdt_create_empty_tree() only writes within the specified length
+ // and returns an error if the buffer is insufficient, so there are no
+ // memory writes outside of the given fdt.
+ let ret = unsafe { libfdt_bindgen::fdt_create_empty_tree(fdt, len) };
+
+ fdt_err_expect_zero(ret)
+}
+
+/// Safe wrapper around `fdt_check_full()` (C function).
+pub(crate) fn check_full(fdt: &[u8]) -> Result<()> {
+ let len = fdt.len();
+ let fdt = fdt.as_ptr().cast();
+ // SAFETY: Only performs read accesses within the limits of the slice. If successful, this
+ // call guarantees to other unsafe calls that the header contains a valid totalsize (w.r.t.
+ // 'len', i.e. the fdt slice) that those C functions can use to perform bounds
+ // checking. The library doesn't maintain an internal state (such as pointers) between
+ // calls as it expects the client code to keep track of the objects (DT, nodes, ...).
+ let ret = unsafe { libfdt_bindgen::fdt_check_full(fdt, len) };
+
+ fdt_err_expect_zero(ret)
+}
+
+/// Wrapper for the read-only libfdt.h functions.
+///
+/// # Safety
+///
+/// Implementors must ensure that at any point where a method of this trait is called, the
+/// underlying type returns the bytes of a valid device tree (as validated by `check_full`)
+/// through its `.as_fdt_slice` method.
+pub(crate) unsafe trait Libfdt {
+ /// Provides an immutable slice containing the device tree.
+ ///
+ /// The implementation must ensure that the size of the returned slice and
+ /// `fdt_header::totalsize` match.
+ fn as_fdt_slice(&self) -> &[u8];
+
+ /// Safe wrapper around `fdt_path_offset_namelen()` (C function).
+ fn path_offset_namelen(&self, path: &[u8]) -> Result<Option<c_int>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // *_namelen functions don't include the trailing nul terminator in 'len'.
+ let len = path.len().try_into().map_err(|_| FdtError::BadPath)?;
+ let path = path.as_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
+ // function respects the passed number of characters.
+ let ret = unsafe { libfdt_bindgen::fdt_path_offset_namelen(fdt, path, len) };
+
+ fdt_err_or_option(ret)
+ }
+
+ /// Safe wrapper around `fdt_node_offset_by_compatible()` (C function).
+ fn node_offset_by_compatible(&self, prev: c_int, compatible: &CStr) -> Result<Option<c_int>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ let compatible = compatible.as_ptr();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_node_offset_by_compatible(fdt, prev, compatible) };
+
+ fdt_err_or_option(ret)
+ }
+
+ /// Safe wrapper around `fdt_next_node()` (C function).
+ fn next_node(&self, node: c_int, depth: usize) -> Result<Option<(c_int, usize)>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ let mut depth = depth.try_into().unwrap();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_next_node(fdt, node, &mut depth) };
+
+ match fdt_err_or_option(ret)? {
+ Some(offset) if depth >= 0 => {
+ let depth = depth.try_into().unwrap();
+ Ok(Some((offset, depth)))
+ }
+ _ => Ok(None),
+ }
+ }
+
+ /// Safe wrapper around `fdt_parent_offset()` (C function).
+ ///
+ /// Note that this function returns an `Err` when called on the root node.
+ fn parent_offset(&self, node: c_int) -> Result<c_int> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_parent_offset(fdt, node) };
+
+ fdt_err(ret)
+ }
+
+ /// Safe wrapper around `fdt_supernode_atdepth_offset()` (C function).
+ ///
+ /// Note that this function returns an `Err` when called on a node at a depth shallower than
+ /// the provided `depth`.
+ fn supernode_atdepth_offset(&self, node: c_int, depth: usize) -> Result<c_int> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ let depth = depth.try_into().unwrap();
+ let nodedepth = ptr::null_mut();
+ let ret =
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ unsafe { libfdt_bindgen::fdt_supernode_atdepth_offset(fdt, node, depth, nodedepth) };
+
+ fdt_err(ret)
+ }
+
+ /// Safe wrapper around `fdt_subnode_offset_namelen()` (C function).
+ fn subnode_offset_namelen(&self, parent: c_int, name: &[u8]) -> Result<Option<c_int>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ let namelen = name.len().try_into().unwrap();
+ let name = name.as_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe { libfdt_bindgen::fdt_subnode_offset_namelen(fdt, parent, name, namelen) };
+
+ fdt_err_or_option(ret)
+ }
+
+ /// Safe wrapper around `fdt_first_subnode()` (C function).
+ fn first_subnode(&self, node: c_int) -> Result<Option<c_int>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_first_subnode(fdt, node) };
+
+ fdt_err_or_option(ret)
+ }
+
+ /// Safe wrapper around `fdt_next_subnode()` (C function).
+ fn next_subnode(&self, node: c_int) -> Result<Option<c_int>> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe { libfdt_bindgen::fdt_next_subnode(fdt, node) };
+
+ fdt_err_or_option(ret)
+ }
+
+ /// Safe wrapper around `fdt_address_cells()` (C function).
+ fn address_cells(&self, node: c_int) -> Result<usize> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe { libfdt_bindgen::fdt_address_cells(fdt, node) };
+
+ Ok(fdt_err(ret)?.try_into().unwrap())
+ }
+
+ /// Safe wrapper around `fdt_size_cells()` (C function).
+ fn size_cells(&self, node: c_int) -> Result<usize> {
+ let fdt = self.as_fdt_slice().as_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe { libfdt_bindgen::fdt_size_cells(fdt, node) };
+
+ Ok(fdt_err(ret)?.try_into().unwrap())
+ }
+}
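+
+// A minimal implementor sketch (illustrative only, not part of this change):
+// assuming a hypothetical `Fdt` wrapper whose constructor has already validated
+// the buffer with `check_full()`, the contract above can be met by exposing
+// exactly `totalsize` bytes of that buffer:
+//
+//     unsafe impl Libfdt for Fdt {
+//         fn as_fdt_slice(&self) -> &[u8] {
+//             &self.buffer[..self.totalsize()]
+//         }
+//     }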
+
+/// Wrapper for the read-write libfdt.h functions.
+///
+/// # Safety
+///
+/// Implementors must ensure that at any point where a method of this trait is called, the
+/// underlying type returns the bytes of a valid device tree (as validated by `check_full`)
+/// through its `.as_fdt_slice_mut` method.
+///
+/// Some methods may invalidate previously returned values such as node or string offsets or
+/// phandles by modifying the device tree (e.g. by inserting or removing nodes or properties).
+/// As most methods take or return such values, instead of marking them all as unsafe, the trait
+/// itself is marked as unsafe: implementors must ensure that methods which invalidate those
+/// values are never called while the values are still in use.
+pub(crate) unsafe trait LibfdtMut {
+ /// Provides a mutable slice of the buffer containing the device tree.
+ ///
+ /// The implementation must ensure that the size of the returned slice is at least
+ /// `fdt_header::totalsize`, to allow for device tree growth.
+ fn as_fdt_slice_mut(&mut self) -> &mut [u8];
+
+ /// Safe wrapper around `fdt_nop_node()` (C function).
+ fn nop_node(&mut self, node: c_int) -> Result<()> {
+ let fdt = self.as_fdt_slice_mut().as_mut_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe { libfdt_bindgen::fdt_nop_node(fdt, node) };
+
+ fdt_err_expect_zero(ret)
+ }
+
+ /// Safe wrapper around `fdt_add_subnode_namelen()` (C function).
+ fn add_subnode_namelen(&mut self, node: c_int, name: &[u8]) -> Result<c_int> {
+ let fdt = self.as_fdt_slice_mut().as_mut_ptr().cast();
+ let namelen = name.len().try_into().unwrap();
+ let name = name.as_ptr().cast();
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
+ let ret = unsafe { libfdt_bindgen::fdt_add_subnode_namelen(fdt, node, name, namelen) };
+
+ fdt_err(ret)
+ }
+}
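+
+// Illustrative caller sketch (not part of this change), assuming a type that
+// implements both `Libfdt` and `LibfdtMut`. Per the safety notes above, node
+// offsets obtained before a mutating call must not be reused afterwards:
+//
+//     let node = fdt.path_offset_namelen(b"/chosen")?.ok_or(FdtError::NotFound)?;
+//     fdt.add_subnode_namelen(node, b"new-subnode")?;
+//     // `node` may now be stale; look it up again before further use.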
diff --git a/libs/libfdt/src/result.rs b/libs/libfdt/src/result.rs
new file mode 100644
index 0000000..9643e1e
--- /dev/null
+++ b/libs/libfdt/src/result.rs
@@ -0,0 +1,139 @@
+// Copyright 2024, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Rust types related to the libfdt C integer results.
+
+use core::ffi::c_int;
+use core::fmt;
+use core::result;
+
+/// Error type corresponding to libfdt error codes.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum FdtError {
+ /// FDT_ERR_NOTFOUND
+ NotFound,
+ /// FDT_ERR_EXISTS
+ Exists,
+ /// FDT_ERR_NOSPACE
+ NoSpace,
+ /// FDT_ERR_BADOFFSET
+ BadOffset,
+ /// FDT_ERR_BADPATH
+ BadPath,
+ /// FDT_ERR_BADPHANDLE
+ BadPhandle,
+ /// FDT_ERR_BADSTATE
+ BadState,
+ /// FDT_ERR_TRUNCATED
+ Truncated,
+ /// FDT_ERR_BADMAGIC
+ BadMagic,
+ /// FDT_ERR_BADVERSION
+ BadVersion,
+ /// FDT_ERR_BADSTRUCTURE
+ BadStructure,
+ /// FDT_ERR_BADLAYOUT
+ BadLayout,
+ /// FDT_ERR_INTERNAL
+ Internal,
+ /// FDT_ERR_BADNCELLS
+ BadNCells,
+ /// FDT_ERR_BADVALUE
+ BadValue,
+ /// FDT_ERR_BADOVERLAY
+ BadOverlay,
+ /// FDT_ERR_NOPHANDLES
+ NoPhandles,
+ /// FDT_ERR_BADFLAGS
+ BadFlags,
+ /// FDT_ERR_ALIGNMENT
+ Alignment,
+ /// Unexpected error code
+ Unknown(i32),
+}
+
+impl fmt::Display for FdtError {
+ /// Formats the error message, based on the descriptions in the libfdt.h documentation.
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::NotFound => write!(f, "The requested node or property does not exist"),
+ Self::Exists => write!(f, "Attempted to create an existing node or property"),
+ Self::NoSpace => write!(f, "Insufficient buffer space to contain the expanded tree"),
+ Self::BadOffset => write!(f, "Structure block offset is out-of-bounds or invalid"),
+ Self::BadPath => write!(f, "Badly formatted path"),
+ Self::BadPhandle => write!(f, "Invalid phandle length or value"),
+ Self::BadState => write!(f, "Received incomplete device tree"),
+ Self::Truncated => write!(f, "Device tree or sub-block is improperly terminated"),
+ Self::BadMagic => write!(f, "Device tree header missing its magic number"),
+ Self::BadVersion => write!(f, "Device tree has a version which can't be handled"),
+ Self::BadStructure => write!(f, "Device tree has a corrupt structure block"),
+ Self::BadLayout => write!(f, "Device tree sub-blocks in unsupported order"),
+ Self::Internal => write!(f, "libfdt has failed an internal assertion"),
+ Self::BadNCells => write!(f, "Bad format or value of #address-cells or #size-cells"),
+ Self::BadValue => write!(f, "Unexpected property value"),
+ Self::BadOverlay => write!(f, "Overlay cannot be applied"),
+ Self::NoPhandles => write!(f, "No more phandles are available in the device tree"),
+ Self::BadFlags => write!(f, "Invalid flag or invalid combination of flags"),
+ Self::Alignment => write!(f, "Device tree base address is not 8-byte aligned"),
+ Self::Unknown(e) => write!(f, "Unknown libfdt error '{e}'"),
+ }
+ }
+}
+
+/// Result type with `FdtError` as the error type.
+pub type Result<T> = result::Result<T, FdtError>;
+
+pub(crate) fn fdt_err(val: c_int) -> Result<c_int> {
+ if val >= 0 {
+ Ok(val)
+ } else {
+ Err(match -val as _ {
+ libfdt_bindgen::FDT_ERR_NOTFOUND => FdtError::NotFound,
+ libfdt_bindgen::FDT_ERR_EXISTS => FdtError::Exists,
+ libfdt_bindgen::FDT_ERR_NOSPACE => FdtError::NoSpace,
+ libfdt_bindgen::FDT_ERR_BADOFFSET => FdtError::BadOffset,
+ libfdt_bindgen::FDT_ERR_BADPATH => FdtError::BadPath,
+ libfdt_bindgen::FDT_ERR_BADPHANDLE => FdtError::BadPhandle,
+ libfdt_bindgen::FDT_ERR_BADSTATE => FdtError::BadState,
+ libfdt_bindgen::FDT_ERR_TRUNCATED => FdtError::Truncated,
+ libfdt_bindgen::FDT_ERR_BADMAGIC => FdtError::BadMagic,
+ libfdt_bindgen::FDT_ERR_BADVERSION => FdtError::BadVersion,
+ libfdt_bindgen::FDT_ERR_BADSTRUCTURE => FdtError::BadStructure,
+ libfdt_bindgen::FDT_ERR_BADLAYOUT => FdtError::BadLayout,
+ libfdt_bindgen::FDT_ERR_INTERNAL => FdtError::Internal,
+ libfdt_bindgen::FDT_ERR_BADNCELLS => FdtError::BadNCells,
+ libfdt_bindgen::FDT_ERR_BADVALUE => FdtError::BadValue,
+ libfdt_bindgen::FDT_ERR_BADOVERLAY => FdtError::BadOverlay,
+ libfdt_bindgen::FDT_ERR_NOPHANDLES => FdtError::NoPhandles,
+ libfdt_bindgen::FDT_ERR_BADFLAGS => FdtError::BadFlags,
+ libfdt_bindgen::FDT_ERR_ALIGNMENT => FdtError::Alignment,
+ _ => FdtError::Unknown(val),
+ })
+ }
+}
+
+pub(crate) fn fdt_err_expect_zero(val: c_int) -> Result<()> {
+ match fdt_err(val)? {
+ 0 => Ok(()),
+ _ => Err(FdtError::Unknown(val)),
+ }
+}
+
+pub(crate) fn fdt_err_or_option(val: c_int) -> Result<Option<c_int>> {
+ match fdt_err(val) {
+ Ok(val) => Ok(Some(val)),
+ Err(FdtError::NotFound) => Ok(None),
+ Err(e) => Err(e),
+ }
+}
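+
+// Illustrative only (not part of this change): expected mappings of raw libfdt
+// return codes, assuming FDT_ERR_NOTFOUND == 1 as defined in libfdt.h:
+//
+//     assert_eq!(fdt_err(7), Ok(7));
+//     assert_eq!(fdt_err(-1), Err(FdtError::NotFound));
+//     assert_eq!(fdt_err_or_option(-1), Ok(None));
+//     assert_eq!(fdt_err_expect_zero(0), Ok(()));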
diff --git a/microdroid/kdump/Android.bp b/microdroid/kdump/Android.bp
index ff73fdb..5d129f4 100644
--- a/microdroid/kdump/Android.bp
+++ b/microdroid/kdump/Android.bp
@@ -5,6 +5,7 @@
cc_binary {
name: "microdroid_kexec",
+ defaults: ["avf_build_flags_cc"],
stem: "kexec_load",
srcs: ["kexec.c"],
installable: false,
@@ -14,6 +15,7 @@
cc_binary {
name: "microdroid_crashdump",
+ defaults: ["avf_build_flags_cc"],
stem: "crashdump",
srcs: ["crashdump.c"],
static_executable: true,
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index 7e11de3..e67a6d5 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -5,6 +5,7 @@
cc_defaults {
name: "microdroid_metadata_default",
+ defaults: ["avf_build_flags_cc"],
host_supported: true,
srcs: [
"metadata.proto",
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index ee77f14..110d46a 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -295,6 +295,7 @@
// numbers defined in the ARM DT binding headers
cc_object {
name: "pvmfw_platform.dts.preprocessed",
+ defaults: ["avf_build_flags_cc"],
header_libs: ["arm_dt_bindings_headers"],
host_supported: true,
srcs: [":pvmfw_platform.dts.renamed"],
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index d2aad61..ac52be9 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -35,12 +35,12 @@
use libfdt::CellIterator;
use libfdt::Fdt;
use libfdt::FdtError;
-use libfdt::FdtNode;
use libfdt::FdtNodeMut;
use log::debug;
use log::error;
use log::info;
use log::warn;
+use static_assertions::const_assert;
use tinyvec::ArrayVec;
use vmbase::fdt::SwiotlbInfo;
use vmbase::layout::{crosvm::MEM_START, MAX_VIRT_ADDR};
@@ -172,33 +172,40 @@
.setprop_inplace(cstr!("reg"), [addr.to_be(), size.to_be()].as_bytes())
}
-/// Read the number of CPUs from DT
-fn read_num_cpus_from(fdt: &Fdt) -> libfdt::Result<usize> {
- Ok(fdt.compatible_nodes(cstr!("arm,arm-v8"))?.count())
-}
+#[derive(Debug, Default)]
+struct CpuInfo {}
-/// Validate number of CPUs
-fn validate_num_cpus(num_cpus: usize) -> Result<(), FdtValidationError> {
- if num_cpus == 0 || DeviceTreeInfo::gic_patched_size(num_cpus).is_none() {
- Err(FdtValidationError::InvalidCpuCount(num_cpus))
- } else {
- Ok(())
+fn read_cpu_info_from(fdt: &Fdt) -> libfdt::Result<ArrayVec<[CpuInfo; DeviceTreeInfo::MAX_CPUS]>> {
+ let mut cpus = ArrayVec::new();
+
+ let mut cpu_nodes = fdt.compatible_nodes(cstr!("arm,arm-v8"))?;
+ for _cpu in cpu_nodes.by_ref().take(cpus.capacity()) {
+ let info = CpuInfo {};
+ cpus.push(info);
}
+ if cpu_nodes.next().is_some() {
+ warn!("DT has more than {} CPU nodes: discarding extra nodes.", cpus.capacity());
+ }
+
+ Ok(cpus)
}
-/// Patch DT by keeping `num_cpus` number of arm,arm-v8 compatible nodes, and pruning the rest.
-fn patch_num_cpus(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
- let cpu = cstr!("arm,arm-v8");
- let mut next = fdt.root_mut()?.next_compatible(cpu)?;
- for _ in 0..num_cpus {
- next = if let Some(current) = next {
- current.next_compatible(cpu)?
- } else {
- return Err(FdtError::NoSpace);
- };
+fn validate_cpu_info(cpus: &[CpuInfo]) -> Result<(), FdtValidationError> {
+ if cpus.is_empty() {
+ return Err(FdtValidationError::InvalidCpuCount(0));
+ }
+
+ Ok(())
+}
+
+fn patch_cpus(fdt: &mut Fdt, cpus: &[CpuInfo]) -> libfdt::Result<()> {
+ const COMPAT: &CStr = cstr!("arm,arm-v8");
+ let mut next = fdt.root_mut()?.next_compatible(COMPAT)?;
+ for _cpu in cpus {
+ next = next.ok_or(FdtError::NoSpace)?.next_compatible(COMPAT)?;
}
while let Some(current) = next {
- next = current.delete_and_next_compatible(cpu)?;
+ next = current.delete_and_next_compatible(COMPAT)?;
}
Ok(())
}
@@ -507,11 +514,8 @@
let name = cstr!("ns16550a");
let mut next = fdt.root_mut()?.next_compatible(name);
while let Some(current) = next? {
- let reg = FdtNode::from_mut(¤t)
- .reg()?
- .ok_or(FdtError::NotFound)?
- .next()
- .ok_or(FdtError::NotFound)?;
+ let reg =
+ current.as_node().reg()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)?;
next = if !serial_info.addrs.contains(®.addr) {
current.delete_and_next_compatible(name)
} else {
@@ -582,7 +586,8 @@
let mut range1 = ranges.next().ok_or(FdtError::NotFound)?;
let addr = range0.addr;
- // `validate_num_cpus()` checked that this wouldn't panic
+ // `read_cpu_info_from()` guarantees that we have at most MAX_CPUS.
+ const_assert!(DeviceTreeInfo::gic_patched_size(DeviceTreeInfo::MAX_CPUS).is_some());
let size = u64::try_from(DeviceTreeInfo::gic_patched_size(num_cpus).unwrap()).unwrap();
// range1 is just below range0
@@ -628,7 +633,7 @@
pub initrd_range: Option<Range<usize>>,
pub memory_range: Range<usize>,
bootargs: Option<CString>,
- num_cpus: usize,
+ cpus: ArrayVec<[CpuInfo; DeviceTreeInfo::MAX_CPUS]>,
pci_info: PciInfo,
serial_info: SerialInfo,
pub swiotlb_info: SwiotlbInfo,
@@ -637,7 +642,9 @@
}
impl DeviceTreeInfo {
- fn gic_patched_size(num_cpus: usize) -> Option<usize> {
+ const MAX_CPUS: usize = 16;
+
+ const fn gic_patched_size(num_cpus: usize) -> Option<usize> {
const GIC_REDIST_SIZE_PER_CPU: usize = 32 * SIZE_4KB;
GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus)
@@ -664,7 +671,9 @@
let info = parse_device_tree(fdt, vm_dtbo.as_deref())?;
- fdt.copy_from_slice(pvmfw_fdt_template::RAW).map_err(|e| {
+ // SAFETY: We trust that the template (hardcoded in our RO data) is a valid DT.
+ let fdt_template = unsafe { Fdt::unchecked_from_slice(pvmfw_fdt_template::RAW) };
+ fdt.clone_from(fdt_template).map_err(|e| {
error!("Failed to instantiate FDT from the template DT: {e}");
RebootReason::InvalidFdt
})?;
@@ -733,12 +742,12 @@
RebootReason::InvalidFdt
})?;
- let num_cpus = read_num_cpus_from(fdt).map_err(|e| {
- error!("Failed to read num cpus from DT: {e}");
+ let cpus = read_cpu_info_from(fdt).map_err(|e| {
+ error!("Failed to read CPU info from DT: {e}");
RebootReason::InvalidFdt
})?;
- validate_num_cpus(num_cpus).map_err(|e| {
- error!("Failed to validate num cpus from DT: {e}");
+ validate_cpu_info(&cpus).map_err(|e| {
+ error!("Failed to validate CPU info from DT: {e}");
RebootReason::InvalidFdt
})?;
@@ -786,7 +795,7 @@
initrd_range,
memory_range,
bootargs,
- num_cpus,
+ cpus,
pci_info,
serial_info,
swiotlb_info,
@@ -812,7 +821,7 @@
RebootReason::InvalidFdt
})?;
}
- patch_num_cpus(fdt, info.num_cpus).map_err(|e| {
+ patch_cpus(fdt, &info.cpus).map_err(|e| {
error!("Failed to patch cpus to DT: {e}");
RebootReason::InvalidFdt
})?;
@@ -828,11 +837,11 @@
error!("Failed to patch swiotlb info to DT: {e}");
RebootReason::InvalidFdt
})?;
- patch_gic(fdt, info.num_cpus).map_err(|e| {
+ patch_gic(fdt, info.cpus.len()).map_err(|e| {
error!("Failed to patch gic info to DT: {e}");
RebootReason::InvalidFdt
})?;
- patch_timer(fdt, info.num_cpus).map_err(|e| {
+ patch_timer(fdt, info.cpus.len()).map_err(|e| {
error!("Failed to patch timer info to DT: {e}");
RebootReason::InvalidFdt
})?;
@@ -938,7 +947,7 @@
// SAFETY: on failure, the corrupted DT is restored using the backup.
if let Err(e) = unsafe { fdt.apply_overlay(overlay) } {
warn!("Failed to apply debug policy: {e}. Recovering...");
- fdt.copy_from_slice(backup_fdt.as_slice())?;
+ fdt.clone_from(backup_fdt)?;
// A successful restoration is considered success because an invalid debug policy
// shouldn't DOS the pvmfw
Ok(false)
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index e705562..ad9b776 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -37,7 +37,7 @@
use hyp::{get_mem_sharer, get_mmio_guard};
use libfdt::FdtError;
use log::{debug, error, info};
-use service_vm_comm::{RequestProcessingError, Response, ServiceVmRequest, VmType};
+use service_vm_comm::{ServiceVmRequest, VmType};
use service_vm_fake_chain::service_vm;
use service_vm_requests::process_request;
use virtio_drivers::{
@@ -177,15 +177,7 @@
let mut vsock_stream = VsockStream::new(socket_device, host_addr())?;
while let ServiceVmRequest::Process(req) = vsock_stream.read_request()? {
- let mut response = process_request(req, bcc_handover.as_ref());
- // TODO(b/185878400): We don't want to issue a certificate to pVM when the client VM
- // attestation is unfinished. The following code should be removed once the
- // verification is completed.
- if vm_type() == VmType::ProtectedVm
- && matches!(response, Response::RequestClientVmAttestation(_))
- {
- response = Response::Err(RequestProcessingError::OperationUnimplemented);
- }
+ let response = process_request(req, bcc_handover.as_ref());
vsock_stream.write_response(&response)?;
vsock_stream.flush()?;
}
diff --git a/service_vm/test_apk/Android.bp b/service_vm/demo_apk/Android.bp
similarity index 69%
rename from service_vm/test_apk/Android.bp
rename to service_vm/demo_apk/Android.bp
index 681f4e8..5644819 100644
--- a/service_vm/test_apk/Android.bp
+++ b/service_vm/demo_apk/Android.bp
@@ -4,9 +4,9 @@
}
android_app {
- name: "ServiceVmClientTestApp",
+ name: "VmAttestationDemoApp",
installable: true,
- jni_libs: ["libservice_vm_client"],
+ jni_libs: ["libvm_attestation_payload"],
jni_uses_platform_apis: true,
use_embedded_native_libs: true,
sdk_version: "system_current",
@@ -15,8 +15,8 @@
}
rust_defaults {
- name: "service_vm_client_defaults",
- crate_name: "service_vm_client",
+ name: "vm_attestation_payload_defaults",
+ crate_name: "vm_attestation_payload",
defaults: ["avf_build_flags_rust"],
srcs: ["src/main.rs"],
prefer_rlib: true,
@@ -29,6 +29,6 @@
}
rust_ffi {
- name: "libservice_vm_client",
- defaults: ["service_vm_client_defaults"],
+ name: "libvm_attestation_payload",
+ defaults: ["vm_attestation_payload_defaults"],
}
diff --git a/service_vm/test_apk/AndroidManifest.xml b/service_vm/demo_apk/AndroidManifest.xml
similarity index 94%
rename from service_vm/test_apk/AndroidManifest.xml
rename to service_vm/demo_apk/AndroidManifest.xml
index b3598fc..228195d 100644
--- a/service_vm/test_apk/AndroidManifest.xml
+++ b/service_vm/demo_apk/AndroidManifest.xml
@@ -13,7 +13,7 @@
limitations under the License.
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
- package="com.android.virt.service_vm.client">
+ package="com.android.virt.vm_attestation.demo">
<uses-permission android:name="android.permission.MANAGE_VIRTUAL_MACHINE" />
<uses-permission android:name="android.permission.USE_CUSTOM_VIRTUAL_MACHINE" />
diff --git a/service_vm/demo_apk/README.md b/service_vm/demo_apk/README.md
new file mode 100644
index 0000000..551d47b
--- /dev/null
+++ b/service_vm/demo_apk/README.md
@@ -0,0 +1,53 @@
+# VmAttestationDemoApp
+
+## Overview
+
+The *VmAttestationDemoApp* is an Android application that provides a practical
+demonstration of how to interact with the VM Attestation APIs. The app focuses
+on its VM payload, which performs two main tasks: requesting attestation and
+validating the attestation result.
+
+## Building
+
+To build the VmAttestationDemoApp, use the following command:
+
+```
+m VmAttestationDemoApp
+```
+
+## Installing
+
+To install the app on your device, execute the following command:
+
+```
+adb install $ANDROID_PRODUCT_OUT/system/app/VmAttestationDemoApp/VmAttestationDemoApp.apk
+```
+
+## Running
+
+Before running the app, make sure that the device has an internet connection
+and that the remote provisioning host is configured (i.e. the property below is
+not empty). You can use the following command to check it:
+
+```
+$ adb shell getprop remote_provisioning.hostname
+remoteprovisioning.googleapis.com
+```
+
+Once you have confirmed the remote provisioning host, you can run the app using
+the following command:
+
+```
+TEST_ROOT=/data/local/tmp/virt && adb shell /apex/com.android.virt/bin/vm run-app \
+ --config-path assets/config.json --debug full \
+ $(adb shell pm path com.android.virt.vm_attestation.demo | cut -c 9-) \
+ $TEST_ROOT/VmAttestationDemoApp.apk.idsig \
+ $TEST_ROOT/instance.vm_attestation.debug.img --protected
+```
+
+Please note that remote attestation is only available for protected VMs.
+Therefore, ensure that the VM is launched in protected mode using the
+`--protected` flag.
+
+If everything is set up correctly, you should be able to see the attestation
+result printed out in the VM logs.
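+
+For example, one way to look for the result from the host (the exact log tag
+and wording may vary between builds, so treat this as a starting point) is:
+
+```
+adb logcat | grep -i attestation
+```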
diff --git a/service_vm/test_apk/assets/config.json b/service_vm/demo_apk/assets/config.json
similarity index 74%
rename from service_vm/test_apk/assets/config.json
rename to service_vm/demo_apk/assets/config.json
index 02749fe..1684696 100644
--- a/service_vm/test_apk/assets/config.json
+++ b/service_vm/demo_apk/assets/config.json
@@ -4,7 +4,7 @@
},
"task": {
"type": "microdroid_launcher",
- "command": "libservice_vm_client.so"
+ "command": "libvm_attestation_payload.so"
},
"export_tombstones": true
}
\ No newline at end of file
diff --git a/service_vm/test_apk/src/main.rs b/service_vm/demo_apk/src/main.rs
similarity index 99%
rename from service_vm/test_apk/src/main.rs
rename to service_vm/demo_apk/src/main.rs
index df60325..0d1efb0 100644
--- a/service_vm/test_apk/src/main.rs
+++ b/service_vm/demo_apk/src/main.rs
@@ -224,6 +224,6 @@
// static string.
let message = unsafe { AVmAttestationResult_resultToString(status) };
// SAFETY: The pointer returned by `AVmAttestationResult_resultToString` is guaranteed to
- // point to a valid C String.
+ // point to a valid C string with a static lifetime.
unsafe { CStr::from_ptr(message) }
}
diff --git a/tests/benchmark/Android.bp b/tests/benchmark/Android.bp
index 31fe0f6..c31c929 100644
--- a/tests/benchmark/Android.bp
+++ b/tests/benchmark/Android.bp
@@ -34,6 +34,7 @@
cc_library_shared {
name: "MicrodroidBenchmarkNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/*.cpp"],
local_include_dirs: ["src/native/include"],
static_libs: [
diff --git a/tests/benchmark/src/jni/Android.bp b/tests/benchmark/src/jni/Android.bp
index c2e1b7c..06de3e5 100644
--- a/tests/benchmark/src/jni/Android.bp
+++ b/tests/benchmark/src/jni/Android.bp
@@ -5,6 +5,7 @@
cc_library_shared {
name: "libiovsock_host_jni",
+ defaults: ["avf_build_flags_cc"],
srcs: ["io_vsock_host_jni.cpp"],
header_libs: ["jni_headers"],
shared_libs: ["libbase"],
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index 10bbfb4..86172b0 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -68,6 +68,7 @@
// (MicrodroidTestApp) can start a payload defined in the another app (MicrodroidVmShareApp).
cc_defaults {
name: "MicrodroidTestNativeLibDefaults",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/testbinary.cpp"],
stl: "libc++_static",
header_libs: ["vm_payload_restricted_headers"],
@@ -99,12 +100,14 @@
cc_library_shared {
name: "MicrodroidTestNativeLibSub",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/testlib.cpp"],
stl: "libc++_static",
}
cc_library_shared {
name: "MicrodroidIdleNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/idlebinary.cpp"],
header_libs: ["vm_payload_headers"],
stl: "libc++_static",
@@ -113,6 +116,7 @@
// An empty payload missing AVmPayload_main
cc_library_shared {
name: "MicrodroidEmptyNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/emptybinary.cpp"],
stl: "none",
}
@@ -120,6 +124,7 @@
// A payload that exits immediately on start
cc_library_shared {
name: "MicrodroidExitNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/exitbinary.cpp"],
header_libs: ["vm_payload_headers"],
stl: "libc++_static",
@@ -128,6 +133,7 @@
// A payload which tries to link against libselinux, one of private libraries
cc_library_shared {
name: "MicrodroidPrivateLinkingNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/idlebinary.cpp"],
header_libs: ["vm_payload_headers"],
// HACK: linking against "libselinux" will embed libselinux.so into the apk
@@ -139,6 +145,7 @@
// A payload that crashes immediately on start
cc_library_shared {
name: "MicrodroidCrashNativeLib",
+ defaults: ["avf_build_flags_cc"],
srcs: ["src/native/crashbinary.cpp"],
header_libs: ["vm_payload_headers"],
stl: "libc++_static",
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 25dab0e..1dd0309 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -453,29 +453,32 @@
// Minimal has as little as specified as possible; everything that can be is defaulted.
VirtualMachineConfig.Builder minimalBuilder =
new VirtualMachineConfig.Builder(getContext())
- .setPayloadBinaryName("binary.so")
+ .setPayloadConfigPath("config/path")
.setProtectedVm(isProtectedVm());
VirtualMachineConfig minimal = minimalBuilder.build();
assertThat(minimal.getApkPath()).isNull();
+ assertThat(minimal.getExtraApks()).isEmpty();
assertThat(minimal.getDebugLevel()).isEqualTo(DEBUG_LEVEL_NONE);
assertThat(minimal.getMemoryBytes()).isEqualTo(0);
assertThat(minimal.getCpuTopology()).isEqualTo(CPU_TOPOLOGY_ONE_CPU);
- assertThat(minimal.getPayloadBinaryName()).isEqualTo("binary.so");
- assertThat(minimal.getPayloadConfigPath()).isNull();
+ assertThat(minimal.getPayloadBinaryName()).isNull();
+ assertThat(minimal.getPayloadConfigPath()).isEqualTo("config/path");
assertThat(minimal.isProtectedVm()).isEqualTo(isProtectedVm());
assertThat(minimal.isEncryptedStorageEnabled()).isFalse();
assertThat(minimal.getEncryptedStorageBytes()).isEqualTo(0);
assertThat(minimal.isVmOutputCaptured()).isEqualTo(false);
- assertThat(minimal.getOs()).isEqualTo("microdroid");
+ assertThat(minimal.getOs()).isNull();
// Maximal has everything that can be set to some non-default value. (And has different
// values than minimal for the required fields.)
VirtualMachineConfig.Builder maximalBuilder =
new VirtualMachineConfig.Builder(getContext())
.setProtectedVm(mProtectedVm)
- .setPayloadConfigPath("config/path")
+ .setPayloadBinaryName("binary.so")
.setApkPath("/apk/path")
+ .addExtraApk("package.name1:split")
+ .addExtraApk("package.name2")
.setDebugLevel(DEBUG_LEVEL_FULL)
.setMemoryBytes(42)
.setCpuTopology(CPU_TOPOLOGY_MATCH_HOST)
@@ -484,25 +487,28 @@
VirtualMachineConfig maximal = maximalBuilder.build();
assertThat(maximal.getApkPath()).isEqualTo("/apk/path");
+ assertThat(maximal.getExtraApks())
+ .containsExactly("package.name1:split", "package.name2")
+ .inOrder();
assertThat(maximal.getDebugLevel()).isEqualTo(DEBUG_LEVEL_FULL);
assertThat(maximal.getMemoryBytes()).isEqualTo(42);
assertThat(maximal.getCpuTopology()).isEqualTo(CPU_TOPOLOGY_MATCH_HOST);
- assertThat(maximal.getPayloadBinaryName()).isNull();
- assertThat(maximal.getPayloadConfigPath()).isEqualTo("config/path");
+ assertThat(maximal.getPayloadBinaryName()).isEqualTo("binary.so");
+ assertThat(maximal.getPayloadConfigPath()).isNull();
assertThat(maximal.isProtectedVm()).isEqualTo(isProtectedVm());
assertThat(maximal.isEncryptedStorageEnabled()).isTrue();
assertThat(maximal.getEncryptedStorageBytes()).isEqualTo(1_000_000);
assertThat(maximal.isVmOutputCaptured()).isEqualTo(true);
- assertThat(maximal.getOs()).isNull();
+ assertThat(maximal.getOs()).isEqualTo("microdroid");
assertThat(minimal.isCompatibleWith(maximal)).isFalse();
assertThat(minimal.isCompatibleWith(minimal)).isTrue();
assertThat(maximal.isCompatibleWith(maximal)).isTrue();
- VirtualMachineConfig os = minimalBuilder.setOs("microdroid_gki-android14-6.1").build();
+ VirtualMachineConfig os = maximalBuilder.setOs("microdroid_gki-android14-6.1").build();
assertThat(os.getPayloadBinaryName()).isEqualTo("binary.so");
assertThat(os.getOs()).isEqualTo("microdroid_gki-android14-6.1");
- assertThat(os.isCompatibleWith(minimal)).isFalse();
+ assertThat(os.isCompatibleWith(maximal)).isFalse();
}
@Test
@@ -514,6 +520,7 @@
// All your null are belong to me.
assertThrows(NullPointerException.class, () -> new VirtualMachineConfig.Builder(null));
assertThrows(NullPointerException.class, () -> builder.setApkPath(null));
+ assertThrows(NullPointerException.class, () -> builder.addExtraApk(null));
assertThrows(NullPointerException.class, () -> builder.setPayloadConfigPath(null));
assertThrows(NullPointerException.class, () -> builder.setPayloadBinaryName(null));
assertThrows(NullPointerException.class, () -> builder.setVendorDiskImage(null));
@@ -579,6 +586,7 @@
.isTrue();
// Changes that must be incompatible, since they must change the VM identity.
+ assertConfigCompatible(baseline, newBaselineBuilder().addExtraApk("foo")).isFalse();
assertConfigCompatible(baseline, newBaselineBuilder().setDebugLevel(DEBUG_LEVEL_FULL))
.isFalse();
assertConfigCompatible(baseline, newBaselineBuilder().setPayloadBinaryName("different"))
@@ -903,7 +911,34 @@
vm,
(ts, tr) -> {
tr.mExtraApkTestProp =
- ts.readProperty("debug.microdroid.test.extra_apk");
+ ts.readProperty(
+ "debug.microdroid.test.extra_apk_build_manifest");
+ });
+ assertThat(testResults.mExtraApkTestProp).isEqualTo("PASS");
+ }
+
+ @Test
+ @CddTest(requirements = {"9.17/C-1-1", "9.17/C-2-1"})
+ public void extraApkInVmConfig() throws Exception {
+ assumeSupportedDevice();
+ assumeFeatureEnabled(VirtualMachineManager.FEATURE_MULTI_TENANT);
+
+ grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
+ VirtualMachineConfig config =
+ newVmConfigBuilderWithPayloadBinary("MicrodroidTestNativeLib.so")
+ .setMemoryBytes(minMemoryRequired())
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .addExtraApk(VM_SHARE_APP_PACKAGE_NAME)
+ .build();
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_extra_apk", config);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mExtraApkTestProp =
+ ts.readProperty("debug.microdroid.test.extra_apk_vm_share");
});
assertThat(testResults.mExtraApkTestProp).isEqualTo("PASS");
}
diff --git a/tests/testapk/src/native/testbinary.cpp b/tests/testapk/src/native/testbinary.cpp
index c9b5e3a..1a75102 100644
--- a/tests/testapk/src/native/testbinary.cpp
+++ b/tests/testapk/src/native/testbinary.cpp
@@ -349,7 +349,7 @@
return {};
}
-Result<void> verify_apk() {
+Result<void> verify_build_manifest() {
const char* path = "/mnt/extra-apk/0/assets/build_manifest.pb";
std::string str;
@@ -364,6 +364,17 @@
return {};
}
+Result<void> verify_vm_share() {
+ const char* path = "/mnt/extra-apk/0/assets/vmshareapp.txt";
+
+ std::string str;
+ if (!android::base::ReadFileToString(path, &str)) {
+ return ErrnoError() << "failed to read vmshareapp.txt";
+ }
+
+ return {};
+}
+
} // Anonymous namespace
extern "C" int AVmPayload_main() {
@@ -372,8 +383,10 @@
// Make sure we can call into other shared libraries.
testlib_sub();
- // Extra apks may be missing; this is not a fatal error
- report_test("extra_apk", verify_apk());
+ // Report various things that aren't always fatal - these are checked in MicrodroidTests as
+ // appropriate.
+ report_test("extra_apk_build_manifest", verify_build_manifest());
+ report_test("extra_apk_vm_share", verify_vm_share());
__system_property_set("debug.microdroid.app.run", "true");
diff --git a/tests/vmshareapp/assets/vmshareapp.txt b/tests/vmshareapp/assets/vmshareapp.txt
new file mode 100644
index 0000000..02fdd71
--- /dev/null
+++ b/tests/vmshareapp/assets/vmshareapp.txt
@@ -0,0 +1 @@
+Marker file for the vmshareapp APK
diff --git a/virtualizationmanager/fsfdt/Android.bp b/virtualizationmanager/fsfdt/Android.bp
index 0aa2ef0..7199485 100644
--- a/virtualizationmanager/fsfdt/Android.bp
+++ b/virtualizationmanager/fsfdt/Android.bp
@@ -18,8 +18,8 @@
],
}
-rust_library_rlib {
- name: "libfsfdt",
+rust_defaults {
+ name: "libfsfdt_default",
crate_name: "fsfdt",
defaults: ["avf_build_flags_rust"],
edition: "2021",
@@ -31,3 +31,17 @@
],
apex_available: ["com.android.virt"],
}
+
+rust_library_rlib {
+ name: "libfsfdt",
+ defaults: ["libfsfdt_default"],
+}
+
+rust_test {
+ name: "libfsfdt_test",
+ defaults: ["libfsfdt_default"],
+ data: ["testdata/**/*"],
+ data_bins: ["dtc_static"],
+ rustlibs: ["libtempfile"],
+ compile_multilib: "first",
+}
diff --git a/virtualizationmanager/fsfdt/src/lib.rs b/virtualizationmanager/fsfdt/src/lib.rs
index a2ca519..549df04 100644
--- a/virtualizationmanager/fsfdt/src/lib.rs
+++ b/virtualizationmanager/fsfdt/src/lib.rs
@@ -98,3 +98,56 @@
Ok(())
}
}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use std::io::Write;
+ use std::process::Command;
+ use tempfile::NamedTempFile;
+
+ const TEST_FS_FDT_ROOT_PATH: &str = "testdata/fs";
+ const BUF_SIZE_MAX: usize = 1024;
+
+ fn dts_from_fs(path: &Path) -> String {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "fs", "-O", "dts", path])
+ .output()
+ .unwrap();
+ assert!(res.status.success(), "{res:?}");
+ String::from_utf8(res.stdout).unwrap()
+ }
+
+ fn dts_from_dtb(path: &Path) -> String {
+ let path = path.to_str().unwrap();
+ let res = Command::new("./dtc_static")
+ .args(["-f", "-s", "-I", "dtb", "-O", "dts", path])
+ .output()
+ .unwrap();
+ assert!(res.status.success(), "{res:?}");
+ String::from_utf8(res.stdout).unwrap()
+ }
+
+ fn to_temp_file(fdt: &Fdt) -> Result<NamedTempFile> {
+ let mut file = NamedTempFile::new()?;
+ file.as_file_mut().write_all(fdt.as_slice())?;
+ file.as_file_mut().sync_all()?;
+
+ Ok(file)
+ }
+
+ #[test]
+ fn test_from_fs() {
+ let fs_path = Path::new(TEST_FS_FDT_ROOT_PATH);
+
+ let mut data = vec![0_u8; BUF_SIZE_MAX];
+ let fdt = Fdt::from_fs(fs_path, &mut data).unwrap();
+ let file = to_temp_file(fdt).unwrap();
+
+ let expected = dts_from_fs(fs_path);
+ let actual = dts_from_dtb(file.path());
+
+ assert_eq!(&expected, &actual);
+ }
+}
diff --git a/virtualizationmanager/fsfdt/testdata/fs/avf/reference/oem/stub b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/oem/stub
new file mode 100644
index 0000000..2e73f18
--- /dev/null
+++ b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/oem/stub
Binary files differ
diff --git a/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/empty b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/empty
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/empty
diff --git a/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/vendor_extra_node/flag b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/vendor_extra_node/flag
new file mode 100644
index 0000000..accba00
--- /dev/null
+++ b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor/vendor_extra_node/flag
Binary files differ
diff --git a/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_hashtree_descriptor_root_digest b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_hashtree_descriptor_root_digest
new file mode 100644
index 0000000..e901bb1
--- /dev/null
+++ b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_hashtree_descriptor_root_digest
Binary files differ
diff --git a/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_image_key b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_image_key
new file mode 100644
index 0000000..4d02944
--- /dev/null
+++ b/virtualizationmanager/fsfdt/testdata/fs/avf/reference/vendor_image_key
Binary files differ
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 286612c..a745fd6 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -53,7 +53,7 @@
],
visibility: [
"//packages/modules/Virtualization/compos",
- "//packages/modules/Virtualization/service_vm/test_apk",
+ "//packages/modules/Virtualization/service_vm:__subpackages__",
],
shared_libs: [
"libvm_payload#current",
@@ -63,6 +63,7 @@
// Shared library for clients to link against.
cc_library_shared {
name: "libvm_payload",
+ defaults: ["avf_build_flags_cc"],
shared_libs: [
"libbinder_ndk",
"libbinder_rpc_unstable",
@@ -84,6 +85,7 @@
// declaration of AVmPayload_main().
cc_library_headers {
name: "vm_payload_headers",
+ defaults: ["avf_build_flags_cc"],
apex_available: ["com.android.compos"],
export_include_dirs: ["include"],
}
@@ -91,6 +93,7 @@
// Restricted headers for use by internal clients & associated tests.
cc_library_headers {
name: "vm_payload_restricted_headers",
+ defaults: ["avf_build_flags_cc"],
header_libs: ["vm_payload_headers"],
export_header_lib_headers: ["vm_payload_headers"],
export_include_dirs: ["include-restricted"],
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index e682773..07e1b4c 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -41,6 +41,7 @@
// Used by extra cc_library_static linked into the final ELF.
cc_defaults {
name: "vmbase_cc_defaults",
+ defaults: ["avf_build_flags_cc"],
nocrt: true,
no_libcrt: true,
system_shared_libs: [],