Use &raw rather than the addr_of macros.
This was added in Rust 1.82, which we now have in AOSP.
Test: m vmbase_example_kernel_bin
Change-Id: I5b3f3f47a261cb07d0eca3487005795d0cb050f6
diff --git a/guest/pvmfw/avb/tests/api_test.rs b/guest/pvmfw/avb/tests/api_test.rs
index 23e05d4..29a6277 100644
--- a/guest/pvmfw/avb/tests/api_test.rs
+++ b/guest/pvmfw/avb/tests/api_test.rs
@@ -23,7 +23,6 @@
use std::{
fs,
mem::{offset_of, size_of},
- ptr,
};
use utils::*;
@@ -414,9 +413,9 @@
// vbmeta_header is unaligned; copy flags to local variable
let vbmeta_header_flags = vbmeta_header.flags;
assert_eq!(0, vbmeta_header_flags, "The disable flag should not be set in the latest kernel.");
- let flags_addr = ptr::addr_of!(vbmeta_header.flags) as *const u8;
+ let flags_addr = (&raw const vbmeta_header.flags).cast::<u8>();
// SAFETY: It is safe as both raw pointers `flags_addr` and `vbmeta_header` are not null.
- let flags_offset = unsafe { flags_addr.offset_from(ptr::addr_of!(vbmeta_header) as *const u8) };
+ let flags_offset = unsafe { flags_addr.offset_from((&raw const vbmeta_header).cast::<u8>()) };
let flags_offset = usize::try_from(footer.vbmeta_offset)? + usize::try_from(flags_offset)?;
// Act.
diff --git a/guest/vmbase_example/src/main.rs b/guest/vmbase_example/src/main.rs
index f5b41bd..52a5f3e 100644
--- a/guest/vmbase_example/src/main.rs
+++ b/guest/vmbase_example/src/main.rs
@@ -26,7 +26,6 @@
use crate::layout::print_addresses;
use crate::pci::check_pci;
use alloc::{vec, vec::Vec};
-use core::ptr::addr_of_mut;
use libfdt::Fdt;
use log::{debug, error, info, trace, warn, LevelFilter};
use vmbase::{
@@ -101,6 +100,7 @@
);
}
+#[allow(static_mut_refs)]
fn check_data() {
info!("INITIALISED_DATA: {:?}", INITIALISED_DATA.as_ptr());
// SAFETY: We only print the addresses of the static mutable variable, not actually access it.
@@ -115,10 +115,10 @@
// SAFETY: Nowhere else in the program accesses this static mutable variable, so there is no
// chance of concurrent access.
- let zeroed_data = unsafe { &mut *addr_of_mut!(ZEROED_DATA) };
+ let zeroed_data = unsafe { &mut ZEROED_DATA };
// SAFETY: Nowhere else in the program accesses this static mutable variable, so there is no
// chance of concurrent access.
- let mutable_data = unsafe { &mut *addr_of_mut!(MUTABLE_DATA) };
+ let mutable_data = unsafe { &mut MUTABLE_DATA };
for element in zeroed_data.iter() {
assert_eq!(*element, 0);
diff --git a/libs/libvmbase/src/bionic.rs b/libs/libvmbase/src/bionic.rs
index 7b9fe2a..ac9f80f 100644
--- a/libs/libvmbase/src/bionic.rs
+++ b/libs/libvmbase/src/bionic.rs
@@ -20,7 +20,6 @@
use core::ffi::c_int;
use core::ffi::c_void;
use core::ffi::CStr;
-use core::ptr::addr_of_mut;
use core::slice;
use core::str;
@@ -74,7 +73,7 @@
// SAFETY: C functions which call this are only called from the main thread, not from exception
// handlers.
unsafe extern "C" fn __errno() -> *mut c_int {
- addr_of_mut!(ERRNO) as *mut _
+ (&raw mut ERRNO).cast()
}
fn set_errno(value: c_int) {
diff --git a/libs/libvmbase/src/layout.rs b/libs/libvmbase/src/layout.rs
index ad7a390..4c45eb2 100644
--- a/libs/libvmbase/src/layout.rs
+++ b/libs/libvmbase/src/layout.rs
@@ -22,7 +22,6 @@
use crate::memory::{max_stack_size, page_4kb_of, PAGE_SIZE};
use aarch64_paging::paging::VirtualAddress;
use core::ops::Range;
-use core::ptr::addr_of;
use static_assertions::const_assert_eq;
/// First address that can't be translated by a level 1 TTBR0_EL1.
@@ -44,7 +43,7 @@
#[macro_export]
macro_rules! linker_addr {
($symbol:ident) => {{
- let addr = addr_of!($crate::linker::$symbol) as usize;
+ let addr = (&raw const $crate::linker::$symbol) as usize;
VirtualAddress(addr)
}};
}
@@ -130,5 +129,5 @@
// SAFETY: __stack_chk_guard shouldn't have any mutable aliases unless the stack overflows. If
// it does, then there could be undefined behaviour all over the program, but we want to at
// least have a chance at catching it.
- unsafe { addr_of!(__stack_chk_guard).read_volatile() }
+ unsafe { (&raw const __stack_chk_guard).read_volatile() }
}