vmbase: Support LLVM and Bionic stack guard
C code compiled with the Android toolchain has stack protector checks
enabled by default. Rather than recompiling every target with
-fno-stack-protector, add the expected symbols to vmbase instead.
This means providing a __stack_chk_guard variable containing the canary
(used by all libc implementations [1]) and a thread-local storage area
holding another copy of the canary at offset 40 (Bionic only [2]).
If the instrumentation detects an error, it calls __stack_chk_fail.
Provide an implementation that simply panics.
[1] bionic/tests/stack_protector_test.cpp
[2] https://reviews.llvm.org/D18632
Bug: 237372981
Test: atest vmbase_example.integration_test
Change-Id: I00de7c75aef6a57a73667df6983a12b01fbe9e51
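
For context, a rough sketch of the check that the stack protector
instrumentation wraps around each protected C function, written here as
Rust pseudocode purely for illustration (the real check is emitted inline
by clang, in assembly; guard_from_tls is a hypothetical helper standing
in for the load from [tpidr_el0 + 40]):

    use core::arch::asm;

    /// Hypothetical helper mirroring the load clang emits on aarch64
    /// Android: canary = *(u64 *)(TPIDR_EL0 + 40).
    fn guard_from_tls() -> u64 {
        let base: usize;
        unsafe {
            asm!("mrs {}, tpidr_el0", out(reg) base);
            *((base + 40) as *const u64)
        }
    }

    extern "C" {
        fn __stack_chk_fail() -> !;
    }

    fn instrumented_function() {
        // Prologue: copy the canary into the frame, between the local
        // buffers and the saved return address.
        let frame_copy = guard_from_tls();
        // ... body; overflowing a local buffer would clobber frame_copy ...
        // Epilogue: recheck the copy against the reference canary.
        if frame_copy != guard_from_tls() {
            unsafe { __stack_chk_fail() } // never returns
        }
    }
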
diff --git a/vmbase/entry.S b/vmbase/entry.S
index 490c2f3..75ab90b 100644
--- a/vmbase/entry.S
+++ b/vmbase/entry.S
@@ -73,6 +73,14 @@
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
+/* Bionic-compatible stack protector */
+.section .data.stack_protector, "aw"
+__bionic_tls:
+	.zero 40 /* TLS slots 0-4; __stack_chk_guard below fills slot 5 */
+.global __stack_chk_guard
+__stack_chk_guard:
+ .quad 0x23d6d3f3c3b84098 /* TODO: randomize */
+
/**
* This is a generic entry point for an image. It carries out the operations required to prepare the
* loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
@@ -150,6 +158,10 @@
adr x30, vector_table_el1
msr vbar_el1, x30
+ /* Set up Bionic-compatible thread-local storage. */
+ adr_l x30, __bionic_tls
+ msr tpidr_el0, x30
+
/* Call into Rust code. */
bl rust_entry
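
A note on the layout above: Bionic keeps the stack guard in TLS slot 5
[2], and each slot is one 64-bit word, so placing __stack_chk_guard
exactly 40 bytes after __bionic_tls lets the same quad serve both as the
global canary and as the TLS copy. A minimal sketch of the arithmetic
(constant names chosen to match Bionic's, shown only for illustration):

    // Bionic reserves TLS slot 5 for the stack guard [2]; slots are
    // one 64-bit word each, hence the 40-byte offset.
    const TLS_SLOT_STACK_GUARD: usize = 5;
    const SLOT_SIZE: usize = core::mem::size_of::<u64>(); // 8 bytes
    const GUARD_OFFSET: usize = TLS_SLOT_STACK_GUARD * SLOT_SIZE;

    // Matches the `.zero 40` padding before __stack_chk_guard above.
    const _: () = assert!(GUARD_OFFSET == 40);
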
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index 9cf1a69..463738e 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -15,6 +15,7 @@
//! Memory layout.
use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
+use core::arch::asm;
use core::ops::Range;
use vmbase::println;
@@ -106,6 +107,21 @@
);
}
+/// Reads the Bionic-compatible thread-local storage entry at the given offset from TPIDR_EL0.
+pub fn bionic_tls(off: usize) -> u64 {
+    let base: usize;
+ unsafe {
+ asm!("mrs {base}, tpidr_el0", base = out(reg) base);
+ let ptr = (base + off) as *const u64;
+ *ptr
+ }
+}
+
+/// Value of __stack_chk_guard.
+pub fn stack_chk_guard() -> u64 {
+ unsafe { __stack_chk_guard }
+}
+
extern "C" {
static dtb_begin: u8;
static dtb_end: u8;
@@ -120,4 +136,5 @@
static bss_end: u8;
static boot_stack_begin: u8;
static boot_stack_end: u8;
+ static __stack_chk_guard: u64;
}
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 3c91f97..9b362b2 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -24,7 +24,8 @@
extern crate alloc;
use crate::layout::{
- dtb_range, print_addresses, rodata_range, text_range, writable_region, DEVICE_REGION,
+ bionic_tls, dtb_range, print_addresses, rodata_range, stack_chk_guard, text_range,
+ writable_region, DEVICE_REGION,
};
use aarch64_paging::{idmap::IdMap, paging::Attributes};
use alloc::{vec, vec::Vec};
@@ -55,6 +56,7 @@
print_addresses();
assert_eq!(arg0, dtb_range().start.0 as u64);
check_data();
+ check_stack_guard();
unsafe {
HEAP_ALLOCATOR.lock().init(&mut HEAP as *mut u8 as usize, HEAP.len());
@@ -94,6 +96,13 @@
check_data();
}
+fn check_stack_guard() {
+ const BIONIC_TLS_STACK_GRD_OFF: usize = 40;
+
+ info!("Testing stack guard");
+ assert_eq!(bionic_tls(BIONIC_TLS_STACK_GRD_OFF), stack_chk_guard());
+}
+
fn check_data() {
info!("INITIALISED_DATA: {:#010x}", &INITIALISED_DATA as *const u32 as usize);
unsafe {
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index 257f415..9c9417a 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -30,3 +30,8 @@
eprintln!("{}", info);
reboot()
}
+
+#[no_mangle]
+extern "C" fn __stack_chk_fail() -> ! {
+ panic!("stack guard check failed");
+}
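
Since __stack_chk_fail just panics, a failure funnels into the panic
handler above (eprintln! followed by reboot()). A hypothetical way to
smoke-test that path from a payload, without actually smashing a C stack
frame, would be to call the handler directly:

    extern "C" {
        // Resolves against the #[no_mangle] definition in vmbase/src/lib.rs.
        fn __stack_chk_fail() -> !;
    }

    /// Hypothetical helper, not part of this change: deliberately take
    /// the stack-guard failure path to check that it panics and reboots.
    fn provoke_stack_guard_failure() -> ! {
        unsafe { __stack_chk_fail() }
    }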