Move entry point and exception vector to library.
Bug: 223166344
Test: Ran unprotected VM under crosvm.
Change-Id: Id0189800ff9fc896989c838e3b30d88494e99cd5
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 972cd1b..c335bda 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -16,3 +16,22 @@
},
apex_available: ["com.android.virt"],
}
+
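+// Assembly-only entry/exception code, intended to be linked into bare-metal VM
+// images (e.g. via static_libs); the image is expected to provide `main` and
+// the exception handler symbols referenced by entry.S and exceptions.S.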
+cc_library_static {
+ name: "libvmbase_entry",
+ srcs: [
+ "entry.S",
+ "exceptions.S",
+ ],
+ nocrt: true,
+ no_libcrt: true,
+ system_shared_libs: [],
+ stl: "none",
+ enabled: false,
+ target: {
+ android_arm64: {
+ enabled: true,
+ },
+ },
+ apex_available: ["com.android.virt"],
+}
diff --git a/vmbase/entry.S b/vmbase/entry.S
new file mode 100644
index 0000000..f0021be
--- /dev/null
+++ b/vmbase/entry.S
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
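+/* Loads the address of a symbol into a register, PC-relatively (adrp + add of the low 12 bits). */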
+.macro adr_l, reg:req, sym:req
+ adrp \reg, \sym
+ add \reg, \reg, :lo12:\sym
+.endm
+
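+/*
+ * Loads a 64-bit constant into a register as a movz/movk sequence, rather than
+ * as a load from a literal pool.
+ */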
+.macro mov_i, reg:req, imm:req
+ movz \reg, :abs_g3:\imm
+ movk \reg, :abs_g2_nc:\imm
+ movk \reg, :abs_g1_nc:\imm
+ movk \reg, :abs_g0_nc:\imm
+.endm
+
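+/*
+ * MAIR attribute 0 (bits [7:0]) is device nGnRE memory and attribute 1 (bits
+ * [15:8]) is normal write-back write-allocate memory; the stage 1 descriptors
+ * in idmap are expected to refer to these indices.
+ */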
+.set .L_MAIR_DEV_nGnRE, 0x04
+.set .L_MAIR_MEM_WBWA, 0xff
+.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
+
+/* 4 KiB granule size for TTBR0_EL1. */
+.set .L_TCR_TG0_4KB, 0x0 << 14
+/* 4 KiB granule size for TTBR1_EL1. */
+.set .L_TCR_TG1_4KB, 0x2 << 30
+/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
+.set .L_TCR_EPD1, 0x1 << 23
+/* Translation table walks for TTBR0_EL1 are inner sharable. */
+.set .L_TCR_SH_INNER, 0x3 << 12
+/*
+ * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
+ * cacheable.
+ */
+.set .L_TCR_RGN_OWB, 0x1 << 10
+/*
+ * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
+ * cacheable.
+ */
+.set .L_TCR_RGN_IWB, 0x1 << 8
+/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
+.set .L_TCR_T0SZ_512, 64 - 39
+.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
+.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
+
+/* Stage 1 instruction access cacheability is unaffected. */
+.set .L_SCTLR_ELx_I, 0x1 << 12
+/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
+.set .L_SCTLR_ELx_SA, 0x1 << 3
+/* Stage 1 data access cacheability is unaffected. */
+.set .L_SCTLR_ELx_C, 0x1 << 2
+/* EL0 and EL1 stage 1 MMU enabled. */
+.set .L_SCTLR_ELx_M, 0x1 << 0
+/* Privileged Access Never is unchanged on taking an exception to EL1. */
+.set .L_SCTLR_EL1_SPAN, 0x1 << 23
+/* All writable memory regions are treated as XN. */
+.set .L_SCTLR_EL1_WXN, 0x1 << 19
+/* SETEND instruction disabled at EL0 in aarch32 mode. */
+.set .L_SCTLR_EL1_SED, 0x1 << 8
+/* Various IT instructions are disabled at EL0 in aarch32 mode. */
+.set .L_SCTLR_EL1_ITD, 0x1 << 7
+.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
+.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
+.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
+
+/**
+ * This is a generic entry point for an image. It carries out the operations
+ * required to prepare the loaded image to be run. Specifically, it enables the
+ * MMU and caches, enables floating point, zeroes the bss section and copies the
+ * data section using registers x28 and above, prepares the stack, and sets up
+ * the exception vector before calling into the Rust code.
+ */
+.section .init.entry, "ax"
+.global entry
+entry:
+ /* Enable MMU and caches. */
+
+ /*
+ * Load and apply the memory management configuration.
+ */
+ adrp x1, idmap
+ mov_i x2, .Lmairval
+ mov_i x3, .Ltcrval
+ mov_i x4, .Lsctlrval
+
+ /*
+ * Copy the supported PA range into TCR_EL1.IPS: ID_AA64MMFR0_EL1.PARange is
+ * bits [3:0] and TCR_EL1.IPS starts at bit 32.
+ */
+ mrs x6, id_aa64mmfr0_el1
+ bfi x3, x6, #32, #4
+
+ msr ttbr0_el1, x1
+ msr mair_el1, x2
+ msr tcr_el1, x3
+
+ /*
+ * Ensure everything before this point has completed, then invalidate any potentially stale
+ * local TLB entries before they start being used.
+ */
+ isb
+ tlbi vmalle1
+ ic iallu
+ dsb nsh
+ isb
+
+ /*
+ * Configure sctlr_el1 to enable the MMU and caches, and don't proceed
+ * until this has completed.
+ */
+ msr sctlr_el1, x4
+ isb
+
+ /* Disable trapping floating point access in EL1. */
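+ /* (CPACR_EL1.FPEN is bits [21:20]; 0b11 means FP/SIMD accesses are not trapped.) */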
+ mrs x30, cpacr_el1
+ orr x30, x30, #(0x3 << 20)
+ msr cpacr_el1, x30
+ isb
+
+ /* Zero out the bss section. */
+ adr_l x29, bss_begin
+ adr_l x30, bss_end
+0: cmp x29, x30
+ b.hs 1f
+ stp xzr, xzr, [x29], #16
+ b 0b
+
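+ /*
+ * The 32-byte copy below uses q0/q1, so it relies on the FP/SIMD access
+ * enabled above; data_lma is expected to be defined by the linker script as
+ * the load address of the data section.
+ */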
+1: /* Copy the data section. */
+ adr_l x28, data_begin
+ adr_l x29, data_end
+ adr_l x30, data_lma
+2: cmp x28, x29
+ b.ge 3f
+ ldp q0, q1, [x30], #32
+ stp q0, q1, [x28], #32
+ b 2b
+
+3: /* Prepare the stack. */
+ adr_l x30, boot_stack_end
+ mov sp, x30
+
+ /* Set up exception vector. */
+ adr x30, vector_table_el1
+ msr vbar_el1, x30
+
+ /* Call into Rust code. */
+ bl main
+
+ /* Loop forever waiting for interrupts. */
+4: wfi
+ b 4b
diff --git a/vmbase/exceptions.S b/vmbase/exceptions.S
new file mode 100644
index 0000000..86ef83c
--- /dev/null
+++ b/vmbase/exceptions.S
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Saves the volatile registers onto the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers with 18 instructions
+ * left, out of the 32 that fit in a single vector table entry.
+ *
+ * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
+ * which can be used as the first and second arguments of a subsequent call.
+ */
+.macro save_volatile_to_stack
+ /* Reserve stack space and save registers x0-x18, x29 & x30. */
+ stp x0, x1, [sp, #-(8 * 24)]!
+ stp x2, x3, [sp, #8 * 2]
+ stp x4, x5, [sp, #8 * 4]
+ stp x6, x7, [sp, #8 * 6]
+ stp x8, x9, [sp, #8 * 8]
+ stp x10, x11, [sp, #8 * 10]
+ stp x12, x13, [sp, #8 * 12]
+ stp x14, x15, [sp, #8 * 14]
+ stp x16, x17, [sp, #8 * 16]
+ str x18, [sp, #8 * 18]
+ stp x29, x30, [sp, #8 * 20]
+
+ /*
+ * Save elr_el1 & spsr_el1 so that we can take a nested exception and
+ * still be able to unwind.
+ */
+ mrs x0, elr_el1
+ mrs x1, spsr_el1
+ stp x0, x1, [sp, #8 * 22]
+.endm
+
+/**
+ * Restores the volatile registers from the stack. This currently takes 14
+ * instructions, so it can be used in exception handlers with 18 instructions
+ * left; if paired with save_volatile_to_stack, there are 4 instructions to
+ * spare.
+ */
+.macro restore_volatile_from_stack
+ /* Restore registers x2-x18, x29 & x30. */
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+ ldp x16, x17, [sp, #8 * 16]
+ ldr x18, [sp, #8 * 18]
+ ldp x29, x30, [sp, #8 * 20]
+
+ /* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
+ ldp x0, x1, [sp, #8 * 22]
+ msr elr_el1, x0
+ msr spsr_el1, x1
+
+ /* Restore x0 & x1, and release stack space. */
+ ldp x0, x1, [sp], #8 * 24
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
+ * the work, then switching back to SP0 before returning.
+ *
+ * Switching to SPx and calling the Rust handler takes 16 instructions. To
+ * restore and return we need an additional 16 instructions, so we can implement
+ * the whole handler within the allotted 32 instructions.
+ */
+.macro current_exception_sp0 handler:req
+ msr spsel, #1
+ save_volatile_to_stack
+ bl \handler
+ restore_volatile_from_stack
+ msr spsel, #0
+ eret
+.endm
+
+/**
+ * This is a generic handler for exceptions taken at the current EL while using
+ * SPx. It saves volatile registers, calls the Rust handler, restores volatile
+ * registers, then returns.
+ *
+ * This also works for exceptions taken from EL0, if we don't care about
+ * non-volatile registers.
+ *
+ * Saving state and jumping to the Rust handler takes 15 instructions, and
+ * restoring and returning also takes 15 instructions, so we can fit the whole
+ * handler in 30 instructions, under the limit of 32.
+ */
+.macro current_exception_spx handler:req
+ save_volatile_to_stack
+ bl \handler
+ restore_volatile_from_stack
+ eret
+.endm
+
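+/*
+ * The EL1 vector table: 16 entries of 0x80 bytes (32 instructions) each, in the
+ * architectural order of (current EL with SP0, current EL with SPx, lower EL
+ * using AArch64, lower EL using AArch32) x (synchronous, IRQ, FIQ, SError).
+ * The handlers named below are expected to be provided by the Rust code that
+ * links against this library.
+ */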
+.section .text.vector_table_el1, "ax"
+.global vector_table_el1
+.balign 0x800
+vector_table_el1:
+sync_cur_sp0:
+ current_exception_sp0 sync_exception_current
+
+.balign 0x80
+irq_cur_sp0:
+ current_exception_sp0 irq_current
+
+.balign 0x80
+fiq_cur_sp0:
+ current_exception_sp0 fiq_current
+
+.balign 0x80
+serr_cur_sp0:
+ current_exception_sp0 serr_current
+
+.balign 0x80
+sync_cur_spx:
+ current_exception_spx sync_exception_current
+
+.balign 0x80
+irq_cur_spx:
+ current_exception_spx irq_current
+
+.balign 0x80
+fiq_cur_spx:
+ current_exception_spx fiq_current
+
+.balign 0x80
+serr_cur_spx:
+ current_exception_spx serr_current
+
+.balign 0x80
+sync_lower_64:
+ current_exception_spx sync_lower
+
+.balign 0x80
+irq_lower_64:
+ current_exception_spx irq_lower
+
+.balign 0x80
+fiq_lower_64:
+ current_exception_spx fiq_lower
+
+.balign 0x80
+serr_lower_64:
+ current_exception_spx serr_lower
+
+.balign 0x80
+sync_lower_32:
+ current_exception_spx sync_lower
+
+.balign 0x80
+irq_lower_32:
+ current_exception_spx irq_lower
+
+.balign 0x80
+fiq_lower_32:
+ current_exception_spx fiq_lower
+
+.balign 0x80
+serr_lower_32:
+ current_exception_spx serr_lower