Unified sysroot: kill arch-specific include dirs.

<machine/asm.h> was for internal use only.

<machine/fenv.h> is quite large, but can live in <bits/...>.
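
For illustration only (the bits/ header names here are an assumption, not
taken from this change), the redirect would look roughly like:

    /* <fenv.h> sketch: pull the per-arch definitions from a bits/ header
     * instead of <machine/fenv.h>. The file names below are illustrative. */
    #if defined(__arm__) || defined(__aarch64__)
    #include <bits/fenv_arm.h>
    #elif defined(__i386__)
    #include <bits/fenv_x86.h>
    #elif defined(__mips__)
    #include <bits/fenv_mips.h>
    #elif defined(__x86_64__)
    #include <bits/fenv_x86_64.h>
    #endif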

<machine/regdef.h> is trivially replaced by writing $x instead of x in our
assembler sources (for example, $a0 rather than a0), as the hunks below show.

<machine/setjmp.h> is trivially inlined into <setjmp.h>.
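
A rough sketch of that inlining, with placeholder sizes (the real per-ABI
_JBLEN values are not shown in this change):

    /* <setjmp.h> sketch (MIPS-only excerpt): define the jmp_buf length
     * directly rather than including <machine/setjmp.h>.
     * The sizes below are placeholders, not the real bionic values. */
    #if defined(__mips__) && defined(__LP64__)
    #define _JBLEN 32  /* placeholder, not the real n64 size */
    #elif defined(__mips__)
    #define _JBLEN 32  /* placeholder, not the real o32 size */
    #endif

    typedef long jmp_buf[_JBLEN];
    typedef long sigjmp_buf[_JBLEN + 1];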

<sgidefs.h> is unused.

Bug: N/A
Test: builds
Change-Id: Id05dbab43a2f9537486efb8f27a5ef167b055815
diff --git a/libc/arch-mips/bionic/__bionic_clone.S b/libc/arch-mips/bionic/__bionic_clone.S
index a3cacd1..b6056f2 100644
--- a/libc/arch-mips/bionic/__bionic_clone.S
+++ b/libc/arch-mips/bionic/__bionic_clone.S
@@ -33,39 +33,39 @@
 // pid_t __bionic_clone(int flags, void* child_stack, pid_t* parent_tid, void* tls, pid_t* child_tid, int (*fn)(void*), void* arg);
 ENTRY_PRIVATE(__bionic_clone)
         .set	noreorder
-        .cpload t9
+        .cpload $t9
         .set	reorder
 
 	# set up child stack
-	subu	a1,16
-	lw	t0,20(sp)     # fn
-	lw	t1,24(sp)     # arg
-	sw	t0,0(a1)	# fn
-	sw	t1,4(a1)	# arg
+	subu	$a1,16
+	lw	$t0,20($sp)     # fn
+	lw	$t1,24($sp)     # arg
+	sw	$t0,0($a1)	# fn
+	sw	$t1,4($a1)	# arg
 
 	# remainder of arguments are correct for clone system call
-        li	v0,__NR_clone
+        li	$v0,__NR_clone
         syscall
 
-        bnez	a3,.L__error_bc
+        bnez	$a3,.L__error_bc
 
-        beqz	v0,.L__thread_start_bc
+        beqz	$v0,.L__thread_start_bc
 
-        j ra
+        j $ra
 
 .L__thread_start_bc:
         # Clear return address in child so we don't unwind further.
-        li      ra,0
+        li      $ra,0
 
-        lw	a0,0(sp)	#  fn
-        lw	a1,4(sp)	#  arg
+        lw	$a0,0($sp)	#  fn
+        lw	$a1,4($sp)	#  arg
 
 	# void __start_thread(int (*func)(void*), void *arg)
-        la	t9,__start_thread
-        j	t9
+        la	$t9,__start_thread
+        j	$t9
 
 .L__error_bc:
-	move	a0,v0
-	la	t9,__set_errno_internal
-	j	t9
+	move	$a0,$v0
+	la	$t9,__set_errno_internal
+	j	$t9
 END(__bionic_clone)
diff --git a/libc/arch-mips/bionic/_exit_with_stack_teardown.S b/libc/arch-mips/bionic/_exit_with_stack_teardown.S
index 7d47160..566b1c8 100644
--- a/libc/arch-mips/bionic/_exit_with_stack_teardown.S
+++ b/libc/arch-mips/bionic/_exit_with_stack_teardown.S
@@ -30,12 +30,12 @@
 
 // void _exit_with_stack_teardown(void* stackBase, size_t stackSize)
 ENTRY_PRIVATE(_exit_with_stack_teardown)
-	li	v0, __NR_munmap
+	li	$v0, __NR_munmap
 	syscall
 	// If munmap failed, we ignore the failure and exit anyway.
 
-	li	a0, 0
-	li	v0, __NR_exit
+	li	$a0, 0
+	li	$v0, __NR_exit
 	syscall
         // The exit syscall does not return.
 END(_exit_with_stack_teardown)
diff --git a/libc/arch-mips/bionic/setjmp.S b/libc/arch-mips/bionic/setjmp.S
index 3b4ff55..1728054 100644
--- a/libc/arch-mips/bionic/setjmp.S
+++ b/libc/arch-mips/bionic/setjmp.S
@@ -119,7 +119,6 @@
  */
 
 #include <private/bionic_asm.h>
-#include <machine/setjmp.h>
 
 /* jmpbuf is declared to users as an array of longs, which is only
  * 4-byte aligned in 32-bit builds.  The Mips jmpbuf begins with a
@@ -161,10 +160,6 @@
 #define	SC_TOTAL_BYTES	(SC_FPREGS_OFFSET + SC_FPREGS_BYTES)
 #define	SC_TOTAL_LONGS	(SC_TOTAL_BYTES/REGSZ)
 
-#if SC_TOTAL_LONGS > _JBLEN
-#error _JBLEN is too small
-#endif
-
 #define USE_CHECKSUM 1
 
 .macro m_mangle_reg_and_store reg, cookie, temp, offset
@@ -203,138 +198,138 @@
 GPOFF= FRAMESZ-2*REGSZ
 RAOFF= FRAMESZ-1*REGSZ
 
-NON_LEAF(sigsetjmp, FRAMESZ, ra)
+NON_LEAF(sigsetjmp, FRAMESZ, $ra)
 	.mask	0x80000000, RAOFF
-	PTR_SUBU sp, FRAMESZ			# allocate stack frame
+	PTR_SUBU $sp, FRAMESZ			# allocate stack frame
 	SETUP_GP64(GPOFF, sigsetjmp)
 	SAVE_GP(GPOFF)
 	.set	reorder
 
 setjmp_common:
 #ifndef __LP64__
-	li	t0, ~7
-	and	a0, t0				# round jmpbuf addr DOWN to 8-byte boundary
+	li	$t0, ~7
+	and	$a0, $t0				# round jmpbuf addr DOWN to 8-byte boundary
 #endif
-	REG_S	ra, RAOFF(sp)			# spill state
-	REG_S	a0, A0OFF(sp)
+	REG_S	$ra, RAOFF($sp)			# spill state
+	REG_S	$a0, A0OFF($sp)
 
 	# get the cookie and store it along with the signal flag.
-	move	a0, a1
+	move	$a0, $a1
 	jal	__bionic_setjmp_cookie_get
-	REG_L	a0, A0OFF(sp)
+	REG_L	$a0, A0OFF($sp)
 
-	REG_S	v0, SC_FLAG_OFFSET(a0)		# save cookie and savesigs flag
-	andi	t0, v0, 1			# extract savesigs flag
+	REG_S	$v0, SC_FLAG_OFFSET($a0)		# save cookie and savesigs flag
+	andi	$t0, $v0, 1			# extract savesigs flag
 
-	beqz	t0, 1f				# do saving of signal mask?
+	beqz	$t0, 1f				# do saving of signal mask?
 
 	# call sigprocmask(int how ignored, sigset_t* null, sigset_t* SC_MASK(a0)):
-	LA	a2, SC_MASK_OFFSET(a0)		# gets current signal mask
-	li	a0, 0				# how; ignored when new mask is null
-	li	a1, 0				# null new mask
+	LA	$a2, SC_MASK_OFFSET($a0)		# gets current signal mask
+	li	$a0, 0				# how; ignored when new mask is null
+	li	$a1, 0				# null new mask
 	jal	sigprocmask			# get current signal mask
-	REG_L	a0, A0OFF(sp)
+	REG_L	$a0, A0OFF($sp)
 1:
-	REG_L	gp, GPOFF(sp)			# restore spills
-	REG_L	ra, RAOFF(sp)
-	REG_L	t0, SC_FLAG_OFFSET(a0)		# move cookie to temp reg
+	REG_L	$gp, GPOFF($sp)			# restore spills
+	REG_L	$ra, RAOFF($sp)
+	REG_L	$t0, SC_FLAG_OFFSET($a0)		# move cookie to temp reg
 
 	# callee-saved long-sized regs:
-	PTR_ADDU v1, sp, FRAMESZ		# save orig sp
+	PTR_ADDU $v1, $sp, FRAMESZ		# save orig sp
 
 	# m_mangle_reg_and_store reg, cookie, temp, offset
-	m_mangle_reg_and_store	ra, t0, t1, SC_REGS+0*REGSZ(a0)
-	m_mangle_reg_and_store	s0, t0, t2, SC_REGS+1*REGSZ(a0)
-	m_mangle_reg_and_store	s1, t0, t3, SC_REGS+2*REGSZ(a0)
-	m_mangle_reg_and_store	s2, t0, t1, SC_REGS+3*REGSZ(a0)
-	m_mangle_reg_and_store	s3, t0, t2, SC_REGS+4*REGSZ(a0)
-	m_mangle_reg_and_store	s4, t0, t3, SC_REGS+5*REGSZ(a0)
-	m_mangle_reg_and_store	s5, t0, t1, SC_REGS+6*REGSZ(a0)
-	m_mangle_reg_and_store	s6, t0, t2, SC_REGS+7*REGSZ(a0)
-	m_mangle_reg_and_store	s7, t0, t3, SC_REGS+8*REGSZ(a0)
-	m_mangle_reg_and_store	s8, t0, t1, SC_REGS+9*REGSZ(a0)
-	m_mangle_reg_and_store	gp, t0, t2, SC_REGS+10*REGSZ(a0)
-	m_mangle_reg_and_store	v1, t0, t3, SC_REGS+11*REGSZ(a0)
+	m_mangle_reg_and_store	$ra, $t0, $t1, SC_REGS+0*REGSZ($a0)
+	m_mangle_reg_and_store	$s0, $t0, $t2, SC_REGS+1*REGSZ($a0)
+	m_mangle_reg_and_store	$s1, $t0, $t3, SC_REGS+2*REGSZ($a0)
+	m_mangle_reg_and_store	$s2, $t0, $t1, SC_REGS+3*REGSZ($a0)
+	m_mangle_reg_and_store	$s3, $t0, $t2, SC_REGS+4*REGSZ($a0)
+	m_mangle_reg_and_store	$s4, $t0, $t3, SC_REGS+5*REGSZ($a0)
+	m_mangle_reg_and_store	$s5, $t0, $t1, SC_REGS+6*REGSZ($a0)
+	m_mangle_reg_and_store	$s6, $t0, $t2, SC_REGS+7*REGSZ($a0)
+	m_mangle_reg_and_store	$s7, $t0, $t3, SC_REGS+8*REGSZ($a0)
+	m_mangle_reg_and_store	$s8, $t0, $t1, SC_REGS+9*REGSZ($a0)
+	m_mangle_reg_and_store	$gp, $t0, $t2, SC_REGS+10*REGSZ($a0)
+	m_mangle_reg_and_store	$v1, $t0, $t3, SC_REGS+11*REGSZ($a0)
 
-	cfc1	v0, $31
+	cfc1	$v0, $31
 
 #ifdef __LP64__
 	# callee-saved fp regs on mips n64 ABI are $f24..$f31
-	s.d	$f24, SC_FPREGS+0*REGSZ_FP(a0)
-	s.d	$f25, SC_FPREGS+1*REGSZ_FP(a0)
-	s.d	$f26, SC_FPREGS+2*REGSZ_FP(a0)
-	s.d	$f27, SC_FPREGS+3*REGSZ_FP(a0)
-	s.d	$f28, SC_FPREGS+4*REGSZ_FP(a0)
-	s.d	$f29, SC_FPREGS+5*REGSZ_FP(a0)
-	s.d	$f30, SC_FPREGS+6*REGSZ_FP(a0)
-	s.d	$f31, SC_FPREGS+7*REGSZ_FP(a0)
+	s.d	$f24, SC_FPREGS+0*REGSZ_FP($a0)
+	s.d	$f25, SC_FPREGS+1*REGSZ_FP($a0)
+	s.d	$f26, SC_FPREGS+2*REGSZ_FP($a0)
+	s.d	$f27, SC_FPREGS+3*REGSZ_FP($a0)
+	s.d	$f28, SC_FPREGS+4*REGSZ_FP($a0)
+	s.d	$f29, SC_FPREGS+5*REGSZ_FP($a0)
+	s.d	$f30, SC_FPREGS+6*REGSZ_FP($a0)
+	s.d	$f31, SC_FPREGS+7*REGSZ_FP($a0)
 #else
 	# callee-saved fp regs on mips o32 ABI are
 	#   the even-numbered double fp regs $f20,$f22,...$f30
-	s.d	$f20, SC_FPREGS+0*REGSZ_FP(a0)
-	s.d	$f22, SC_FPREGS+1*REGSZ_FP(a0)
-	s.d	$f24, SC_FPREGS+2*REGSZ_FP(a0)
-	s.d	$f26, SC_FPREGS+3*REGSZ_FP(a0)
-	s.d	$f28, SC_FPREGS+4*REGSZ_FP(a0)
-	s.d	$f30, SC_FPREGS+5*REGSZ_FP(a0)
+	s.d	$f20, SC_FPREGS+0*REGSZ_FP($a0)
+	s.d	$f22, SC_FPREGS+1*REGSZ_FP($a0)
+	s.d	$f24, SC_FPREGS+2*REGSZ_FP($a0)
+	s.d	$f26, SC_FPREGS+3*REGSZ_FP($a0)
+	s.d	$f28, SC_FPREGS+4*REGSZ_FP($a0)
+	s.d	$f30, SC_FPREGS+5*REGSZ_FP($a0)
 #endif
-	sw	v0, SC_FPSR_OFFSET(a0)
+	sw	$v0, SC_FPSR_OFFSET($a0)
 #if USE_CHECKSUM
-	m_calculate_checksum t0, a0, t1
-	REG_S t0, SC_CKSUM_OFFSET(a0)
+	m_calculate_checksum $t0, $a0, $t1
+	REG_S $t0, SC_CKSUM_OFFSET($a0)
 #endif
-	move	v0, zero
+	move	$v0, $zero
 	RESTORE_GP64
-	PTR_ADDU sp, FRAMESZ
-	j	ra
+	PTR_ADDU $sp, FRAMESZ
+	j	$ra
 END(sigsetjmp)
 
 
 # Alternate entry points:
 
-NON_LEAF(setjmp, FRAMESZ, ra)
+NON_LEAF(setjmp, FRAMESZ, $ra)
 	.mask	0x80000000, RAOFF
-	PTR_SUBU sp, FRAMESZ
+	PTR_SUBU $sp, FRAMESZ
 	SETUP_GP64(GPOFF, setjmp)		# can't share sigsetjmp's gp code
 	SAVE_GP(GPOFF)
 	.set	reorder
 
-	li	a1, 1				# save/restore signals state
+	li	$a1, 1				# save/restore signals state
 	b	setjmp_common			# tail call
 END(setjmp)
 
 
-NON_LEAF(_setjmp, FRAMESZ, ra)
+NON_LEAF(_setjmp, FRAMESZ, $ra)
 	.mask	0x80000000, RAOFF
-	PTR_SUBU sp, FRAMESZ
+	PTR_SUBU $sp, FRAMESZ
 	SETUP_GP64(GPOFF, _setjmp)		# can't share sigsetjmp's gp code
 	SAVE_GP(GPOFF)
 	.set	reorder
 
-	li	a1, 0				# don't save/restore signals
+	li	$a1, 0				# don't save/restore signals
 	b	setjmp_common			# tail call
 END(_setjmp)
 
 
-NON_LEAF(siglongjmp, FRAMESZ, ra)
+NON_LEAF(siglongjmp, FRAMESZ, $ra)
 	.mask	0x80000000, RAOFF
-	PTR_SUBU sp, FRAMESZ
+	PTR_SUBU $sp, FRAMESZ
 	SETUP_GP64(GPOFF, siglongjmp)
 	SAVE_GP(GPOFF)
 	.set	reorder
 
 #ifndef __LP64__
-	li	t0, ~7
-	and	a0, t0				# round jmpbuf addr DOWN to 8-byte boundary
+	li	$t0, ~7
+	and	$a0, $t0				# round jmpbuf addr DOWN to 8-byte boundary
 #endif
 
-	move	s1, a1				# temp spill
-	move	s0, a0
+	move	$s1, $a1				# temp spill
+	move	$s0, $a0
 
 #if USE_CHECKSUM
-	m_calculate_checksum t0, s0, s2
-	REG_L	s2, SC_CKSUM_OFFSET(s0)
-	beq	t0, s2, 0f
+	m_calculate_checksum $t0, $s0, $s2
+	REG_L	$s2, SC_CKSUM_OFFSET($s0)
+	beq	$t0, $s2, 0f
 	nop
 	jal	__bionic_setjmp_checksum_mismatch
 	nop
@@ -342,75 +337,75 @@
 #endif
 
 	# extract savesigs flag
-	REG_L	s2, SC_FLAG_OFFSET(s0)
-	andi	t0, s2, 1
-	beqz	t0, 1f				# restore signal mask?
+	REG_L	$s2, SC_FLAG_OFFSET($s0)
+	andi	$t0, $s2, 1
+	beqz	$t0, 1f				# restore signal mask?
 
 	# call sigprocmask(int how SIG_SETMASK, sigset_t* SC_MASK(a0), sigset_t* null):
-	LA	a1, SC_MASK_OFFSET(s0)		# signals being restored
-	li	a0, 3				# mips SIG_SETMASK
-	li	a2, 0				# null
+	LA	$a1, SC_MASK_OFFSET($s0)		# signals being restored
+	li	$a0, 3				# mips SIG_SETMASK
+	li	$a2, 0				# null
 	jal	sigprocmask			# restore signal mask
 1:
-	move	t0, s2				# get cookie to temp reg
-	move	a1, s1
-	move	a0, s0
+	move	$t0, $s2				# get cookie to temp reg
+	move	$a1, $s1
+	move	$a0, $s0
 
 	# callee-saved long-sized regs:
 
 	# m_unmangle_reg_and_load reg, cookie, temp, offset
 	# don't restore gp yet, old value is needed for cookie_check call
-	m_unmangle_reg_and_load ra, t0, t1, SC_REGS+0*REGSZ(a0)
-	m_unmangle_reg_and_load s0, t0, t2, SC_REGS+1*REGSZ(a0)
-	m_unmangle_reg_and_load s1, t0, t3, SC_REGS+2*REGSZ(a0)
-	m_unmangle_reg_and_load s2, t0, t1, SC_REGS+3*REGSZ(a0)
-	m_unmangle_reg_and_load s3, t0, t2, SC_REGS+4*REGSZ(a0)
-	m_unmangle_reg_and_load s4, t0, t3, SC_REGS+5*REGSZ(a0)
-	m_unmangle_reg_and_load s5, t0, t1, SC_REGS+6*REGSZ(a0)
-	m_unmangle_reg_and_load s6, t0, t2, SC_REGS+7*REGSZ(a0)
-	m_unmangle_reg_and_load s7, t0, t3, SC_REGS+8*REGSZ(a0)
-	m_unmangle_reg_and_load s8, t0, t1, SC_REGS+9*REGSZ(a0)
-	m_unmangle_reg_and_load v1, t0, t2, SC_REGS+10*REGSZ(a0)
-	m_unmangle_reg_and_load sp, t0, t3, SC_REGS+11*REGSZ(a0)
+	m_unmangle_reg_and_load $ra, $t0, $t1, SC_REGS+0*REGSZ($a0)
+	m_unmangle_reg_and_load $s0, $t0, $t2, SC_REGS+1*REGSZ($a0)
+	m_unmangle_reg_and_load $s1, $t0, $t3, SC_REGS+2*REGSZ($a0)
+	m_unmangle_reg_and_load $s2, $t0, $t1, SC_REGS+3*REGSZ($a0)
+	m_unmangle_reg_and_load $s3, $t0, $t2, SC_REGS+4*REGSZ($a0)
+	m_unmangle_reg_and_load $s4, $t0, $t3, SC_REGS+5*REGSZ($a0)
+	m_unmangle_reg_and_load $s5, $t0, $t1, SC_REGS+6*REGSZ($a0)
+	m_unmangle_reg_and_load $s6, $t0, $t2, SC_REGS+7*REGSZ($a0)
+	m_unmangle_reg_and_load $s7, $t0, $t3, SC_REGS+8*REGSZ($a0)
+	m_unmangle_reg_and_load $s8, $t0, $t1, SC_REGS+9*REGSZ($a0)
+	m_unmangle_reg_and_load $v1, $t0, $t2, SC_REGS+10*REGSZ($a0)
+	m_unmangle_reg_and_load $sp, $t0, $t3, SC_REGS+11*REGSZ($a0)
 
-	lw	v0, SC_FPSR_OFFSET(a0)
-	ctc1	v0, $31			# restore old fr mode before fp values
+	lw	$v0, SC_FPSR_OFFSET($a0)
+	ctc1	$v0, $31			# restore old fr mode before fp values
 #ifdef __LP64__
 	# callee-saved fp regs on mips n64 ABI are $f24..$f31
-	l.d	$f24, SC_FPREGS+0*REGSZ_FP(a0)
-	l.d	$f25, SC_FPREGS+1*REGSZ_FP(a0)
-	l.d	$f26, SC_FPREGS+2*REGSZ_FP(a0)
-	l.d	$f27, SC_FPREGS+3*REGSZ_FP(a0)
-	l.d	$f28, SC_FPREGS+4*REGSZ_FP(a0)
-	l.d	$f29, SC_FPREGS+5*REGSZ_FP(a0)
-	l.d	$f30, SC_FPREGS+6*REGSZ_FP(a0)
-	l.d	$f31, SC_FPREGS+7*REGSZ_FP(a0)
+	l.d	$f24, SC_FPREGS+0*REGSZ_FP($a0)
+	l.d	$f25, SC_FPREGS+1*REGSZ_FP($a0)
+	l.d	$f26, SC_FPREGS+2*REGSZ_FP($a0)
+	l.d	$f27, SC_FPREGS+3*REGSZ_FP($a0)
+	l.d	$f28, SC_FPREGS+4*REGSZ_FP($a0)
+	l.d	$f29, SC_FPREGS+5*REGSZ_FP($a0)
+	l.d	$f30, SC_FPREGS+6*REGSZ_FP($a0)
+	l.d	$f31, SC_FPREGS+7*REGSZ_FP($a0)
 #else
 	# callee-saved fp regs on mips o32 ABI are
 	#   the even-numbered double fp regs $f20,$f22,...$f30
-	l.d	$f20, SC_FPREGS+0*REGSZ_FP(a0)
-	l.d	$f22, SC_FPREGS+1*REGSZ_FP(a0)
-	l.d	$f24, SC_FPREGS+2*REGSZ_FP(a0)
-	l.d	$f26, SC_FPREGS+3*REGSZ_FP(a0)
-	l.d	$f28, SC_FPREGS+4*REGSZ_FP(a0)
-	l.d	$f30, SC_FPREGS+5*REGSZ_FP(a0)
+	l.d	$f20, SC_FPREGS+0*REGSZ_FP($a0)
+	l.d	$f22, SC_FPREGS+1*REGSZ_FP($a0)
+	l.d	$f24, SC_FPREGS+2*REGSZ_FP($a0)
+	l.d	$f26, SC_FPREGS+3*REGSZ_FP($a0)
+	l.d	$f28, SC_FPREGS+4*REGSZ_FP($a0)
+	l.d	$f30, SC_FPREGS+5*REGSZ_FP($a0)
 #endif
 
 	# check cookie
-	PTR_SUBU sp, FRAMESZ
-	REG_S	v1, GPOFF(sp)
-	REG_S	ra, RAOFF(sp)
-	REG_S	a1, A1OFF(sp)
-	move	a0, t0
+	PTR_SUBU $sp, FRAMESZ
+	REG_S	$v1, GPOFF($sp)
+	REG_S	$ra, RAOFF($sp)
+	REG_S	$a1, A1OFF($sp)
+	move	$a0, $t0
 	jal	__bionic_setjmp_cookie_check
-	REG_L	gp, GPOFF(sp)
-	REG_L	ra, RAOFF(sp)
-	REG_L	a1, A1OFF(sp)
-	PTR_ADDU sp, FRAMESZ
+	REG_L	$gp, GPOFF($sp)
+	REG_L	$ra, RAOFF($sp)
+	REG_L	$a1, A1OFF($sp)
+	PTR_ADDU $sp, FRAMESZ
 
-	sltiu	t0, a1, 1		# never return 0!
-	xor	v0, a1, t0
-	j	ra			# return to setjmp call site
+	sltiu	$t0, $a1, 1		# never return 0!
+	xor	$v0, $a1, $t0
+	j	$ra			# return to setjmp call site
 END(siglongjmp)
 
 ALIAS_SYMBOL(longjmp, siglongjmp)
diff --git a/libc/arch-mips/bionic/syscall.S b/libc/arch-mips/bionic/syscall.S
index 5fed0ac..857bbab 100644
--- a/libc/arch-mips/bionic/syscall.S
+++ b/libc/arch-mips/bionic/syscall.S
@@ -36,26 +36,26 @@
 
 ENTRY(syscall)
     .set noreorder
-    .cpload t9
-    move    v0, a0
-    move    a0, a1
-    move    a1, a2
-    move    a2, a3
-    lw      a3, 16(sp)
-    lw      t0, 20(sp)
-    lw      t1, 24(sp)
-    subu    sp, STACKSIZE
-    sw      t0, 16(sp)
-    sw      t1, 20(sp)
+    .cpload $t9
+    move    $v0, $a0
+    move    $a0, $a1
+    move    $a1, $a2
+    move    $a2, $a3
+    lw      $a3, 16($sp)
+    lw      $t0, 20($sp)
+    lw      $t1, 24($sp)
+    subu    $sp, STACKSIZE
+    sw      $t0, 16($sp)
+    sw      $t1, 20($sp)
     syscall
-    addu    sp, STACKSIZE
-    bnez    a3, 1f
-    move    a0, v0
-    j       ra
+    addu    $sp, STACKSIZE
+    bnez    $a3, 1f
+    move    $a0, $v0
+    j       $ra
     nop
 1:
-    la      t9,__set_errno_internal
-    j       t9
+    la      $t9, __set_errno_internal
+    j       $t9
     nop
     .set reorder
 END(syscall)
diff --git a/libc/arch-mips/bionic/vfork.S b/libc/arch-mips/bionic/vfork.S
index 7ccf70b..fdd6a69 100644
--- a/libc/arch-mips/bionic/vfork.S
+++ b/libc/arch-mips/bionic/vfork.S
@@ -35,32 +35,32 @@
 
 ENTRY(vfork)
 	.set	noreorder
-	.cpload	t9
+	.cpload	$t9
 
 	// __get_tls()[TLS_SLOT_THREAD_ID]->cached_pid_ = 0
 	.set	push
 	.set	mips32r2
-	rdhwr	v0, $29			// v0 = tls; kernel trap on mips32r1
+	rdhwr	$v0, $29			// v0 = tls; kernel trap on mips32r1
 	.set	pop
-	lw	v0, REGSZ*1(v0)		// v0 = v0[TLS_SLOT_THREAD_ID ie 1]
-	sw	$0, REGSZ*2+4(v0)	// v0->cached_pid_ = 0
+	lw	$v0, REGSZ*1($v0)		// v0 = v0[TLS_SLOT_THREAD_ID ie 1]
+	sw	$0, REGSZ*2+4($v0)	// v0->cached_pid_ = 0
 
-	li	a0, (CLONE_VM | CLONE_VFORK | SIGCHLD)
-	li	a1, 0
-	li	a2, 0
-	li	a3, 0
-	subu	sp, 8
-	sw	$0, 16(sp)
-	li	v0, __NR_clone
+	li	$a0, (CLONE_VM | CLONE_VFORK | SIGCHLD)
+	li	$a1, 0
+	li	$a2, 0
+	li	$a3, 0
+	subu	$sp, 8
+	sw	$0, 16($sp)
+	li	$v0, __NR_clone
 	syscall
-	addu	sp, 8
-	bnez	a3, 1f
-	 move	a0, v0
+	addu	$sp, 8
+	bnez	$a3, 1f
+	 move	$a0, $v0
 
-	j	ra
+	j	$ra
 	 nop
 1:
-	la	t9, __set_errno_internal
-	j	t9
+	la	$t9, __set_errno_internal
+	j	$t9
 	 nop
 END(vfork)