Remove unnecessary instructions from x86/x86_64 syscalls.
__set_errno returns exactly -1, so callers don't need to set the failure return value themselves.
The other architectures were already taking advantage of this, but
no one had ever fixed x86 and x86_64.
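For reference, the convention this relies on, as a minimal C sketch (the name
and signature below are assumed for illustration, not copied from bionic):

    #include <errno.h>

    // Hypothetical stand-in for the __set_errno convention. The asm stubs
    // negate the kernel's negative result before calling, so this receives
    // a positive error code.
    static long set_errno_and_fail(long error_code) {
        errno = (int)error_code;  // record the error for the caller
        return -1;                // the stub can reuse this as its own
                                  // return value in %eax/%rax
    }

With that convention, a stub can simply call __set_errno and jump to its
normal return path, since -1 is already in the return register; the
orl $-1, %eax instructions removed below were therefore redundant.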
Change-Id: Ie131494be664f6c4a1bbf8c61bbbed58eac56122
diff --git a/libc/arch-x86/bionic/__bionic_clone.S b/libc/arch-x86/bionic/__bionic_clone.S
index 672512c..0c0feff 100644
--- a/libc/arch-x86/bionic/__bionic_clone.S
+++ b/libc/arch-x86/bionic/__bionic_clone.S
@@ -34,7 +34,6 @@
pushl %eax
call __set_errno
addl $4, %esp
- orl $-1, %eax
jmp .L_bc_return
.L_bc_child:
diff --git a/libc/arch-x86/bionic/syscall.S b/libc/arch-x86/bionic/syscall.S
index 0178f41..8e76c4e 100644
--- a/libc/arch-x86/bionic/syscall.S
+++ b/libc/arch-x86/bionic/syscall.S
@@ -40,7 +40,6 @@
pushl %eax
call __set_errno
addl $4, %esp
- orl $-1, %eax
1:
# Restore the callee save registers.
pop %ebp
diff --git a/libc/arch-x86/bionic/vfork.S b/libc/arch-x86/bionic/vfork.S
index ec6f6ca..ffa6b16 100644
--- a/libc/arch-x86/bionic/vfork.S
+++ b/libc/arch-x86/bionic/vfork.S
@@ -39,7 +39,6 @@
negl %eax
pushl %eax
call __set_errno
- orl $-1, %eax
1:
- jmp *%ecx // Jump to the stored return address.
+ jmp *%ecx // Jump to the stored return address.
END(vfork)