libc: implement kernel vdso syscalls for i386
This patch uses __kernel_vsyscall instead of "int 0x80"
as the syscall entry point. AT_SYSINFO points to
an adapter that masks the arch-specific differences and
gives a performance boost on the i386 architecture.
Change-Id: Ib340c604d02c6c25714a95793737e3cfdc3fc5d7
Signed-off-by: Mingwei Shi <mingwei.shi@intel.com>
diff --git a/libc/tools/gensyscalls.py b/libc/tools/gensyscalls.py
index 4d0afe2..f7785d6 100755
--- a/libc/tools/gensyscalls.py
+++ b/libc/tools/gensyscalls.py
@@ -166,9 +166,20 @@
x86_registers = [ "ebx", "ecx", "edx", "esi", "edi", "ebp" ]
+x86_call_prepare = """\
+
+ call __kernel_syscall
+ pushl %eax
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset eax, 0
+
+"""
+
x86_call = """\
movl $%(__NR_name)s, %%eax
- int $0x80
+ call *(%%esp)
+ addl $4, %%esp
+
cmpl $-MAX_ERRNO, %%eax
jb 1f
negl %%eax
@@ -311,7 +322,7 @@
result = syscall_stub_header % syscall
numparams = count_generic_param_registers(syscall["params"])
- stack_bias = numparams*4 + 4
+ stack_bias = numparams*4 + 8
offset = 0
mov_result = ""
first_push = True
@@ -327,6 +338,7 @@
mov_result += " mov %d(%%esp), %%%s\n" % (stack_bias+offset, register)
offset += 4
+ result += x86_call_prepare
result += mov_result
result += x86_call % syscall
@@ -352,7 +364,9 @@
result += " pushl %ecx\n"
result += " .cfi_adjust_cfa_offset 4\n"
result += " .cfi_rel_offset ecx, 0\n"
- stack_bias = 12
+ stack_bias = 16
+
+ result += x86_call_prepare
# set the call id (%ebx)
result += " mov $%d, %%ebx\n" % syscall["socketcall_id"]