Merge "Fix unwinding through x86-64 __bionic_clone."
diff --git a/libc/Android.mk b/libc/Android.mk
index 77c77c8..ee8981a 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -481,7 +481,6 @@
upstream-openbsd/lib/libc/string/strtok.c \
upstream-openbsd/lib/libc/string/wcslcpy.c \
upstream-openbsd/lib/libc/string/wcsstr.c \
- upstream-openbsd/lib/libc/string/wcswcs.c \
upstream-openbsd/lib/libc/string/wcswidth.c \
libc_arch_static_src_files := \
diff --git a/libc/arch-arm64/arm64.mk b/libc/arch-arm64/arm64.mk
index 62974b6..223bc74 100644
--- a/libc/arch-arm64/arm64.mk
+++ b/libc/arch-arm64/arm64.mk
@@ -1,7 +1,6 @@
# arm64 specific configs
libc_common_src_files_arm64 := \
- bionic/index.cpp \
bionic/memchr.c \
bionic/__memcmp16.cpp \
bionic/memrchr.c \
diff --git a/libc/arch-mips64/mips64.mk b/libc/arch-mips64/mips64.mk
index 9a24c61..2b18042 100644
--- a/libc/arch-mips64/mips64.mk
+++ b/libc/arch-mips64/mips64.mk
@@ -1,7 +1,6 @@
# mips64 specific configs
libc_common_src_files_mips64 := \
- bionic/index.cpp \
bionic/memchr.c \
bionic/memcmp.c \
bionic/memmove.c \
diff --git a/libc/arch-x86/silvermont/string/sse2-memmove-slm.S b/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
index 79a0a36..b971f0b 100644
--- a/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
+++ b/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
@@ -74,13 +74,13 @@
#endif
#ifdef USE_AS_BCOPY
-# define SRC PARMS
-# define DEST SRC+4
-# define LEN DEST+4
+# define SRC PARMS
+# define DEST SRC+4
+# define LEN DEST+4
#else
-# define DEST PARMS
-# define SRC DEST+4
-# define LEN SRC+4
+# define DEST PARMS
+# define SRC DEST+4
+# define LEN SRC+4
#endif
#define CFI_PUSH(REG) \
@@ -109,15 +109,15 @@
/* Check whether we should copy backward or forward. */
cmp %eax, %edx
je L(mm_return)
- ja L(mm_len_0_or_more_backward)
+ jg L(mm_len_0_or_more_backward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
separately. */
cmp $16, %ecx
jbe L(mm_len_0_16_bytes_forward)
- cmpl $32, %ecx
- jg L(mm_len_32_or_more_forward)
+ cmpl $32, %ecx
+ ja L(mm_len_32_or_more_forward)
/* Copy [0..32] and return. */
movdqu (%eax), %xmm0
@@ -127,8 +127,8 @@
jmp L(mm_return)
L(mm_len_32_or_more_forward):
- cmpl $64, %ecx
- jg L(mm_len_64_or_more_forward)
+ cmpl $64, %ecx
+ ja L(mm_len_64_or_more_forward)
/* Copy [0..64] and return. */
movdqu (%eax), %xmm0
@@ -142,8 +142,8 @@
jmp L(mm_return)
L(mm_len_64_or_more_forward):
- cmpl $128, %ecx
- jg L(mm_len_128_or_more_forward)
+ cmpl $128, %ecx
+ ja L(mm_len_128_or_more_forward)
/* Copy [0..128] and return. */
movdqu (%eax), %xmm0
@@ -165,72 +165,66 @@
jmp L(mm_return)
L(mm_len_128_or_more_forward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %ecx
- jae L(mm_large_page_forward)
-
PUSH (%esi)
PUSH (%edi)
- movl %eax, %esi
- movl %edx, %edi
/* Aligning the address of destination. */
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
+ movdqu (%eax), %xmm0
+ movdqu 16(%eax), %xmm1
+ movdqu 32(%eax), %xmm2
+ movdqu 48(%eax), %xmm3
- leal 64(%edi), %edx
- andl $-64, %edx
+ leal 64(%edx), %edi
+ andl $-64, %edi
+ subl %edx, %eax
- movl %esi, %eax
- subl %edi, %eax
+ movdqu (%eax, %edi), %xmm4
+ movdqu 16(%eax, %edi), %xmm5
+ movdqu 32(%eax, %edi), %xmm6
+ movdqu 48(%eax, %edi), %xmm7
- movdqu (%edx, %eax), %xmm4
- movdqu 16(%edx, %eax), %xmm5
- movdqu 32(%edx, %eax), %xmm6
- movdqu 48(%edx, %eax), %xmm7
+ movdqu %xmm0, (%edx)
+ movdqu %xmm1, 16(%edx)
+ movdqu %xmm2, 32(%edx)
+ movdqu %xmm3, 48(%edx)
+ movdqa %xmm4, (%edi)
+ movaps %xmm5, 16(%edi)
+ movaps %xmm6, 32(%edi)
+ movaps %xmm7, 48(%edi)
+ addl $64, %edi
- movdqu %xmm0, (%edi)
- movdqu %xmm1, 16(%edi)
- movdqu %xmm2, 32(%edi)
- movdqu %xmm3, 48(%edi)
- movdqa %xmm4, (%edx)
- movdqa %xmm5, 16(%edx)
- movdqa %xmm6, 32(%edx)
- movdqa %xmm7, 48(%edx)
- addl $64, %edx
-
- leal (%edi, %ecx), %ebx
+ leal (%edx, %ecx), %ebx
andl $-64, %ebx
-
- cmp %edx, %ebx
+ cmp %edi, %ebx
jbe L(mm_copy_remaining_forward)
+ cmp $SHARED_CACHE_SIZE_HALF, %ecx
+ jae L(mm_large_page_loop_forward)
+
.p2align 4
L(mm_main_loop_forward):
- prefetcht0 128(%edx, %eax)
+ prefetcht0 128(%eax, %edi)
- movdqu (%edx, %eax), %xmm0
- movdqu 16(%edx, %eax), %xmm1
- movdqu 32(%edx, %eax), %xmm2
- movdqu 48(%edx, %eax), %xmm3
- movdqa %xmm0, (%edx)
- movdqa %xmm1, 16(%edx)
- movdqa %xmm2, 32(%edx)
- movdqa %xmm3, 48(%edx)
- leal 64(%edx), %edx
- cmp %edx, %ebx
+ movdqu (%eax, %edi), %xmm0
+ movdqu 16(%eax, %edi), %xmm1
+ movdqu 32(%eax, %edi), %xmm2
+ movdqu 48(%eax, %edi), %xmm3
+ movdqa %xmm0, (%edi)
+ movaps %xmm1, 16(%edi)
+ movaps %xmm2, 32(%edi)
+ movaps %xmm3, 48(%edi)
+ leal 64(%edi), %edi
+ cmp %edi, %ebx
ja L(mm_main_loop_forward)
L(mm_copy_remaining_forward):
- addl %edi, %ecx
- subl %edx, %ecx
-/* We copied all up till %edx position in the dst.
+ addl %edx, %ecx
+ subl %edi, %ecx
+/* We copied all up till %edi position in the dst.
In %ecx now is how many bytes are left to copy.
Now we need to advance %esi. */
- leal (%edx, %eax), %esi
+ leal (%edi, %eax), %esi
L(mm_remaining_0_64_bytes_forward):
cmp $32, %ecx
@@ -251,8 +245,8 @@
ja L(mm_remaining_3_4_bytes_forward)
movzbl -1(%esi,%ecx), %eax
movzbl (%esi), %ebx
- movb %al, -1(%edx,%ecx)
- movb %bl, (%edx)
+ movb %al, -1(%edi,%ecx)
+ movb %bl, (%edi)
jmp L(mm_return_pop_all)
L(mm_remaining_33_64_bytes_forward):
@@ -260,40 +254,39 @@
movdqu 16(%esi), %xmm1
movdqu -32(%esi, %ecx), %xmm2
movdqu -16(%esi, %ecx), %xmm3
- movdqu %xmm0, (%edx)
- movdqu %xmm1, 16(%edx)
- movdqu %xmm2, -32(%edx, %ecx)
- movdqu %xmm3, -16(%edx, %ecx)
+ movdqu %xmm0, (%edi)
+ movdqu %xmm1, 16(%edi)
+ movdqu %xmm2, -32(%edi, %ecx)
+ movdqu %xmm3, -16(%edi, %ecx)
jmp L(mm_return_pop_all)
L(mm_remaining_17_32_bytes_forward):
movdqu (%esi), %xmm0
movdqu -16(%esi, %ecx), %xmm1
- movdqu %xmm0, (%edx)
- movdqu %xmm1, -16(%edx, %ecx)
- jmp L(mm_return_pop_all)
-
-L(mm_remaining_3_4_bytes_forward):
- movzwl -2(%esi,%ecx), %eax
- movzwl (%esi), %ebx
- movw %ax, -2(%edx,%ecx)
- movw %bx, (%edx)
- jmp L(mm_return_pop_all)
-
-L(mm_remaining_5_8_bytes_forward):
- movl (%esi), %eax
- movl -4(%esi,%ecx), %ebx
- movl %eax, (%edx)
- movl %ebx, -4(%edx,%ecx)
+ movdqu %xmm0, (%edi)
+ movdqu %xmm1, -16(%edi, %ecx)
jmp L(mm_return_pop_all)
L(mm_remaining_9_16_bytes_forward):
movq (%esi), %xmm0
movq -8(%esi, %ecx), %xmm1
- movq %xmm0, (%edx)
- movq %xmm1, -8(%edx, %ecx)
+ movq %xmm0, (%edi)
+ movq %xmm1, -8(%edi, %ecx)
jmp L(mm_return_pop_all)
+L(mm_remaining_5_8_bytes_forward):
+ movl (%esi), %eax
+ movl -4(%esi,%ecx), %ebx
+ movl %eax, (%edi)
+ movl %ebx, -4(%edi,%ecx)
+ jmp L(mm_return_pop_all)
+
+L(mm_remaining_3_4_bytes_forward):
+ movzwl -2(%esi,%ecx), %eax
+ movzwl (%esi), %ebx
+ movw %ax, -2(%edi,%ecx)
+ movw %bx, (%edi)
+ jmp L(mm_return_pop_all)
L(mm_len_0_16_bytes_forward):
testb $24, %cl
@@ -334,15 +327,20 @@
movq %xmm1, -8(%edx, %ecx)
jmp L(mm_return)
+L(mm_recalc_len):
+/* Compute in %ecx how many bytes are left to copy after
+ the main loop stops. */
+ movl %ebx, %ecx
+ subl %edx, %ecx
/* The code for copying backwards. */
L(mm_len_0_or_more_backward):
-/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
separately. */
cmp $16, %ecx
jbe L(mm_len_0_16_bytes_backward)
- cmpl $32, %ecx
+ cmpl $32, %ecx
jg L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
@@ -353,7 +351,7 @@
jmp L(mm_return)
L(mm_len_32_or_more_backward):
- cmpl $64, %ecx
+ cmpl $64, %ecx
jg L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
@@ -368,7 +366,7 @@
jmp L(mm_return)
L(mm_len_64_or_more_backward):
- cmpl $128, %ecx
+ cmpl $128, %ecx
jg L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
@@ -391,10 +389,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_backward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %ecx
- jae L(mm_large_page_backward)
-
PUSH (%esi)
PUSH (%edi)
@@ -429,17 +423,11 @@
leal 64(%edx), %ebx
andl $-64, %ebx
-/* Compute in %ecx how many bytes are left to copy after
- the main loop stops. */
- movl %ebx, %ecx
- subl %edx, %ecx
-
cmp %edi, %ebx
- jb L(mm_main_loop_backward)
+ jae L(mm_main_loop_backward_end)
- POP (%edi)
- POP (%esi)
- jmp L(mm_len_0_or_more_backward)
+ cmp $SHARED_CACHE_SIZE_HALF, %ecx
+ jae L(mm_large_page_loop_backward)
.p2align 4
L(mm_main_loop_backward):
@@ -457,9 +445,10 @@
leal -64(%edi), %edi
cmp %edi, %ebx
jb L(mm_main_loop_backward)
+L(mm_main_loop_backward_end):
POP (%edi)
POP (%esi)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
/* Copy [0..16] and return. */
L(mm_len_0_16_bytes_backward):
@@ -508,151 +497,30 @@
RETURN
L(mm_return_pop_all):
- movl %edi, %eax
+ movl %edx, %eax
POP (%edi)
POP (%esi)
RETURN
/* Big length copy forward part. */
-L(mm_large_page_forward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- PUSH (%esi)
- PUSH (%edi)
- movl %eax, %esi
- movl %edx, %edi
-
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
-
- leal 64(%edi), %edx
- andl $-64, %edx
-
- movl %esi, %eax
- subl %edi, %eax
-
- movdqu (%edx, %eax), %xmm4
- movdqu 16(%edx, %eax), %xmm5
- movdqu 32(%edx, %eax), %xmm6
- movdqu 48(%edx, %eax), %xmm7
-
- movdqu %xmm0, (%edi)
- movdqu %xmm1, 16(%edi)
- movdqu %xmm2, 32(%edi)
- movdqu %xmm3, 48(%edi)
- movntdq %xmm4, (%edx)
- movntdq %xmm5, 16(%edx)
- movntdq %xmm6, 32(%edx)
- movntdq %xmm7, 48(%edx)
- addl $64, %edx
-
- leal (%edi, %ecx), %ebx
- andl $-128, %ebx
-
- cmp %edx, %ebx
- jbe L(mm_copy_remaining_forward)
-
.p2align 4
L(mm_large_page_loop_forward):
- movdqu (%edx, %eax), %xmm0
- movdqu 16(%edx, %eax), %xmm1
- movdqu 32(%edx, %eax), %xmm2
- movdqu 48(%edx, %eax), %xmm3
- movdqu 64(%edx, %eax), %xmm4
- movdqu 80(%edx, %eax), %xmm5
- movdqu 96(%edx, %eax), %xmm6
- movdqu 112(%edx, %eax), %xmm7
- movntdq %xmm0, (%edx)
- movntdq %xmm1, 16(%edx)
- movntdq %xmm2, 32(%edx)
- movntdq %xmm3, 48(%edx)
- movntdq %xmm4, 64(%edx)
- movntdq %xmm5, 80(%edx)
- movntdq %xmm6, 96(%edx)
- movntdq %xmm7, 112(%edx)
- leal 128(%edx), %edx
- cmp %edx, %ebx
+ movdqu (%eax, %edi), %xmm0
+ movdqu 16(%eax, %edi), %xmm1
+ movdqu 32(%eax, %edi), %xmm2
+ movdqu 48(%eax, %edi), %xmm3
+ movntdq %xmm0, (%edi)
+ movntdq %xmm1, 16(%edi)
+ movntdq %xmm2, 32(%edi)
+ movntdq %xmm3, 48(%edi)
+ leal 64(%edi), %edi
+ cmp %edi, %ebx
ja L(mm_large_page_loop_forward)
sfence
-
- addl %edi, %ecx
- subl %edx, %ecx
-/* We copied all up till %edx position in the dst.
- In %ecx now is how many bytes are left to copy.
- Now we need to advance %esi. */
- leal (%edx, %eax), %esi
-
- cmp $64, %ecx
- jb L(mm_remaining_0_64_bytes_forward)
-
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
- movdqu -64(%esi, %ecx), %xmm4
- movdqu -48(%esi, %ecx), %xmm5
- movdqu -32(%esi, %ecx), %xmm6
- movdqu -16(%esi, %ecx), %xmm7
- movdqu %xmm0, (%edx)
- movdqu %xmm1, 16(%edx)
- movdqu %xmm2, 32(%edx)
- movdqu %xmm3, 48(%edx)
- movdqu %xmm4, -64(%edx, %ecx)
- movdqu %xmm5, -48(%edx, %ecx)
- movdqu %xmm6, -32(%edx, %ecx)
- movdqu %xmm7, -16(%edx, %ecx)
- jmp L(mm_return_pop_all)
-
+ jmp L(mm_copy_remaining_forward)
/* Big length copy backward part. */
-L(mm_large_page_backward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- PUSH (%esi)
- PUSH (%edi)
-
- movdqu -16(%eax, %ecx), %xmm0
- movdqu -32(%eax, %ecx), %xmm1
- movdqu -48(%eax, %ecx), %xmm2
- movdqu -64(%eax, %ecx), %xmm3
-
- leal (%edx, %ecx), %edi
- andl $-64, %edi
-
- movl %eax, %esi
- subl %edx, %esi
-
- movdqu -16(%edi, %esi), %xmm4
- movdqu -32(%edi, %esi), %xmm5
- movdqu -48(%edi, %esi), %xmm6
- movdqu -64(%edi, %esi), %xmm7
-
- movdqu %xmm0, -16(%edx, %ecx)
- movdqu %xmm1, -32(%edx, %ecx)
- movdqu %xmm2, -48(%edx, %ecx)
- movdqu %xmm3, -64(%edx, %ecx)
- movntdq %xmm4, -16(%edi)
- movntdq %xmm5, -32(%edi)
- movntdq %xmm6, -48(%edi)
- movntdq %xmm7, -64(%edi)
- leal -64(%edi), %edi
-
- leal 128(%edx), %ebx
- andl $-64, %ebx
-
-/* Compute in %ecx how many bytes are left to copy after
- the main loop stops. */
- movl %ebx, %ecx
- subl %edx, %ecx
-
- cmp %edi, %ebx
- jae L(mm_len_0_or_more_backward)
-
.p2align 4
L(mm_large_page_loop_backward):
movdqu -64(%edi, %esi), %xmm0
@@ -666,8 +534,9 @@
leal -64(%edi), %edi
cmp %edi, %ebx
jb L(mm_large_page_loop_backward)
+ sfence
POP (%edi)
POP (%esi)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
END (MEMMOVE)
diff --git a/libc/arch-x86_64/string/sse2-memmove-slm.S b/libc/arch-x86_64/string/sse2-memmove-slm.S
index ee8440e..0dbffad 100644
--- a/libc/arch-x86_64/string/sse2-memmove-slm.S
+++ b/libc/arch-x86_64/string/sse2-memmove-slm.S
@@ -99,7 +99,7 @@
/* Check whether we should copy backward or forward. */
cmp %rsi, %rdi
je L(mm_return)
- ja L(mm_len_0_or_more_backward)
+ jg L(mm_len_0_or_more_backward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
separately. */
@@ -107,7 +107,7 @@
jbe L(mm_len_0_16_bytes_forward)
cmp $32, %rdx
- jg L(mm_len_32_or_more_forward)
+ ja L(mm_len_32_or_more_forward)
/* Copy [0..32] and return. */
movdqu (%rsi), %xmm0
@@ -118,7 +118,7 @@
L(mm_len_32_or_more_forward):
cmp $64, %rdx
- jg L(mm_len_64_or_more_forward)
+ ja L(mm_len_64_or_more_forward)
/* Copy [0..64] and return. */
movdqu (%rsi), %xmm0
@@ -133,7 +133,7 @@
L(mm_len_64_or_more_forward):
cmp $128, %rdx
- jg L(mm_len_128_or_more_forward)
+ ja L(mm_len_128_or_more_forward)
/* Copy [0..128] and return. */
movdqu (%rsi), %xmm0
@@ -155,13 +155,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_forward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_forward)
-
- mov %rsi, %r8 // copy src to r8
- mov %rdi, %r9 // copy dst to r9
-
/* Aligning the address of destination. */
/* save first unaligned 64 bytes */
movdqu (%rsi), %xmm0
@@ -169,56 +162,57 @@
movdqu 32(%rsi), %xmm2
movdqu 48(%rsi), %xmm3
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi now aligned to next 64 byte boundary */
+ lea 64(%rdi), %r8
+ and $-64, %r8 /* r8 now aligned to next 64 byte boundary */
+ sub %rdi, %rsi /* rsi = src - dst = diff */
- sub %r9, %rsi /* rsi = src - dst = diff */
+ movdqu (%r8, %rsi), %xmm4
+ movdqu 16(%r8, %rsi), %xmm5
+ movdqu 32(%r8, %rsi), %xmm6
+ movdqu 48(%r8, %rsi), %xmm7
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 16(%rdi)
+ movdqu %xmm2, 32(%rdi)
+ movdqu %xmm3, 48(%rdi)
+ movdqa %xmm4, (%r8)
+ movaps %xmm5, 16(%r8)
+ movaps %xmm6, 32(%r8)
+ movaps %xmm7, 48(%r8)
+ add $64, %r8
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movdqa %xmm4, (%rdi)
- movdqa %xmm5, 16(%rdi)
- movdqa %xmm6, 32(%rdi)
- movdqa %xmm7, 48(%rdi)
- add $64, %rdi
-
- lea (%r9, %rdx), %rbx
+ lea (%rdi, %rdx), %rbx
and $-64, %rbx
-
- cmp %rdi, %rbx
+ cmp %r8, %rbx
jbe L(mm_copy_remaining_forward)
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_forward)
+
.p2align 4
L(mm_main_loop_forward):
- prefetcht0 128(%rdi, %rsi)
+ prefetcht0 128(%r8, %rsi)
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqa %xmm0, (%rdi)
- movdqa %xmm1, 16(%rdi)
- movdqa %xmm2, 32(%rdi)
- movdqa %xmm3, 48(%rdi)
- lea 64(%rdi), %rdi
- cmp %rdi, %rbx
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movdqa %xmm0, (%r8)
+ movaps %xmm1, 16(%r8)
+ movaps %xmm2, 32(%r8)
+ movaps %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
ja L(mm_main_loop_forward)
L(mm_copy_remaining_forward):
- add %r9, %rdx
- sub %rdi, %rdx
+ add %rdi, %rdx
+ sub %r8, %rdx
/* We copied all up till %rdi position in the dst.
In %rdx now is how many bytes are left to copy.
Now we need to advance %r8. */
- lea (%rdi, %rsi), %r8
+ lea (%r8, %rsi), %r9
L(mm_remaining_0_64_bytes_forward):
cmp $32, %rdx
@@ -237,49 +231,49 @@
cmpb $2, %dl
.p2align 4,,1
ja L(mm_remaining_3_4_bytes_forward)
- movzbl -1(%r8,%rdx), %esi
- movzbl (%r8), %ebx
- movb %sil, -1(%rdi,%rdx)
- movb %bl, (%rdi)
+ movzbl -1(%r9,%rdx), %esi
+ movzbl (%r9), %ebx
+ movb %sil, -1(%r8,%rdx)
+ movb %bl, (%r8)
jmp L(mm_return)
L(mm_remaining_33_64_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu -32(%r8, %rdx), %xmm2
- movdqu -16(%r8, %rdx), %xmm3
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, -32(%rdi, %rdx)
- movdqu %xmm3, -16(%rdi, %rdx)
+ movdqu (%r9), %xmm0
+ movdqu 16(%r9), %xmm1
+ movdqu -32(%r9, %rdx), %xmm2
+ movdqu -16(%r9, %rdx), %xmm3
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, 16(%r8)
+ movdqu %xmm2, -32(%r8, %rdx)
+ movdqu %xmm3, -16(%r8, %rdx)
jmp L(mm_return)
L(mm_remaining_17_32_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu -16(%r8, %rdx), %xmm1
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, -16(%rdi, %rdx)
- jmp L(mm_return)
-
-L(mm_remaining_3_4_bytes_forward):
- movzwl -2(%r8,%rdx), %esi
- movzwl (%r8), %ebx
- movw %si, -2(%rdi,%rdx)
- movw %bx, (%rdi)
+ movdqu (%r9), %xmm0
+ movdqu -16(%r9, %rdx), %xmm1
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, -16(%r8, %rdx)
jmp L(mm_return)
L(mm_remaining_5_8_bytes_forward):
- movl (%r8), %esi
- movl -4(%r8,%rdx), %ebx
- movl %esi, (%rdi)
- movl %ebx, -4(%rdi,%rdx)
+ movl (%r9), %esi
+ movl -4(%r9,%rdx), %ebx
+ movl %esi, (%r8)
+ movl %ebx, -4(%r8,%rdx)
jmp L(mm_return)
L(mm_remaining_9_16_bytes_forward):
- mov (%r8), %rsi
- mov -8(%r8, %rdx), %rbx
- mov %rsi, (%rdi)
- mov %rbx, -8(%rdi, %rdx)
+ mov (%r9), %rsi
+ mov -8(%r9, %rdx), %rbx
+ mov %rsi, (%r8)
+ mov %rbx, -8(%r8, %rdx)
+ jmp L(mm_return)
+
+L(mm_remaining_3_4_bytes_forward):
+ movzwl -2(%r9,%rdx), %esi
+ movzwl (%r9), %ebx
+ movw %si, -2(%r8,%rdx)
+ movw %bx, (%r8)
jmp L(mm_return)
L(mm_len_0_16_bytes_forward):
@@ -321,16 +315,21 @@
mov %rsi, -8(%rdi, %rdx)
jmp L(mm_return)
+L(mm_recalc_len):
+/* Compute in %rdx how many bytes are left to copy after
+ the main loop stops. */
+ mov %rbx, %rdx
+ sub %rdi, %rdx
/* The code for copying backwards. */
L(mm_len_0_or_more_backward):
-/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
separately. */
cmp $16, %rdx
jbe L(mm_len_0_16_bytes_backward)
cmp $32, %rdx
- jg L(mm_len_32_or_more_backward)
+ ja L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
movdqu (%rsi), %xmm0
@@ -341,7 +340,7 @@
L(mm_len_32_or_more_backward):
cmp $64, %rdx
- jg L(mm_len_64_or_more_backward)
+ ja L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
movdqu (%rsi), %xmm0
@@ -356,7 +355,7 @@
L(mm_len_64_or_more_backward):
cmp $128, %rdx
- jg L(mm_len_128_or_more_backward)
+ ja L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
movdqu (%rsi), %xmm0
@@ -378,10 +377,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_backward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_backward)
-
/* Aligning the address of destination. We need to save
16 bits from the source in order not to overwrite them. */
movdqu -16(%rsi, %rdx), %xmm0
@@ -405,22 +400,19 @@
movdqu %xmm2, -48(%rdi, %rdx)
movdqu %xmm3, -64(%rdi, %rdx)
movdqa %xmm4, -16(%r9)
- movdqa %xmm5, -32(%r9)
- movdqa %xmm6, -48(%r9)
- movdqa %xmm7, -64(%r9)
+ movaps %xmm5, -32(%r9)
+ movaps %xmm6, -48(%r9)
+ movaps %xmm7, -64(%r9)
lea -64(%r9), %r9
lea 64(%rdi), %rbx
and $-64, %rbx
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
cmp %r9, %rbx
- jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jae L(mm_recalc_len)
+
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_backward)
.p2align 4
L(mm_main_loop_backward):
@@ -432,13 +424,13 @@
movdqu -32(%r9, %r8), %xmm2
movdqu -16(%r9, %r8), %xmm3
movdqa %xmm0, -64(%r9)
- movdqa %xmm1, -48(%r9)
- movdqa %xmm2, -32(%r9)
- movdqa %xmm3, -16(%r9)
+ movaps %xmm1, -48(%r9)
+ movaps %xmm2, -32(%r9)
+ movaps %xmm3, -16(%r9)
lea -64(%r9), %r9
cmp %r9, %rbx
jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
/* Copy [0..16] and return. */
L(mm_len_0_16_bytes_backward):
@@ -485,138 +477,23 @@
/* Big length copy forward part. */
-L(mm_large_page_forward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- mov %rsi, %r8
- mov %rdi, %r9
-
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm1
- movdqu 32(%rsi), %xmm2
- movdqu 48(%rsi), %xmm3
-
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi = aligned dst */
-
- sub %r9, %rsi /* rsi = diff */
-
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
-
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movntdq %xmm4, (%rdi)
- movntdq %xmm5, 16(%rdi)
- movntdq %xmm6, 32(%rdi)
- movntdq %xmm7, 48(%rdi)
- add $64, %rdi
-
- lea (%r9, %rdx), %rbx
- and $-128, %rbx
-
- cmp %rdi, %rbx
- jbe L(mm_copy_remaining_forward)
-
.p2align 4
L(mm_large_page_loop_forward):
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqu 64(%rdi, %rsi), %xmm4
- movdqu 80(%rdi, %rsi), %xmm5
- movdqu 96(%rdi, %rsi), %xmm6
- movdqu 112(%rdi, %rsi), %xmm7
- movntdq %xmm0, (%rdi)
- movntdq %xmm1, 16(%rdi)
- movntdq %xmm2, 32(%rdi)
- movntdq %xmm3, 48(%rdi)
- movntdq %xmm4, 64(%rdi)
- movntdq %xmm5, 80(%rdi)
- movntdq %xmm6, 96(%rdi)
- movntdq %xmm7, 112(%rdi)
- lea 128(%rdi), %rdi
- cmp %rdi, %rbx
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movntdq %xmm0, (%r8)
+ movntdq %xmm1, 16(%r8)
+ movntdq %xmm2, 32(%r8)
+ movntdq %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
ja L(mm_large_page_loop_forward)
sfence
-
- add %r9, %rdx
- sub %rdi, %rdx
-/* We copied all up till %rdi position in the dst.
- In %rdx now is how many bytes are left to copy.
- Now we need to advance %r8. */
- lea (%rdi, %rsi), %r8
-
- cmp $64, %rdx
- jb L(mm_remaining_0_64_bytes_forward)
-
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu 32(%r8), %xmm2
- movdqu 48(%r8), %xmm3
- movdqu -64(%r8, %rdx), %xmm4
- movdqu -48(%r8, %rdx), %xmm5
- movdqu -32(%r8, %rdx), %xmm6
- movdqu -16(%r8, %rdx), %xmm7
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, 32(%rdi)
- movdqu %xmm3, 48(%rdi)
- movdqu %xmm4, -64(%rdi, %rdx)
- movdqu %xmm5, -48(%rdi, %rdx)
- movdqu %xmm6, -32(%rdi, %rdx)
- movdqu %xmm7, -16(%rdi, %rdx)
- jmp L(mm_return)
-
+ jmp L(mm_copy_remaining_forward)
/* Big length copy backward part. */
-L(mm_large_page_backward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- movdqu -16(%rsi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- movdqu -48(%rsi, %rdx), %xmm2
- movdqu -64(%rsi, %rdx), %xmm3
-
- lea (%rdi, %rdx), %r9
- and $-64, %r9
-
- mov %rsi, %r8
- sub %rdi, %r8
-
- movdqu -16(%r9, %r8), %xmm4
- movdqu -32(%r9, %r8), %xmm5
- movdqu -48(%r9, %r8), %xmm6
- movdqu -64(%r9, %r8), %xmm7
-
- movdqu %xmm0, -16(%rdi, %rdx)
- movdqu %xmm1, -32(%rdi, %rdx)
- movdqu %xmm2, -48(%rdi, %rdx)
- movdqu %xmm3, -64(%rdi, %rdx)
- movntdq %xmm4, -16(%r9)
- movntdq %xmm5, -32(%r9)
- movntdq %xmm6, -48(%r9)
- movntdq %xmm7, -64(%r9)
- lea -64(%r9), %r9
-
- lea 128(%rdi), %rbx
- and $-64, %rbx
-
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
- cmp %r9, %rbx
- jae L(mm_len_0_or_more_backward)
-
.p2align 4
L(mm_large_page_loop_backward):
movdqu -64(%r9, %r8), %xmm0
@@ -630,6 +507,7 @@
lea -64(%r9), %r9
cmp %r9, %rbx
jb L(mm_large_page_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ sfence
+ jmp L(mm_recalc_len)
END (MEMMOVE)
diff --git a/libc/arch-x86_64/x86_64.mk b/libc/arch-x86_64/x86_64.mk
index 2f0cf2d..bd5e9c1 100644
--- a/libc/arch-x86_64/x86_64.mk
+++ b/libc/arch-x86_64/x86_64.mk
@@ -1,7 +1,6 @@
# x86_64 specific configs
libc_common_src_files_x86_64 := \
- bionic/index.cpp \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
diff --git a/libc/bionic/dlmalloc.h b/libc/bionic/dlmalloc.h
index e065687..482fe0e 100644
--- a/libc/bionic/dlmalloc.h
+++ b/libc/bionic/dlmalloc.h
@@ -32,10 +32,15 @@
#define USE_SPIN_LOCKS 0
#define DEFAULT_MMAP_THRESHOLD (64U * 1024U)
-/* Export two symbols used by the VM. */
__BEGIN_DECLS
+
+/* Export two symbols used by the VM. */
int dlmalloc_trim(size_t) __LIBC_ABI_PUBLIC__;
void dlmalloc_inspect_all(void (*handler)(void*, void*, size_t, void*), void*) __LIBC_ABI_PUBLIC__;
+
+/* NVIDIA's libglcore.so has a reference to dlmalloc_usable_size. TODO: remove this. */
+size_t dlmalloc_usable_size(const void*) __LIBC_ABI_PUBLIC__;
+
__END_DECLS
/* Include the proper definitions. */
diff --git a/libc/bionic/libc_init_common.h b/libc/bionic/libc_init_common.h
index 59dc7df..3032f99 100644
--- a/libc/bionic/libc_init_common.h
+++ b/libc/bionic/libc_init_common.h
@@ -43,14 +43,14 @@
__noreturn void __libc_init(void* raw_args,
void (*onexit)(void),
int (*slingshot)(int, char**, char**),
- structors_array_t const * const structors);
-void __libc_fini(void* finit_array);
+ structors_array_t const* const structors);
+__LIBC_HIDDEN__ void __libc_fini(void* finit_array);
__END_DECLS
#if defined(__cplusplus)
class KernelArgumentBlock;
-void __LIBC_HIDDEN__ __libc_init_common(KernelArgumentBlock& args);
+__LIBC_HIDDEN__ void __libc_init_common(KernelArgumentBlock& args);
#endif
#endif
diff --git a/libc/bionic/malloc_debug_check.cpp b/libc/bionic/malloc_debug_check.cpp
index 2590ce7..0575595 100644
--- a/libc/bionic/malloc_debug_check.cpp
+++ b/libc/bionic/malloc_debug_check.cpp
@@ -51,10 +51,6 @@
#include "malloc_debug_common.h"
#include "private/ScopedPthreadMutexLocker.h"
-/* libc.debug.malloc.backlog */
-extern unsigned int g_malloc_debug_backlog;
-extern int g_malloc_debug_level;
-
#define MAX_BACKTRACE_DEPTH 16
#define ALLOCATION_TAG 0x1ee7d00d
#define BACKLOG_TAG 0xbabecafe
@@ -120,6 +116,12 @@
static hdr_t* backlog_head;
static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
+// This variable is set to the value of property libc.debug.malloc.backlog.
+// It determines the size of the backlog we use to detect multiple frees.
+static unsigned g_malloc_debug_backlog = 100;
+
+__LIBC_HIDDEN__ HashTable* g_hash_table;
+
static inline void init_front_guard(hdr_t* hdr) {
memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
}
@@ -508,11 +510,6 @@
}
static void ReportMemoryLeaks() {
- // We only track leaks at level 10.
- if (g_malloc_debug_level != 10) {
- return;
- }
-
// Use /proc/self/exe link to obtain the program name for logging
// purposes. If it's not available, we set it to "<unknown>".
char exe[PATH_MAX];
@@ -546,12 +543,23 @@
}
}
-extern "C" int malloc_debug_initialize() {
+extern "C" bool malloc_debug_initialize(HashTable* hash_table) {
+ g_hash_table = hash_table;
+
+ char debug_backlog[PROP_VALUE_MAX];
+ if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
+ g_malloc_debug_backlog = atoi(debug_backlog);
+ info_log("%s: setting backlog length to %d\n", getprogname(), g_malloc_debug_backlog);
+ }
+
backtrace_startup();
- return 0;
+ return true;
}
-extern "C" void malloc_debug_finalize() {
- ReportMemoryLeaks();
+extern "C" void malloc_debug_finalize(int malloc_debug_level) {
+ // We only track leaks at level 10.
+ if (malloc_debug_level == 10) {
+ ReportMemoryLeaks();
+ }
backtrace_shutdown();
}
diff --git a/libc/bionic/malloc_debug_common.cpp b/libc/bionic/malloc_debug_common.cpp
index db3f995..77ec080 100644
--- a/libc/bionic/malloc_debug_common.cpp
+++ b/libc/bionic/malloc_debug_common.cpp
@@ -48,13 +48,49 @@
#include "private/ScopedPthreadMutexLocker.h"
-/*
- * In a VM process, this is set to 1 after fork()ing out of zygote.
- */
+// In a VM process, this is set to 1 after fork()ing out of zygote.
int gMallocLeakZygoteChild = 0;
-__LIBC_HIDDEN__ pthread_mutex_t g_allocations_mutex = PTHREAD_MUTEX_INITIALIZER;
-__LIBC_HIDDEN__ HashTable g_hash_table;
+static HashTable g_hash_table;
+
+// Support for malloc debugging.
+// Table for dispatching malloc calls, initialized with default dispatchers.
+static const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) = {
+ Malloc(malloc), Malloc(free), Malloc(calloc), Malloc(realloc), Malloc(memalign), Malloc(malloc_usable_size),
+};
+
+// Selector of dispatch table to use for dispatching malloc calls.
+// TODO: fix http://b/15432753 and make this static again.
+const MallocDebug* __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
+
+// Handle to shared library where actual memory allocation is implemented.
+// This library is loaded and memory allocation calls are redirected there
+// when libc.debug.malloc environment variable contains value other than
+// zero:
+// 1 - For memory leak detections.
+// 5 - For filling allocated / freed memory with patterns defined by
+// CHK_SENTINEL_VALUE, and CHK_FILL_FREE macros.
+// 10 - For adding pre-, and post- allocation stubs in order to detect
+// buffer overruns.
+// Note that emulator's memory allocation instrumentation is not controlled by
+// libc.debug.malloc value, but rather by emulator, started with -memcheck
+// option. Note also, that if emulator has started with -memcheck option,
+// emulator's instrumented memory allocation will take over value saved in
+// libc.debug.malloc. In other words, if emulator has started with -memcheck
+// option, libc.debug.malloc value is ignored.
+// Actual functionality for debug levels 1-10 is implemented in
+// libc_malloc_debug_leak.so, while functionality for emulator's instrumented
+// allocations is implemented in libc_malloc_debug_qemu.so and can be run inside
+// the emulator only.
+#if !defined(LIBC_STATIC)
+static void* libc_malloc_impl_handle = NULL;
+#endif
+
+
+// The value of libc.debug.malloc.
+#if !defined(LIBC_STATIC)
+static int g_malloc_debug_level = 0;
+#endif
// =============================================================================
// output functions
@@ -123,7 +159,7 @@
}
*totalMemory = 0;
- ScopedPthreadMutexLocker locker(&g_allocations_mutex);
+ ScopedPthreadMutexLocker locker(&g_hash_table.lock);
if (g_hash_table.count == 0) {
*info = NULL;
@@ -204,17 +240,6 @@
return Malloc(posix_memalign)(memptr, alignment, size);
}
-// Support for malloc debugging.
-// Table for dispatching malloc calls, initialized with default dispatchers.
-extern const MallocDebug __libc_malloc_default_dispatch;
-const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) =
-{
- Malloc(malloc), Malloc(free), Malloc(calloc), Malloc(realloc), Malloc(memalign), Malloc(malloc_usable_size),
-};
-
-/* Selector of dispatch table to use for dispatching malloc calls. */
-const MallocDebug* __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
-
extern "C" void* malloc(size_t bytes) {
return __libc_malloc_dispatch->malloc(bytes);
}
@@ -248,59 +273,19 @@
#include <stdio.h>
#include "private/libc_logging.h"
-/* Table for dispatching malloc calls, depending on environment. */
-static MallocDebug g_malloc_dispatch_table __attribute__((aligned(32))) = {
- Malloc(malloc), Malloc(free), Malloc(calloc), Malloc(realloc), Malloc(memalign), Malloc(malloc_usable_size)
-};
-
-extern const char* __progname;
-
-/* Handle to shared library where actual memory allocation is implemented.
- * This library is loaded and memory allocation calls are redirected there
- * when libc.debug.malloc environment variable contains value other than
- * zero:
- * 1 - For memory leak detections.
- * 5 - For filling allocated / freed memory with patterns defined by
- * CHK_SENTINEL_VALUE, and CHK_FILL_FREE macros.
- * 10 - For adding pre-, and post- allocation stubs in order to detect
- * buffer overruns.
- * Note that emulator's memory allocation instrumentation is not controlled by
- * libc.debug.malloc value, but rather by emulator, started with -memcheck
- * option. Note also, that if emulator has started with -memcheck option,
- * emulator's instrumented memory allocation will take over value saved in
- * libc.debug.malloc. In other words, if emulator has started with -memcheck
- * option, libc.debug.malloc value is ignored.
- * Actual functionality for debug levels 1-10 is implemented in
- * libc_malloc_debug_leak.so, while functionality for emultor's instrumented
- * allocations is implemented in libc_malloc_debug_qemu.so and can be run inside
- * the emulator only.
- */
-static void* libc_malloc_impl_handle = NULL;
-
-/* This variable is set to the value of property libc.debug.malloc.backlog,
- * when the value of libc.debug.malloc = 10. It determines the size of the
- * backlog we use to detect multiple frees. If the property is not set, the
- * backlog length defaults to BACKLOG_DEFAULT_LEN.
- */
-__LIBC_HIDDEN__ unsigned int g_malloc_debug_backlog;
-#define BACKLOG_DEFAULT_LEN 100
-
-/* The value of libc.debug.malloc. */
-__LIBC_HIDDEN__ int g_malloc_debug_level;
-
template<typename FunctionType>
static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, const char* prefix, const char* suffix) {
char symbol[128];
snprintf(symbol, sizeof(symbol), "%s_%s", prefix, suffix);
*func = reinterpret_cast<FunctionType>(dlsym(malloc_impl_handler, symbol));
if (*func == NULL) {
- error_log("%s: dlsym(\"%s\") failed", __progname, symbol);
+ error_log("%s: dlsym(\"%s\") failed", getprogname(), symbol);
}
}
static void InitMalloc(void* malloc_impl_handler, MallocDebug* table, const char* prefix) {
__libc_format_log(ANDROID_LOG_INFO, "libc", "%s: using libc.debug.malloc %d (%s)\n",
- __progname, g_malloc_debug_level, prefix);
+ getprogname(), g_malloc_debug_level, prefix);
InitMallocFunction<MallocDebugMalloc>(malloc_impl_handler, &table->malloc, prefix, "malloc");
InitMallocFunction<MallocDebugFree>(malloc_impl_handler, &table->free, prefix, "free");
@@ -349,14 +334,14 @@
* then exit.
*/
if (__system_property_get("libc.debug.malloc.program", debug_program)) {
- if (!strstr(__progname, debug_program)) {
+ if (!strstr(getprogname(), debug_program)) {
return;
}
}
// mksh is way too leaky. http://b/7291287.
if (g_malloc_debug_level >= 10) {
- if (strcmp(__progname, "sh") == 0 || strcmp(__progname, "/system/bin/sh") == 0) {
+ if (strcmp(getprogname(), "sh") == 0 || strcmp(getprogname(), "/system/bin/sh") == 0) {
return;
}
}
@@ -365,35 +350,26 @@
switch (g_malloc_debug_level) {
case 1:
case 5:
- case 10: {
- char debug_backlog[PROP_VALUE_MAX];
- if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
- g_malloc_debug_backlog = atoi(debug_backlog);
- info_log("%s: setting backlog length to %d\n", __progname, g_malloc_debug_backlog);
- }
- if (g_malloc_debug_backlog == 0) {
- g_malloc_debug_backlog = BACKLOG_DEFAULT_LEN;
- }
+ case 10:
so_name = "libc_malloc_debug_leak.so";
break;
- }
case 20:
// Quick check: debug level 20 can only be handled in emulator.
if (!qemu_running) {
error_log("%s: Debug level %d can only be set in emulator\n",
- __progname, g_malloc_debug_level);
+ getprogname(), g_malloc_debug_level);
return;
}
// Make sure that memory checking has been enabled in emulator.
if (!memcheck_enabled) {
error_log("%s: Memory checking is not enabled in the emulator\n",
- __progname);
+ getprogname());
return;
}
so_name = "libc_malloc_debug_qemu.so";
break;
default:
- error_log("%s: Debug level %d is unknown\n", __progname, g_malloc_debug_level);
+ error_log("%s: Debug level %d is unknown\n", getprogname(), g_malloc_debug_level);
return;
}
@@ -401,7 +377,7 @@
void* malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
if (malloc_impl_handle == NULL) {
error_log("%s: Missing module %s required for malloc debug level %d: %s",
- __progname, so_name, g_malloc_debug_level, dlerror());
+ getprogname(), so_name, g_malloc_debug_level, dlerror());
return;
}
@@ -410,11 +386,11 @@
"malloc_debug_initialize"));
if (malloc_debug_initialize == NULL) {
error_log("%s: Initialization routine is not found in %s\n",
- __progname, so_name);
+ getprogname(), so_name);
dlclose(malloc_impl_handle);
return;
}
- if (malloc_debug_initialize() == -1) {
+ if (malloc_debug_initialize(&g_hash_table) == -1) {
dlclose(malloc_impl_handle);
return;
}
@@ -427,7 +403,7 @@
"memcheck_initialize"));
if (memcheck_initialize == NULL) {
error_log("%s: memcheck_initialize routine is not found in %s\n",
- __progname, so_name);
+ getprogname(), so_name);
dlclose(malloc_impl_handle);
return;
}
@@ -438,44 +414,52 @@
}
}
-
// Initialize malloc dispatch table with appropriate routines.
+ static MallocDebug malloc_dispatch_table __attribute__((aligned(32))) = {
+ Malloc(malloc),
+ Malloc(free),
+ Malloc(calloc),
+ Malloc(realloc),
+ Malloc(memalign),
+ Malloc(malloc_usable_size)
+ };
+
switch (g_malloc_debug_level) {
case 1:
- InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "leak");
+ InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "leak");
break;
case 5:
- InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "fill");
+ InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "fill");
break;
case 10:
- InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "chk");
+ InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "chk");
break;
case 20:
- InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "qemu_instrumented");
+ InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "qemu_instrumented");
break;
default:
break;
}
// Make sure dispatch table is initialized
- if ((g_malloc_dispatch_table.malloc == NULL) ||
- (g_malloc_dispatch_table.free == NULL) ||
- (g_malloc_dispatch_table.calloc == NULL) ||
- (g_malloc_dispatch_table.realloc == NULL) ||
- (g_malloc_dispatch_table.memalign == NULL) ||
- (g_malloc_dispatch_table.malloc_usable_size == NULL)) {
+ if ((malloc_dispatch_table.malloc == NULL) ||
+ (malloc_dispatch_table.free == NULL) ||
+ (malloc_dispatch_table.calloc == NULL) ||
+ (malloc_dispatch_table.realloc == NULL) ||
+ (malloc_dispatch_table.memalign == NULL) ||
+ (malloc_dispatch_table.malloc_usable_size == NULL)) {
error_log("%s: some symbols for libc.debug.malloc level %d were not found (see above)",
- __progname, g_malloc_debug_level);
+ getprogname(), g_malloc_debug_level);
dlclose(malloc_impl_handle);
} else {
- __libc_malloc_dispatch = &g_malloc_dispatch_table;
+ __libc_malloc_dispatch = &malloc_dispatch_table;
libc_malloc_impl_handle = malloc_impl_handle;
}
}
static void malloc_fini_impl() {
// Our BSD stdio implementation doesn't close the standard streams, it only flushes them.
- // And it doesn't do that until its atexit handler (_cleanup) is run, and we run first!
+ // And it doesn't do that until its atexit handler is run, and we run first!
// It's great that other unclosed FILE*s show up as malloc leaks, but we need to manually
// clean up the standard streams ourselves.
fclose(stdin);
@@ -487,14 +471,11 @@
reinterpret_cast<MallocDebugFini>(dlsym(libc_malloc_impl_handle,
"malloc_debug_finalize"));
if (malloc_debug_finalize != NULL) {
- malloc_debug_finalize();
+ malloc_debug_finalize(g_malloc_debug_level);
}
}
}
-static pthread_once_t malloc_init_once_ctl = PTHREAD_ONCE_INIT;
-static pthread_once_t malloc_fini_once_ctl = PTHREAD_ONCE_INIT;
-
#endif // !LIBC_STATIC
/* Initializes memory allocation framework.
@@ -502,21 +483,19 @@
* in libc_init_static.c and libc_init_dynamic.c files.
*/
extern "C" __LIBC_HIDDEN__ void malloc_debug_init() {
- /* We need to initialize malloc iff we implement here custom
- * malloc routines (i.e. USE_DL_PREFIX is defined) for libc.so */
#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
- if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
- error_log("Unable to initialize malloc_debug component.");
- }
+ static pthread_once_t malloc_init_once_ctl = PTHREAD_ONCE_INIT;
+ if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
+ error_log("Unable to initialize malloc_debug component.");
+ }
#endif // USE_DL_PREFIX && !LIBC_STATIC
}
extern "C" __LIBC_HIDDEN__ void malloc_debug_fini() {
- /* We need to finalize malloc iff we implement here custom
- * malloc routines (i.e. USE_DL_PREFIX is defined) for libc.so */
#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
- if (pthread_once(&malloc_fini_once_ctl, malloc_fini_impl)) {
- error_log("Unable to finalize malloc_debug component.");
- }
+ static pthread_once_t malloc_fini_once_ctl = PTHREAD_ONCE_INIT;
+ if (pthread_once(&malloc_fini_once_ctl, malloc_fini_impl)) {
+ error_log("Unable to finalize malloc_debug component.");
+ }
#endif // USE_DL_PREFIX && !LIBC_STATIC
}
diff --git a/libc/bionic/malloc_debug_common.h b/libc/bionic/malloc_debug_common.h
index c1c3c89..21cb44c 100644
--- a/libc/bionic/malloc_debug_common.h
+++ b/libc/bionic/malloc_debug_common.h
@@ -33,6 +33,8 @@
#ifndef MALLOC_DEBUG_COMMON_H
#define MALLOC_DEBUG_COMMON_H
+#include <pthread.h>
+#include <stdint.h>
#include <stdlib.h>
#include "private/libc_logging.h"
@@ -43,8 +45,6 @@
#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
#define SIZE_FLAG_MASK (SIZE_FLAG_ZYGOTE_CHILD)
-#define MAX_SIZE_T (~(size_t)0)
-
// This must match the alignment used by the malloc implementation.
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
@@ -77,6 +77,7 @@
};
struct HashTable {
+ pthread_mutex_t lock;
size_t count;
HashEntry* slots[HASHTABLE_SIZE];
};
@@ -97,20 +98,8 @@
MallocDebugMallocUsableSize malloc_usable_size;
};
-/* Malloc debugging initialization and finalization routines.
- *
- * These routines must be implemented in .so modules that implement malloc
- * debugging. The are is called once per process from malloc_init_impl and
- * malloc_fini_impl respectively.
- *
- * They are implemented in bionic/libc/bionic/malloc_debug_common.c when malloc
- * debugging gets initialized for the process.
- *
- * MallocDebugInit returns:
- * 0 on success, -1 on failure.
- */
-typedef int (*MallocDebugInit)();
-typedef void (*MallocDebugFini)();
+typedef bool (*MallocDebugInit)(HashTable*);
+typedef void (*MallocDebugFini)(int);
// =============================================================================
// log functions
diff --git a/libc/bionic/malloc_debug_leak.cpp b/libc/bionic/malloc_debug_leak.cpp
index 035765f..aa7c072 100644
--- a/libc/bionic/malloc_debug_leak.cpp
+++ b/libc/bionic/malloc_debug_leak.cpp
@@ -58,10 +58,8 @@
#error MALLOC_LEAK_CHECK is not defined.
#endif // !MALLOC_LEAK_CHECK
-// Global variables defined in malloc_debug_common.c
extern int gMallocLeakZygoteChild;
-extern pthread_mutex_t g_allocations_mutex;
-extern HashTable g_hash_table;
+extern HashTable* g_hash_table;
// =============================================================================
// stack trace functions
@@ -137,7 +135,7 @@
size |= SIZE_FLAG_ZYGOTE_CHILD;
}
- HashEntry* entry = find_entry(&g_hash_table, slot, backtrace, numEntries, size);
+ HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
if (entry != NULL) {
entry->allocations++;
@@ -150,58 +148,54 @@
entry->allocations = 1;
entry->slot = slot;
entry->prev = NULL;
- entry->next = g_hash_table.slots[slot];
+ entry->next = g_hash_table->slots[slot];
entry->numEntries = numEntries;
entry->size = size;
memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
- g_hash_table.slots[slot] = entry;
+ g_hash_table->slots[slot] = entry;
if (entry->next != NULL) {
entry->next->prev = entry;
}
// we just added an entry, increase the size of the hashtable
- g_hash_table.count++;
+ g_hash_table->count++;
}
return entry;
}
static int is_valid_entry(HashEntry* entry) {
- if (entry != NULL) {
- int i;
- for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
- HashEntry* e1 = g_hash_table.slots[i];
-
- while (e1 != NULL) {
- if (e1 == entry) {
- return 1;
- }
-
- e1 = e1->next;
- }
+ if (entry != NULL) {
+ for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
+ HashEntry* e1 = g_hash_table->slots[i];
+ while (e1 != NULL) {
+ if (e1 == entry) {
+ return 1;
}
+ e1 = e1->next;
+ }
}
-
- return 0;
+ }
+ return 0;
}
static void remove_entry(HashEntry* entry) {
- HashEntry* prev = entry->prev;
- HashEntry* next = entry->next;
+ HashEntry* prev = entry->prev;
+ HashEntry* next = entry->next;
- if (prev != NULL) entry->prev->next = next;
- if (next != NULL) entry->next->prev = prev;
+ if (prev != NULL) entry->prev->next = next;
+ if (next != NULL) entry->next->prev = prev;
- if (prev == NULL) {
- // we are the head of the list. set the head to be next
- g_hash_table.slots[entry->slot] = entry->next;
- }
+ if (prev == NULL) {
+ // we are the head of the list. set the head to be next
+ g_hash_table->slots[entry->slot] = entry->next;
+ }
- // we just removed and entry, decrease the size of the hashtable
- g_hash_table.count--;
+  // we just removed an entry, decrease the size of the hashtable
+ g_hash_table->count--;
}
// =============================================================================
@@ -276,7 +270,7 @@
void* base = Malloc(malloc)(size);
if (base != NULL) {
- ScopedPthreadMutexLocker locker(&g_allocations_mutex);
+ ScopedPthreadMutexLocker locker(&g_hash_table->lock);
uintptr_t backtrace[BACKTRACE_SIZE];
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
@@ -294,43 +288,45 @@
}
extern "C" void leak_free(void* mem) {
- if (mem != NULL) {
- ScopedPthreadMutexLocker locker(&g_allocations_mutex);
+ if (mem == NULL) {
+ return;
+ }
- // check the guard to make sure it is valid
- AllocationEntry* header = to_header(mem);
+ ScopedPthreadMutexLocker locker(&g_hash_table->lock);
- if (header->guard != GUARD) {
- // could be a memaligned block
- if (header->guard == MEMALIGN_GUARD) {
- // For memaligned blocks, header->entry points to the memory
- // allocated through leak_malloc.
- header = to_header(header->entry);
- }
- }
+ // check the guard to make sure it is valid
+ AllocationEntry* header = to_header(mem);
- if (header->guard == GUARD || is_valid_entry(header->entry)) {
- // decrement the allocations
- HashEntry* entry = header->entry;
- entry->allocations--;
- if (entry->allocations <= 0) {
- remove_entry(entry);
- Malloc(free)(entry);
- }
-
- // now free the memory!
- Malloc(free)(header);
- } else {
- debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
- header->guard, header->entry);
- }
+ if (header->guard != GUARD) {
+ // could be a memaligned block
+ if (header->guard == MEMALIGN_GUARD) {
+ // For memaligned blocks, header->entry points to the memory
+ // allocated through leak_malloc.
+ header = to_header(header->entry);
}
+ }
+
+ if (header->guard == GUARD || is_valid_entry(header->entry)) {
+ // decrement the allocations
+ HashEntry* entry = header->entry;
+ entry->allocations--;
+ if (entry->allocations <= 0) {
+ remove_entry(entry);
+ Malloc(free)(entry);
+ }
+
+ // now free the memory!
+ Malloc(free)(header);
+ } else {
+ debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
+ header->guard, header->entry);
+ }
}
extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
- /* Fail on overflow - just to be safe even though this code runs only
- * within the debugging C library, not the production one */
- if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+ // Fail on overflow - just to be safe even though this code runs only
+ // within the debugging C library, not the production one.
+ if (n_elements && SIZE_MAX / n_elements < elem_size) {
return NULL;
}
size_t size = n_elements * elem_size;
diff --git a/libc/bionic/malloc_debug_qemu.cpp b/libc/bionic/malloc_debug_qemu.cpp
index ac60c3b..2dda767 100644
--- a/libc/bionic/malloc_debug_qemu.cpp
+++ b/libc/bionic/malloc_debug_qemu.cpp
@@ -589,7 +589,7 @@
* Return:
* 0 on success, or -1 on failure.
*/
-extern "C" int malloc_debug_initialize() {
+extern "C" bool malloc_debug_initialize(HashTable*) {
/* We will be using emulator's magic page to report memory allocation
* activities. In essence, what magic page does, it translates writes to
* the memory mapped spaces into writes to an I/O port that emulator
@@ -598,7 +598,7 @@
int fd = open("/dev/qemu_trace", O_RDWR);
if (fd < 0) {
error_log("Unable to open /dev/qemu_trace");
- return -1;
+ return false;
} else {
qtrace = mmap(NULL, PAGESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
@@ -606,14 +606,13 @@
if (qtrace == MAP_FAILED) {
qtrace = NULL;
error_log("Unable to mmap /dev/qemu_trace");
- return -1;
+ return false;
}
}
/* Cache pid of the process this library has been initialized for. */
malloc_pid = getpid();
-
- return 0;
+ return true;
}
/* Completes malloc debugging instrumentation for the emulator.
@@ -759,9 +758,9 @@
return qemu_instrumented_malloc(0);
}
- /* Fail on overflow - just to be safe even though this code runs only
- * within the debugging C library, not the production one */
- if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+ // Fail on overflow - just to be safe even though this code runs only
+ // within the debugging C library, not the production one.
+ if (n_elements && SIZE_MAX / n_elements < elem_size) {
return NULL;
}
diff --git a/libc/bionic/ndk_cruft.cpp b/libc/bionic/ndk_cruft.cpp
index 1284b9a..cb9c9c9 100644
--- a/libc/bionic/ndk_cruft.cpp
+++ b/libc/bionic/ndk_cruft.cpp
@@ -32,6 +32,7 @@
#include <ctype.h>
#include <inttypes.h>
#include <pthread.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
@@ -40,6 +41,7 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
+#include <wchar.h>
// These were accidentally declared in <unistd.h> because we stupidly used to inline
// getpagesize() and __getpageshift(). Needed for backwards compatibility with old NDK apps.
@@ -221,4 +223,19 @@
return syscall(__NR_tkill, tid, sig);
}
+extern "C" wchar_t* wcswcs(wchar_t* haystack, wchar_t* needle) {
+ return wcsstr(haystack, needle);
+}
+
+// This was removed from POSIX 2008.
+extern "C" sighandler_t bsd_signal(int signum, sighandler_t handler) {
+ return signal(signum, handler);
+}
+
+// sysv_signal() was never in POSIX.
+extern sighandler_t _signal(int signum, sighandler_t handler, int flags);
+extern "C" sighandler_t sysv_signal(int signum, sighandler_t handler) {
+ return _signal(signum, handler, SA_RESETHAND);
+}
+
#endif
diff --git a/libc/bionic/signal.cpp b/libc/bionic/signal.cpp
index 48b2e72..66d75bd 100644
--- a/libc/bionic/signal.cpp
+++ b/libc/bionic/signal.cpp
@@ -28,7 +28,12 @@
#include <signal.h>
-static sighandler_t _signal(int signum, sighandler_t handler, int flags) {
+#ifdef __LP64__
+static
+#else
+__LIBC_HIDDEN__
+#endif
+sighandler_t _signal(int signum, sighandler_t handler, int flags) {
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_handler = handler;
@@ -41,14 +46,6 @@
return (sighandler_t) sa.sa_handler;
}
-sighandler_t bsd_signal(int signum, sighandler_t handler) {
- return _signal(signum, handler, SA_RESTART);
-}
-
-sighandler_t sysv_signal(int signum, sighandler_t handler) {
- return _signal(signum, handler, SA_RESETHAND);
-}
-
sighandler_t signal(int signum, sighandler_t handler) {
- return bsd_signal(signum, handler);
+ return _signal(signum, handler, SA_RESTART);
}
diff --git a/libc/include/signal.h b/libc/include/signal.h
index 45c1cda..0063b24 100644
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
@@ -102,8 +102,6 @@
extern int sigaction(int, const struct sigaction*, struct sigaction*);
extern sighandler_t signal(int, sighandler_t);
-extern sighandler_t bsd_signal(int, sighandler_t);
-extern sighandler_t sysv_signal(int, sighandler_t);
extern int siginterrupt(int, int);
diff --git a/libc/include/string.h b/libc/include/string.h
index c9ae03b..7727c0e 100644
--- a/libc/include/string.h
+++ b/libc/include/string.h
@@ -43,7 +43,6 @@
extern void* memset(void *, int, size_t);
extern void* memmem(const void *, size_t, const void *, size_t) __purefunc;
-extern char* index(const char *, int) __purefunc;
extern char* strchr(const char *, int) __purefunc;
extern char* __strchr_chk(const char *, int, size_t);
diff --git a/libc/include/strings.h b/libc/include/strings.h
index 8f5fec5..c4d5f6c 100644
--- a/libc/include/strings.h
+++ b/libc/include/strings.h
@@ -47,7 +47,6 @@
#define bzero(b, len) (void)(memset((b), '\0', (len)))
int ffs(int);
-char *index(const char *, int);
int strcasecmp(const char *, const char *);
int strncasecmp(const char *, const char *, size_t);
diff --git a/libc/include/wchar.h b/libc/include/wchar.h
index af7593f..d50e9ec 100644
--- a/libc/include/wchar.h
+++ b/libc/include/wchar.h
@@ -138,7 +138,6 @@
extern long double wcstold(const wchar_t*, wchar_t**);
extern unsigned long wcstoul(const wchar_t*, wchar_t**, int);
extern unsigned long long wcstoull(const wchar_t*, wchar_t**, int);
-extern wchar_t *wcswcs(const wchar_t *, const wchar_t *);
extern int wcswidth(const wchar_t *, size_t);
extern size_t wcsxfrm(wchar_t *, const wchar_t *, size_t);
extern int wctob(wint_t);
diff --git a/libc/upstream-openbsd/lib/libc/string/wcswcs.c b/libc/upstream-openbsd/lib/libc/string/wcswcs.c
deleted file mode 100644
index bd35605..0000000
--- a/libc/upstream-openbsd/lib/libc/string/wcswcs.c
+++ /dev/null
@@ -1,5 +0,0 @@
-/* $OpenBSD: wcswcs.c,v 1.1 2005/04/13 16:35:58 espie Exp $ */
-/* $NetBSD: wcswcs.c,v 1.1 2003/03/05 20:18:17 tshiozak Exp $ */
-
-#define WCSWCS
-#include "wcsstr.c"
diff --git a/tests/string_test.cpp b/tests/string_test.cpp
index f17e575..c62f43b 100644
--- a/tests/string_test.cpp
+++ b/tests/string_test.cpp
@@ -909,6 +909,35 @@
}
}
+TEST(string, memmove_cache_size) {
+ size_t len = 600000;
+ int max_alignment = 31;
+ int alignments[] = {0, 5, 11, 29, 30};
+ char* ptr = reinterpret_cast<char*>(malloc(sizeof(char) * len));
+ char* ptr1 = reinterpret_cast<char*>(malloc(2 * sizeof(char) * len));
+ char* glob_ptr2 = reinterpret_cast<char*>(malloc(2 * sizeof(char) * len + max_alignment));
+ size_t pos = 64;
+
+ ASSERT_TRUE(ptr != NULL);
+ ASSERT_TRUE(ptr1 != NULL);
+ ASSERT_TRUE(glob_ptr2 != NULL);
+
+ for (int i = 0; i < 5; i++) {
+ char* ptr2 = glob_ptr2 + alignments[i];
+ memset(ptr1, random() & 255, 2 * len);
+ memset(ptr1, random() & 255, len);
+ memcpy(ptr2, ptr1, 2 * len);
+ memcpy(ptr, ptr1, len);
+ memcpy(ptr1 + pos, ptr, len);
+
+ ASSERT_TRUE(memmove(ptr2 + pos, ptr, len) == ptr2 + pos);
+ ASSERT_EQ(0, memcmp(ptr2, ptr1, 2 * len));
+ }
+ free(ptr);
+ free(ptr1);
+ free(glob_ptr2);
+}
+
static void verify_memmove(char* src_copy, char* dst, char* src, size_t size) {
memset(dst, 0, size);
memcpy(src, src_copy, size);
diff --git a/tests/wchar_test.cpp b/tests/wchar_test.cpp
index 5a250a2..e76026f 100644
--- a/tests/wchar_test.cpp
+++ b/tests/wchar_test.cpp
@@ -221,7 +221,7 @@
ASSERT_LT(WCHAR_MIN, WCHAR_MAX);
}
-TEST(wchar, wcsstr_wcswcs) {
+TEST(wchar, wcsstr) {
const wchar_t* haystack = L"matches hello world, not the second hello world";
const wchar_t* empty_needle = L"";
const wchar_t* good_needle = L"ll";
@@ -230,10 +230,6 @@
ASSERT_EQ(haystack, wcsstr(haystack, empty_needle));
ASSERT_EQ(&haystack[10], wcsstr(haystack, good_needle));
ASSERT_EQ(NULL, wcsstr(haystack, bad_needle));
-
- ASSERT_EQ(haystack, wcswcs(haystack, empty_needle));
- ASSERT_EQ(&haystack[10], wcswcs(haystack, good_needle));
- ASSERT_EQ(NULL, wcswcs(haystack, bad_needle));
}
TEST(wchar, mbtowc) {