libc: ARM64: update memset/strlen/memcpy/memmove to newlib/cortex-strings
* Bionic benchmark results are at the bottom
* This is a squash of the following commits:
libc: ARM64: optimize memset.
This is an optimized memset for AArch64. Memset is split into 4 main
cases: small sets of up to 16 bytes, and medium sets of 16..96 bytes,
which are fully unrolled. Large memsets of more than 96 bytes align the
destination and use an unrolled loop processing 64 bytes per
iteration. Memsets of zero of more than 256 bytes use the dc zva
instruction, and there are faster versions for the common ZVA sizes of
64 and 128 bytes. STP of Q registers is used to reduce code size
without loss of performance.
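As a rough illustration only (not part of the patch; the function name
is made up and the real code is assembly), the small-size handling
avoids loops by storing from both ends of the buffer and letting the
two stores overlap in the middle; medium sizes use the same idea with
16- and 32-byte stores, while large sizes fall through to the aligned
64-byte loop or the dc zva path:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* C model of the 0..16 byte case: one store at the start and one at
       the end cover every length in the bucket without a loop. */
    static void set_small(unsigned char *dst, unsigned char c, size_t n) {
        uint64_t v = c * 0x0101010101010101ull;  /* replicate the byte */
        if (n >= 8) {                 /* 8..16 bytes: two 8-byte stores */
            memcpy(dst, &v, 8);
            memcpy(dst + n - 8, &v, 8);
        } else if (n >= 4) {          /* 4..7 bytes: two 4-byte stores */
            uint32_t w = (uint32_t)v;
            memcpy(dst, &w, 4);
            memcpy(dst + n - 4, &w, 4);
        } else if (n) {               /* 1..3 bytes */
            dst[0] = dst[n / 2] = dst[n - 1] = c;
        }
    }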
Change-Id: I0c5b5ec5ab8a1fd0f23eee8fbacada0be08e841f
libc: ARM64: improve performance in strlen
Change-Id: Ic20f93a0052a49bd76cd6795f51e8606ccfbf11c
libc: ARM64: Optimize memcpy.
This is an optimized memcpy for AArch64. Copies are split into 3 main
cases: small copies of up to 16 bytes, and medium copies of 17..96
bytes, which are fully unrolled. Large copies of more than 96 bytes
align the destination and use an unrolled loop processing 64 bytes per
iteration. In order to share code with memmove, small and medium copies
read all data before writing, allowing any kind of overlap. On a random
copy test, memcpy is 40.8% faster on Cortex-A57 and 28.4% faster on
Cortex-A53.
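A rough C model of the read-before-write idea (illustration only; the
function name is made up and the real code is assembly):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* An 8..16 byte copy that loads all data before storing anything,
       so it is correct even when src and dst overlap. This property is
       what lets memmove reuse the memcpy code for small/medium sizes. */
    static void copy8_16(void *dst, const void *src, size_t n) {
        uint64_t head, tail;
        memcpy(&head, src, 8);                        /* load first 8 bytes */
        memcpy(&tail, (const char *)src + n - 8, 8);  /* load last 8 bytes  */
        memcpy(dst, &head, 8);                        /* store first 8      */
        memcpy((char *)dst + n - 8, &tail, 8);        /* store last 8       */
    }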
Change-Id: Ibb9483e45bbc0e8ca3d5ce98a31c55dfd8a5ac28
libc: AArch64: Tune memcpy
* Further tuning for performance.
Change-Id: Id08eaab885f9743fa7575077924a947c1b88e4ff
libc: ARM64: optimize memmove for Cortex-A53
* Sadly this does not work on Denver or Kryo, so it can't go into the generic version
This is an optimized memmove for AArch64. All copies of up to 96 bytes
and all backward copies are done by the new memcpy. The only remaining
case is large forward copies, which are handled in the same way as the
memcpy loop, but copying from the end rather than the start.
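A rough C model of that strategy (illustration only; the function name
is made up, and the real code is assembly moving 64 bytes per
iteration rather than single bytes):

    #include <stddef.h>
    #include <stdint.h>

    static void *memmove_model(void *dst, const void *src, size_t n) {
        char *d = dst;
        const char *s = src;
        if (n <= 96) {
            /* Small/medium: the real memcpy reads all data before
               writing, so any overlap is fine; model that with a
               temporary buffer. */
            char tmp[96];
            for (size_t i = 0; i < n; i++) tmp[i] = s[i];
            for (size_t i = 0; i < n; i++) d[i] = tmp[i];
        } else if ((uintptr_t)d <= (uintptr_t)s) {
            /* "Backward" move (dst below src): a plain forward copy is
               safe, so the real code simply branches to memcpy. */
            for (size_t i = 0; i < n; i++) d[i] = s[i];
        } else {
            /* Large forward move with possible overlap: copy from the
               end so source bytes are read before being overwritten. */
            while (n--) d[n] = s[n];
        }
        return dst;
    }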
Tested on the Nextbit Robin with MSM8992 (Snapdragon 808):
Before (iterations, ns/op, throughput):
BM_string_memcmp/8 1000k 27 0.286 GiB/s
BM_string_memcmp/64 50M 20 3.053 GiB/s
BM_string_memcmp/512 20M 126 4.060 GiB/s
BM_string_memcmp/1024 10M 234 4.372 GiB/s
BM_string_memcmp/8Ki 1000k 1726 4.745 GiB/s
BM_string_memcmp/16Ki 500k 3711 4.415 GiB/s
BM_string_memcmp/32Ki 200k 8276 3.959 GiB/s
BM_string_memcmp/64Ki 100k 16351 4.008 GiB/s
BM_string_memcpy/8 1000k 13 0.612 GiB/s
BM_string_memcpy/64 1000k 8 7.187 GiB/s
BM_string_memcpy/512 50M 38 13.311 GiB/s
BM_string_memcpy/1024 20M 86 11.858 GiB/s
BM_string_memcpy/8Ki 5M 620 13.203 GiB/s
BM_string_memcpy/16Ki 1000k 1265 12.950 GiB/s
BM_string_memcpy/32Ki 500k 2977 11.004 GiB/s
BM_string_memcpy/64Ki 500k 8003 8.188 GiB/s
BM_string_memmove/8 1000k 11 0.684 GiB/s
BM_string_memmove/64 1000k 16 3.855 GiB/s
BM_string_memmove/512 50M 57 8.915 GiB/s
BM_string_memmove/1024 20M 117 8.720 GiB/s
BM_string_memmove/8Ki 2M 853 9.594 GiB/s
BM_string_memmove/16Ki 1000k 1731 9.462 GiB/s
BM_string_memmove/32Ki 500k 3566 9.189 GiB/s
BM_string_memmove/64Ki 500k 7708 8.501 GiB/s
BM_string_memset/8 1000k 16 0.487 GiB/s
BM_string_memset/64 1000k 16 3.995 GiB/s
BM_string_memset/512 50M 37 13.489 GiB/s
BM_string_memset/1024 50M 58 17.405 GiB/s
BM_string_memset/8Ki 5M 451 18.160 GiB/s
BM_string_memset/16Ki 2M 883 18.554 GiB/s
BM_string_memset/32Ki 1000k 2181 15.022 GiB/s
BM_string_memset/64Ki 500k 4563 14.362 GiB/s
BM_string_strlen/8 1000k 8 0.965 GiB/s
BM_string_strlen/64 1000k 16 3.855 GiB/s
BM_string_strlen/512 20M 92 5.540 GiB/s
BM_string_strlen/1024 10M 167 6.111 GiB/s
BM_string_strlen/8Ki 1000k 1237 6.620 GiB/s
BM_string_strlen/16Ki 1000k 2765 5.923 GiB/s
BM_string_strlen/32Ki 500k 6135 5.341 GiB/s
BM_string_strlen/64Ki 200k 13168 4.977 GiB/s
After (iterations, ns/op, throughput):
BM_string_memcmp/8 1000k 21 0.369 GiB/s
BM_string_memcmp/64 1000k 28 2.272 GiB/s
BM_string_memcmp/512 20M 128 3.983 GiB/s
BM_string_memcmp/1024 10M 234 4.375 GiB/s
BM_string_memcmp/8Ki 1000k 1732 4.728 GiB/s
BM_string_memcmp/16Ki 500k 3485 4.701 GiB/s
BM_string_memcmp/32Ki 500k 7031 4.660 GiB/s
BM_string_memcmp/64Ki 200k 14296 4.584 GiB/s
BM_string_memcpy/8 1000k 5 1.458 GiB/s
BM_string_memcpy/64 1000k 7 8.952 GiB/s
BM_string_memcpy/512 50M 36 13.907 GiB/s
BM_string_memcpy/1024 20M 80 12.750 GiB/s
BM_string_memcpy/8Ki 5M 572 14.307 GiB/s
BM_string_memcpy/16Ki 1000k 1165 14.053 GiB/s
BM_string_memcpy/32Ki 500k 3141 10.430 GiB/s
BM_string_memcpy/64Ki 500k 7008 9.351 GiB/s
BM_string_memmove/8 50M 7 1.074 GiB/s
BM_string_memmove/64 1000k 9 6.593 GiB/s
BM_string_memmove/512 50M 37 13.502 GiB/s
BM_string_memmove/1024 20M 80 12.656 GiB/s
BM_string_memmove/8Ki 5M 573 14.281 GiB/s
BM_string_memmove/16Ki 1000k 1168 14.018 GiB/s
BM_string_memmove/32Ki 1000k 2825 11.599 GiB/s
BM_string_memmove/64Ki 500k 6548 10.008 GiB/s
BM_string_memset/8 1000k 7 1.038 GiB/s
BM_string_memset/64 1000k 8 7.151 GiB/s
BM_string_memset/512 1000k 29 17.272 GiB/s
BM_string_memset/1024 50M 53 18.969 GiB/s
BM_string_memset/8Ki 5M 424 19.300 GiB/s
BM_string_memset/16Ki 2M 846 19.350 GiB/s
BM_string_memset/32Ki 1000k 2028 16.156 GiB/s
BM_string_memset/64Ki 500k 4514 14.517 GiB/s
BM_string_strlen/8 1000k 7 1.120 GiB/s
BM_string_strlen/64 1000k 16 3.918 GiB/s
BM_string_strlen/512 50M 64 7.894 GiB/s
BM_string_strlen/1024 20M 104 9.815 GiB/s
BM_string_strlen/8Ki 5M 664 12.337 GiB/s
BM_string_strlen/16Ki 1000k 1291 12.682 GiB/s
BM_string_strlen/32Ki 1000k 2940 11.143 GiB/s
BM_string_strlen/64Ki 500k 6440 10.175 GiB/s
Change-Id: I635bd2798a755256f748b2af19b1a56fb85a40c6
diff --git a/libc/arch-arm64/generic/bionic/memset.S b/libc/arch-arm64/generic/bionic/memset.S
index 9626c0b..3416cfb 100644
--- a/libc/arch-arm64/generic/bionic/memset.S
+++ b/libc/arch-arm64/generic/bionic/memset.S
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Linaro Limited
+/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -22,13 +22,39 @@
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/*
+ * Copyright (c) 2015 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
/* Assumptions:
*
- * ARMv8-a, AArch64
- * Unaligned accesses
+ * ARMv8-a, AArch64, unaligned accesses
*
*/
@@ -45,24 +71,22 @@
values rather than re-reading them each call. */
#define dstin x0
-#define val w1
+#define val x1
+#define valw w1
#define count x2
-#define dst_count x3 /* for __memset_chk */
-#define tmp1 x3
-#define tmp1w w3
-#define tmp2 x4
-#define tmp2w w4
-#define zva_len_x x5
-#define zva_len w5
-#define zva_bits_x x6
+#define dst x3
+#define dstend x4
+#define tmp1 x5
+#define tmp1w w5
+#define tmp2 x6
+#define tmp2w w6
+#define zva_len x5
+#define zva_lenw w7
-#define A_l x7
-#define A_lw w7
-#define dst x8
-#define tmp3w w9
+#define L(l) .L ## l
ENTRY(__memset_chk)
- cmp count, dst_count
+ cmp count, dst
bls memset
// Preserve for accurate backtrace.
@@ -76,171 +100,152 @@
ENTRY(memset)
- mov dst, dstin /* Preserve return value. */
- ands A_lw, val, #255
- b.eq .Lzero_mem
- orr A_lw, A_lw, A_lw, lsl #8
- orr A_lw, A_lw, A_lw, lsl #16
- orr A_l, A_l, A_l, lsl #32
-.Ltail_maybe_long:
- cmp count, #64
- b.ge .Lnot_short
-.Ltail_maybe_tiny:
- cmp count, #15
- b.le .Ltail15tiny
-.Ltail63:
- ands tmp1, count, #0x30
- b.eq .Ltail15
- add dst, dst, tmp1
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- stp A_l, A_l, [dst, #-48]
-1:
- stp A_l, A_l, [dst, #-32]
-2:
- stp A_l, A_l, [dst, #-16]
+ dup v0.16B, valw
+ add dstend, dstin, count
-.Ltail15:
- and count, count, #15
- add dst, dst, count
- stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
+ cmp count, 96
+ b.hi L(set_long)
+ cmp count, 16
+ b.hs L(set_medium)
+ mov val, v0.D[0]
+
+ /* Set 0..15 bytes. */
+ tbz count, 3, 1f
+ str val, [dstin]
+ str val, [dstend, -8]
+ ret
+ nop
+1: tbz count, 2, 2f
+ str valw, [dstin]
+ str valw, [dstend, -4]
+ ret
+2: cbz count, 3f
+ strb valw, [dstin]
+ tbz count, 1, 3f
+ strh valw, [dstend, -2]
+3: ret
+
+ /* Set 17..96 bytes. */
+L(set_medium):
+ str q0, [dstin]
+ tbnz count, 6, L(set96)
+ str q0, [dstend, -16]
+ tbz count, 5, 1f
+ str q0, [dstin, 16]
+ str q0, [dstend, -32]
+1: ret
+
+ .p2align 4
+ /* Set 64..96 bytes. Write 64 bytes from the start and
+ 32 bytes from the end. */
+L(set96):
+ str q0, [dstin, 16]
+ stp q0, q0, [dstin, 32]
+ stp q0, q0, [dstend, -32]
ret
-.Ltail15tiny:
- /* Set up to 15 bytes. Does not assume earlier memory
- being set. */
- tbz count, #3, 1f
- str A_l, [dst], #8
-1:
- tbz count, #2, 1f
- str A_lw, [dst], #4
-1:
- tbz count, #1, 1f
- strh A_lw, [dst], #2
-1:
- tbz count, #0, 1f
- strb A_lw, [dst]
-1:
+ .p2align 3
+ nop
+L(set_long):
+ and valw, valw, 255
+ bic dst, dstin, 15
+ str q0, [dstin]
+ cmp count, 256
+ ccmp valw, 0, 0, cs
+ b.eq L(try_zva)
+L(no_zva):
+ sub count, dstend, dst /* Count is 16 too large. */
+ add dst, dst, 16
+ sub count, count, 64 + 16 /* Adjust count and bias for loop. */
+1: stp q0, q0, [dst], 64
+ stp q0, q0, [dst, -32]
+L(tail64):
+ subs count, count, 64
+ b.hi 1b
+2: stp q0, q0, [dstend, -64]
+ stp q0, q0, [dstend, -32]
ret
- /* Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line, this ensures the entire loop is in one line. */
- .p2align 6
-.Lnot_short:
- neg tmp2, dst
- ands tmp2, tmp2, #15
- b.eq 2f
- /* Bring DST to 128-bit (16-byte) alignment. We know that there's
- * more than that to set, so we simply store 16 bytes and advance by
- * the amount required to reach alignment. */
- sub count, count, tmp2
- stp A_l, A_l, [dst]
- add dst, dst, tmp2
- /* There may be less than 63 bytes to go now. */
- cmp count, #63
- b.le .Ltail63
-2:
- sub dst, dst, #16 /* Pre-bias. */
- sub count, count, #64
-1:
- stp A_l, A_l, [dst, #16]
- stp A_l, A_l, [dst, #32]
- stp A_l, A_l, [dst, #48]
- stp A_l, A_l, [dst, #64]!
- subs count, count, #64
- b.ge 1b
- tst count, #0x3f
- add dst, dst, #16
- b.ne .Ltail63
- ret
-
- /* For zeroing memory, check to see if we can use the ZVA feature to
- * zero entire 'cache' lines. */
-.Lzero_mem:
- mov A_l, #0
- cmp count, #63
- b.le .Ltail_maybe_tiny
- neg tmp2, dst
- ands tmp2, tmp2, #15
- b.eq 1f
- sub count, count, tmp2
- stp A_l, A_l, [dst]
- add dst, dst, tmp2
- cmp count, #63
- b.le .Ltail63
-1:
- /* For zeroing small amounts of memory, it's not worth setting up
- * the line-clear code. */
- cmp count, #128
- b.lt .Lnot_short
-#ifdef MAYBE_VIRT
- /* For efficiency when virtualized, we cache the ZVA capability. */
- adrp tmp2, .Lcache_clear
- ldr zva_len, [tmp2, #:lo12:.Lcache_clear]
- tbnz zva_len, #31, .Lnot_short
- cbnz zva_len, .Lzero_by_line
+ .p2align 3
+L(try_zva):
mrs tmp1, dczid_el0
- tbz tmp1, #4, 1f
- /* ZVA not available. Remember this for next time. */
- mov zva_len, #~0
- str zva_len, [tmp2, #:lo12:.Lcache_clear]
- b .Lnot_short
-1:
- mov tmp3w, #4
- and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
- lsl zva_len, tmp3w, zva_len
- str zva_len, [tmp2, #:lo12:.Lcache_clear]
-#else
- mrs tmp1, dczid_el0
- tbnz tmp1, #4, .Lnot_short
- mov tmp3w, #4
- and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
- lsl zva_len, tmp3w, zva_len
-#endif
+ tbnz tmp1w, 4, L(no_zva)
+ and tmp1w, tmp1w, 15
+ cmp tmp1w, 4 /* ZVA size is 64 bytes. */
+ b.ne L(zva_128)
-.Lzero_by_line:
- /* Compute how far we need to go to become suitably aligned. We're
- * already at quad-word alignment. */
- cmp count, zva_len_x
- b.lt .Lnot_short /* Not enough to reach alignment. */
- sub zva_bits_x, zva_len_x, #1
- neg tmp2, dst
- ands tmp2, tmp2, zva_bits_x
- b.eq 1f /* Already aligned. */
- /* Not aligned, check that there's enough to copy after alignment. */
- sub tmp1, count, tmp2
- cmp tmp1, #64
- ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
- b.lt .Lnot_short
- /* We know that there's at least 64 bytes to zero and that it's safe
- * to overrun by 64 bytes. */
- mov count, tmp1
-2:
- stp A_l, A_l, [dst]
- stp A_l, A_l, [dst, #16]
- stp A_l, A_l, [dst, #32]
- subs tmp2, tmp2, #64
- stp A_l, A_l, [dst, #48]
- add dst, dst, #64
- b.ge 2b
- /* We've overrun a bit, so adjust dst downwards. */
- add dst, dst, tmp2
-1:
- sub count, count, zva_len_x
-3:
- dc zva, dst
- add dst, dst, zva_len_x
- subs count, count, zva_len_x
- b.ge 3b
- ands count, count, zva_bits_x
- b.ne .Ltail_maybe_long
+ /* Write the first and last 64 byte aligned block using stp rather
+ than using DC ZVA. This is faster on some cores.
+ */
+L(zva_64):
+ str q0, [dst, 16]
+ stp q0, q0, [dst, 32]
+ bic dst, dst, 63
+ stp q0, q0, [dst, 64]
+ stp q0, q0, [dst, 96]
+ sub count, dstend, dst /* Count is now 128 too large. */
+ sub count, count, 128+64+64 /* Adjust count and bias for loop. */
+ add dst, dst, 128
+ nop
+1: dc zva, dst
+ add dst, dst, 64
+ subs count, count, 64
+ b.hi 1b
+ stp q0, q0, [dst, 0]
+ stp q0, q0, [dst, 32]
+ stp q0, q0, [dstend, -64]
+ stp q0, q0, [dstend, -32]
ret
+
+ .p2align 3
+L(zva_128):
+ cmp tmp1w, 5 /* ZVA size is 128 bytes. */
+ b.ne L(zva_other)
+
+ str q0, [dst, 16]
+ stp q0, q0, [dst, 32]
+ stp q0, q0, [dst, 64]
+ stp q0, q0, [dst, 96]
+ bic dst, dst, 127
+ sub count, dstend, dst /* Count is now 128 too large. */
+ sub count, count, 128+128 /* Adjust count and bias for loop. */
+ add dst, dst, 128
+1: dc zva, dst
+ add dst, dst, 128
+ subs count, count, 128
+ b.hi 1b
+ stp q0, q0, [dstend, -128]
+ stp q0, q0, [dstend, -96]
+ stp q0, q0, [dstend, -64]
+ stp q0, q0, [dstend, -32]
+ ret
+
+L(zva_other):
+ mov tmp2w, 4
+ lsl zva_lenw, tmp2w, tmp1w
+ add tmp1, zva_len, 64 /* Max alignment bytes written. */
+ cmp count, tmp1
+ blo L(no_zva)
+
+ sub tmp2, zva_len, 1
+ add tmp1, dst, zva_len
+ add dst, dst, 16
+ subs count, tmp1, dst /* Actual alignment bytes to write. */
+ bic tmp1, tmp1, tmp2 /* Aligned dc zva start address. */
+ beq 2f
+1: stp q0, q0, [dst], 64
+ stp q0, q0, [dst, -32]
+ subs count, count, 64
+ b.hi 1b
+2: mov dst, tmp1
+ sub count, dstend, tmp1 /* Remaining bytes to write. */
+ subs count, count, zva_len
+ b.lo 4f
+3: dc zva, dst
+ add dst, dst, zva_len
+ subs count, count, zva_len
+ b.hs 3b
+4: add count, count, zva_len
+ b L(tail64)
+
END(memset)
-
-#ifdef MAYBE_VIRT
- .bss
- .p2align 2
-.Lcache_clear:
- .space 4
-#endif