Use unified syntax to compile with both LLVM and GCC.
All arch-arm and arch-arm64 .S files were compiled with GCC, both with
and without this patch, and the output object files were identical.
When compiled with LLVM and this patch applied, the output object
files were likewise identical to GCC's output.
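
For reference, a minimal standalone sketch of the two spellings of the
lane-extract mov (not part of this patch; the register names here are
arbitrary). The element-count spelling was accepted by GNU as but not
by LLVM's integrated assembler; the unified syntax is accepted by both:

    // mov x0, v0.2d[0]  // gas-only spelling; rejected by LLVM's
                         // integrated assembler
    mov    x0, v0.d[0]   // unified syntax: move the low 64-bit lane
                         // of v0 into x0; accepted by both assemblers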
BUG: 18061004
Change-Id: I458914d512ddf5496e4eb3d288bf032cd526d32b
diff --git a/libc/arch-arm64/generic/bionic/memchr.S b/libc/arch-arm64/generic/bionic/memchr.S
index e5ea57d..a00dd8d 100644
--- a/libc/arch-arm64/generic/bionic/memchr.S
+++ b/libc/arch-arm64/generic/bionic/memchr.S
@@ -101,7 +101,7 @@
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
- mov synd, vend.2d[0]
+ mov synd, vend.d[0]
/* Clear the soff*2 lower bits */
lsl tmp, soff, #1
lsr synd, synd, tmp
@@ -121,7 +121,7 @@
/* Use a fast check for the termination condition */
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d
- mov synd, vend.2d[0]
+ mov synd, vend.d[0]
/* We're not out of data, loop if we haven't found the character */
cbz synd, .Lloop
@@ -131,7 +131,7 @@
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
- mov synd, vend.2d[0]
+ mov synd, vend.d[0]
/* Only do the clear for the last possible block */
b.hi .Ltail
diff --git a/libc/arch-arm64/generic/bionic/strchr.S b/libc/arch-arm64/generic/bionic/strchr.S
index 469b83c..b54106d 100644
--- a/libc/arch-arm64/generic/bionic/strchr.S
+++ b/libc/arch-arm64/generic/bionic/strchr.S
@@ -109,7 +109,7 @@
addp vend1.16b, vend1.16b, vend2.16b // 128->64
lsr tmp1, tmp3, tmp1
- mov tmp3, vend1.2d[0]
+ mov tmp3, vend1.d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail
@@ -124,7 +124,7 @@
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vend1.16b, vend2.16b
addp vend1.2d, vend1.2d, vend1.2d
- mov tmp1, vend1.2d[0]
+ mov tmp1, vend1.d[0]
cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why
@@ -138,7 +138,7 @@
addp vend1.16b, vend1.16b, vend2.16b // 256->128
addp vend1.16b, vend1.16b, vend2.16b // 128->64
- mov tmp1, vend1.2d[0]
+ mov tmp1, vend1.d[0]
.Ltail:
/* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1