Remove optimized code for bzero, which was removed from POSIX in 2008.

I'll come back for the last bcopy remnant...
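
For anyone still carrying a stray bzero call: bzero(s, n) has always been
equivalent to memset(s, 0, n), so nothing is lost but the alias. A minimal
sketch of the portable replacement (the wrapper name is illustrative):

    /* Portable stand-in for the removed bzero(3). */
    #include <string.h>

    static inline void zero_bytes(void *s, size_t n) {
        memset(s, 0, n);  /* bzero(s, n) == memset(s, 0, n) */
    }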

Bug: http://b/26407170
Change-Id: Iabfeb95fc8a4b4b3992e3cc209ec5221040e7c26
diff --git a/libc/arch-x86/atom/string/sse2-memset-atom.S b/libc/arch-x86/atom/string/sse2-memset-atom.S
index b0963a1..e8ceee1 100644
--- a/libc/arch-x86/atom/string/sse2-memset-atom.S
+++ b/libc/arch-x86/atom/string/sse2-memset-atom.S
@@ -84,16 +84,10 @@
 #define PUSH(REG)	pushl REG; CFI_PUSH (REG)
 #define POP(REG)	popl REG; CFI_POP (REG)
 
-#ifdef USE_AS_BZERO
-# define DEST		PARMS
-# define LEN		DEST+4
-# define SETRTNVAL
-#else
-# define DEST		PARMS
-# define CHR		DEST+4
-# define LEN		CHR+4
-# define SETRTNVAL	movl DEST(%esp), %eax
-#endif
+#define DEST		PARMS
+#define CHR		DEST+4
+#define LEN		CHR+4
+#define SETRTNVAL	movl DEST(%esp), %eax
 
 #if (defined SHARED || defined __PIC__)
 # define ENTRANCE	PUSH (%ebx);
@@ -148,16 +142,12 @@
 	ENTRANCE
 
 	movl	LEN(%esp), %ecx
-#ifdef USE_AS_BZERO
-	xor	%eax, %eax
-#else
 	movzbl	CHR(%esp), %eax
 	movb	%al, %ah
 	/* Fill the whole EAX with pattern.  */
 	movl	%eax, %edx
 	shl	$16, %eax
 	or	%edx, %eax
-#endif
 	movl	DEST(%esp), %edx
 	cmp	$32, %ecx
 	jae	L(32bytesormore)
@@ -287,12 +277,8 @@
 /* ECX > 32 and EDX is 4 byte aligned.  */
 L(32bytesormore):
 	/* Fill xmm0 with the pattern.  */
-#ifdef USE_AS_BZERO
-	pxor	%xmm0, %xmm0
-#else
 	movd	%eax, %xmm0
 	pshufd	$0, %xmm0, %xmm0
-#endif
 	testl	$0xf, %edx
 	jz	L(aligned_16)
 /* ECX > 32 and EDX is not 16 byte aligned.  */
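
Note: the surviving non-bzero path first replicates the fill byte across all
four bytes of EAX (the movb/shl/or sequence kept above), then pshufd $0
broadcasts that 32-bit word into every lane of xmm0. A C sketch of the same
byte-replication step, using a multiply instead of the shift/or sequence
(illustrative only; the assembly above is the real implementation):

    #include <stdint.h>

    /* Spread the low byte of c into all four bytes of a 32-bit word,
     * e.g. 0xAB -> 0xABABABAB, matching what memset loads into EAX. */
    static inline uint32_t replicate_byte(int c) {
        return (uint32_t)(uint8_t)c * 0x01010101u;
    }

With that word in hand, pshufd $0 plays the role of _mm_set1_epi32(), which
is why the bzero-only pxor shortcut can be dropped without touching the
general path.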