/*
 * Copyright (c) 2013
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __ANDROID__
# include <private/bionic_asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#else
# include <regdef.h>
# include <sys/asm.h>
#endif

/* Check to see if the MIPS architecture we are compiling for supports
   prefetching.  */

#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
#  define USE_PREFETCH
# endif
#endif

#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
#  define USE_DOUBLE
# endif
#endif

#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
#  define DOUBLE_ALIGN
# endif
#endif

/* Some asm.h files do not have the L macro definition.  */
#ifndef L
# if _MIPS_SIM == _ABIO32
#  define L(label) $L ## label
# else
#  define L(label) .L ## label
# endif
#endif

/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
#  define PTR_ADDIU addiu
# else
#  define PTR_ADDIU daddiu
# endif
#endif

/* New R6 instructions that may not be in asm.h.  */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABIO32
#  define PTR_LSA lsa
# else
#  define PTR_LSA dlsa
# endif
#endif

/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
   or PREFETCH_STORE_STREAMED offers a large performance advantage,
   but PREPAREFORSTORE has some special restrictions to consider.

   A prefetch with the 'prepare for store' hint does not copy a memory
   location into the cache; it just allocates a cache line and zeros
   it out.  This means that if you do not write to the entire cache
   line before it is written back to memory, some data will be zeroed
   out and lost.

   There are ifdef'ed sections of this memset to make sure that it does not
   do prefetches on cache lines that are not going to be completely written.
   This code is only needed and only used when PREFETCH_STORE_HINT is set to
   PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
   no larger than MAX_PREFETCH_SIZE bytes; if they are larger, it will not
   work correctly.  */

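/* Illustrative hazard example (added; assumes a 32-byte cache line): if
   a0 = 0x1010 and we issue a PREPAREFORSTORE prefetch at 0(a0), the whole
   line 0x1000-0x101f is allocated and zeroed, so the 16 bytes at
   0x1000-0x100f, which this call never stores to, would be lost when the
   line is written back.  The limit checks below exist to keep such
   prefetches strictly inside the region being set.  */
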
#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE		1
# define PREFETCH_HINT_STORE_STREAMED	5
# define PREFETCH_HINT_STORE_RETAINED	7
# define PREFETCH_HINT_PREPAREFORSTORE	30

/* If we have not picked out what hints to use at this point, use the
   standard load and store prefetch hints.  */
# ifndef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif

/* We double everything when USE_DOUBLE is true, so we do 2 prefetches to
   get 64 bytes in that case.  The assumption is that each individual
   prefetch brings in 32 bytes.  */
# ifdef USE_DOUBLE
#  define PREFETCH_CHUNK 64
#  define PREFETCH_FOR_STORE(chunk, reg) \
	pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
	pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
#  define PREFETCH_CHUNK 32
#  define PREFETCH_FOR_STORE(chunk, reg) \
	pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
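
/* For illustration only (assuming USE_DOUBLE, so PREFETCH_CHUNK is 64):
   PREFETCH_FOR_STORE (4, a0) expands to
	pref	PREFETCH_STORE_HINT, 256(a0)
	pref	PREFETCH_STORE_HINT, 288(a0)
   i.e. two 32-byte prefetches covering the 64-byte chunk that starts
   4 chunks (256 bytes) ahead of a0.  */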

/* MAX_PREFETCH_SIZE is the maximum size of a prefetch.  It must not be less
   than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
   of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
   hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
   used, then MAX_PREFETCH_SIZE does not matter.  */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
   than 5 on a STORE prefetch and that a single prefetch can never be larger
   than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
   we actually do two prefetches in that case, one 32 bytes after the other.  */
# ifdef USE_DOUBLE
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
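
/* Worked numbers (added for illustration): with USE_DOUBLE this evaluates to
   (5 * 64) + 32 + 128 = 480 bytes; without it, (5 * 32) + 128 = 288 bytes.
   A PREPAREFORSTORE prefetch is only issued while the destination pointer
   is at least this far from the end of the buffer.  */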

# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
   are before the buffer being copied.  We start copies with an offset
   of 4, so avoid this situation when using PREPAREFORSTORE.  */
#  error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif

#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
#  undef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif

/* We load/store 64 bits at a time when USE_DOUBLE is true.
   The C_ prefix stands for CHUNK and is used to avoid macro name
   conflicts with system header files.  */

#ifdef USE_DOUBLE
# define C_ST	sd
# if __MIPSEB
#  define C_STHI	sdl	/* high part is left in big-endian */
# else
#  define C_STHI	sdr	/* high part is right in little-endian */
# endif
#else
# define C_ST	sw
# if __MIPSEB
#  define C_STHI	swl	/* high part is left in big-endian */
# else
#  define C_STHI	swr	/* high part is right in little-endian */
# endif
#endif

/* Bookkeeping values for 32 vs. 64 bit mode.  */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
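/* For example, UNIT(16) is 64 bytes in 32-bit mode and 128 bytes in 64-bit
   (USE_DOUBLE) mode, which is why the main loop below advances a0 by
   UNIT(16) after storing 16 units per iteration.  */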

#ifdef __ANDROID__
LEAF(__memset_chk,0)
#else
LEAF(__memset_chk)
#endif
	bgtu	a2, a3, __memset_chk_fail

	// Fall through to memset...
END(__memset_chk)

#ifdef __ANDROID__
LEAF(memset,0)
#else
LEAF(memset)
#endif

	.set	nomips16
	.set	noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
   size, copy the dst pointer to v0 for the return value.  */
	slti	t2,a2,(2 * NSIZE)
	bne	t2,zero,L(lastb)
	move	v0,a0

/* If the memset value is not zero, smear it across all the bytes of a 32 or
   64 bit word.  */
	beq	a1,zero,L(set0)		/* If memset value is zero, no smear.  */
	PTR_SUBU a3,zero,a0
	nop

	/* Smear the byte into a 32 or 64 bit word.  */
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
# ifdef USE_DOUBLE
	dins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	dins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
	dins	a1, a1, 32, 32	/* Replicate fill byte into dbl word.  */
# else
	ins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	ins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
# endif
#else
# ifdef USE_DOUBLE
	and	a1,0xff
	dsll	t2,a1,8
	or	a1,t2
	dsll	t2,a1,16
	or	a1,t2
	dsll	t2,a1,32
	or	a1,t2
# else
	and	a1,0xff
	sll	t2,a1,8
	or	a1,t2
	sll	t2,a1,16
	or	a1,t2
# endif
#endif
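
	/* Example of the smear (illustrative value, not from the original):
	   if a1 arrives as 0x5a, the sequence above yields 0x5a5a5a5a in
	   32-bit mode or 0x5a5a5a5a5a5a5a5a with USE_DOUBLE, so each word
	   store below writes the fill byte to every byte it covers.  */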

/* If the destination address is not aligned, do a partial store to get it
   aligned.  If it is already aligned, just jump to L(aligned).  */
L(set0):
#ifndef R6_CODE
	andi	t2,a3,(NSIZE-1)		/* word-unaligned address?  */
	beq	t2,zero,L(aligned)	/* t2 is the unalignment count  */
	PTR_SUBU a2,a2,t2
	C_STHI	a1,0(a0)
	PTR_ADDU a0,a0,t2
#else /* R6_CODE */
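	/* Note (added explanation): R6 removed the swl/swr and sdl/sdr
	   partial-store instructions, so alignment is reached through a
	   branch table of byte stores instead.  t2 = a0 & (NSIZE-1) is the
	   misalignment; each bc below is 4 bytes, so lapc/PTR_LSA selects
	   entry t2, which stores the NSIZE - t2 bytes needed to reach
	   alignment.  */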
	andi	t2,a0,(NSIZE-1)
	lapc	t9,L(atable)
	PTR_LSA	t9,t2,t9,2
	jrc	t9
L(atable):
	bc	L(aligned)
# ifdef USE_DOUBLE
	bc	L(lb7)
	bc	L(lb6)
	bc	L(lb5)
	bc	L(lb4)
# endif
	bc	L(lb3)
	bc	L(lb2)
	bc	L(lb1)
L(lb7):
	sb	a1,6(a0)
L(lb6):
	sb	a1,5(a0)
L(lb5):
	sb	a1,4(a0)
L(lb4):
	sb	a1,3(a0)
L(lb3):
	sb	a1,2(a0)
L(lb2):
	sb	a1,1(a0)
L(lb1):
	sb	a1,0(a0)

	li	t9,NSIZE
	subu	t2,t9,t2
	PTR_SUBU a2,a2,t2
	PTR_ADDU a0,a0,t2
#endif /* R6_CODE */

L(aligned):
/* If USE_DOUBLE is not set, we may still want to align the data on a 16
   byte boundary instead of an 8 byte boundary to maximize the opportunity
   of proAptiv chips to do memory bonding (combining two sequential 4
   byte stores into one 8 byte store).  We know there are at least 4 bytes
   left to store or we would have jumped to L(lastb) earlier in the code.  */
#ifdef DOUBLE_ALIGN
	andi	t2,a3,4
	beq	t2,zero,L(double_aligned)
	PTR_SUBU a2,a2,t2
	sw	a1,0(a0)
	PTR_ADDU a0,a0,t2
L(double_aligned):
#endif

/* Now the destination is aligned to a (word or double word) aligned address.
   Set a2 to count how many bytes we have to store after all the 64/128 byte
   chunks are done, and a3 to the dst pointer after all the 64/128 byte
   chunks have been stored.  We will loop, incrementing a0 until it equals
   a3.  */
	andi	t8,a2,NSIZEDMASK	/* any whole 64-byte/128-byte chunks?  */
	beq	a2,t8,L(chkw)		/* if a2==t8, no 64-byte/128-byte chunks  */
	PTR_SUBU a3,a2,t8		/* subtract the remainder from a2  */
	PTR_ADDU a3,a0,a3		/* now a3 is the final dst after the loop  */
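
/* Worked example (added; 32-bit mode, NSIZEDMASK = 0x3f, value illustrative):
   for a2 = 200, t8 = 200 & 0x3f = 8, so a3 = a0 + 192 and L(loop16w) below
   runs three 64-byte iterations, leaving 8 bytes for the tail code.  */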

/* When in the loop we may prefetch with the 'prepare to store' hint;
   in that case a0+x should not be past the "t0-32" address.  This
   means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
   for x=64 the last "safe" a0 address is "t0-96".  In the current version we
   will use "prefetch hint,128(a0)", so "t0-160" is the limit.  */
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address  */
	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address  */
#endif
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
#endif

L(loop16w):
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu	v1,t9,a0		/* If a0 > t9, don't use the next prefetch.  */
	bgtz	v1,L(skip_pref)
	nop
#endif
#ifndef R6_CODE
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
#else
	PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	C_ST	a1,UNIT(8)(a0)
	C_ST	a1,UNIT(9)(a0)
	C_ST	a1,UNIT(10)(a0)
	C_ST	a1,UNIT(11)(a0)
	C_ST	a1,UNIT(12)(a0)
	C_ST	a1,UNIT(13)(a0)
	C_ST	a1,UNIT(14)(a0)
	C_ST	a1,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest  */
	bne	a0,a3,L(loop16w)
	nop
	move	a2,t8

/* Here we have the dest word-aligned but fewer than 64 bytes (or 128 bytes
   with USE_DOUBLE) to go.  Check for a 32 (64) byte chunk and store it if
   there is one.  Otherwise jump down to L(chk1w) to handle the tail end of
   the store.  */
L(chkw):
	andi	t8,a2,NSIZEMASK		/* is there a 32-byte/64-byte chunk?  */
					/* t8 is the remainder count past 32 bytes  */
	beq	a2,t8,L(chk1w)		/* when a2==t8, no 32-byte chunk  */
	nop
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)

/* Here we have fewer than 32 (or 64) bytes to set.  Set up for a loop to
   store one word (or double word) at a time.  Set a2 to count how many
   bytes we have to store after all the word (or double word) chunks are
   done, and a3 to the dst pointer after all the (d)word chunks have
   been stored.  We will loop, incrementing a0 until a0 equals a3.  */
L(chk1w):
	andi	a2,t8,(NSIZE-1)		/* a2 is the remainder past one (d)word chunks  */
	beq	a2,t8,L(lastb)
	PTR_SUBU a3,t8,a2		/* a3 is the count of bytes in (d)word chunks  */
	PTR_ADDU a3,a0,a3		/* a3 is the dst address after the loop  */

/* storing in words (4-byte or 8-byte chunks) */
L(wordCopy_loop):
	PTR_ADDIU a0,a0,UNIT(1)
	bne	a0,a3,L(wordCopy_loop)
	C_ST	a1,UNIT(-1)(a0)

/* Set the last 8 (or 16) bytes, one byte at a time.  */
L(lastb):
	blez	a2,L(leave)
	PTR_ADDU a3,a0,a2		/* a3 is the last dst address  */
L(lastbloop):
	PTR_ADDIU a0,a0,1
	bne	a0,a3,L(lastbloop)
	sb	a1,-1(a0)
L(leave):
	j	ra
	nop

	.set	at
	.set	reorder
END(memset)
#ifndef __ANDROID__
# ifdef _LIBC
libc_hidden_builtin_def (memset)
libc_hidden_builtin_def (__memset_chk)
# endif
#endif