/*
 * Memory copy functions for 32-bit PowerPC.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

#define COPY_16_BYTES \
        lwz r7,4(r4); \
        lwz r8,8(r4); \
        lwz r9,12(r4); \
        lwzu r10,16(r4); \
        stw r7,4(r6); \
        stw r8,8(r6); \
        stw r9,12(r6); \
        stwu r10,16(r6)
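/*
 * Copies 16 bytes as four word loads followed by four word stores.
 * Roughly equivalent C, for illustration only (not part of the build);
 * src and dst are u32 pointers parked 4 bytes *before* the next word,
 * as the callers below arrange with addi rX,rX,-4:
 *
 *      u32 a = src[1], b = src[2], c = src[3], d = src[4];
 *      src += 4;                       // lwzu: load with update
 *      dst[1] = a; dst[2] = b; dst[3] = c; dst[4] = d;
 *      dst += 4;                       // stwu: store with update
 */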

#define COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
        lwz r7,4(r4); \
8 ## n ## 1: \
        lwz r8,8(r4); \
8 ## n ## 2: \
        lwz r9,12(r4); \
8 ## n ## 3: \
        lwzu r10,16(r4); \
8 ## n ## 4: \
        stw r7,4(r6); \
8 ## n ## 5: \
        stw r8,8(r6); \
8 ## n ## 6: \
        stw r9,12(r6); \
8 ## n ## 7: \
        stwu r10,16(r6)

#define COPY_16_BYTES_EXCODE(n) \
9 ## n ## 0: \
        addi r5,r5,-(16 * n); \
        b 104f; \
9 ## n ## 1: \
        addi r5,r5,-(16 * n); \
        b 105f; \
        .section __ex_table,"a"; \
        .align 2; \
        .long 8 ## n ## 0b,9 ## n ## 0b; \
        .long 8 ## n ## 1b,9 ## n ## 0b; \
        .long 8 ## n ## 2b,9 ## n ## 0b; \
        .long 8 ## n ## 3b,9 ## n ## 0b; \
        .long 8 ## n ## 4b,9 ## n ## 1b; \
        .long 8 ## n ## 5b,9 ## n ## 1b; \
        .long 8 ## n ## 6b,9 ## n ## 1b; \
        .long 8 ## n ## 7b,9 ## n ## 1b; \
        .text
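/*
 * For block n, COPY_16_BYTES_WITHEX labels its eight accesses
 * 8n0..8n7; COPY_16_BYTES_EXCODE emits the matching fixup stubs.
 * 9n0 (for the four loads) and 9n1 (for the four stores) subtract the
 * 16 * n bytes already completed in the faulting cache line from r5
 * and branch to the common read (104f) or write (105f) fault handler,
 * and the __ex_table entries wire each access to its stub.
 */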

        .text
        .stabs "arch/powerpc/lib/",N_SO,0,0,0f
        .stabs "copy_32.S",N_SO,0,0,0f
0:

CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)

/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero.  This requires that the destination
 * area is cacheable.  -- paulus
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore skip the optimised block that uses dcbz.  The
 * jump over it is replaced by a nop once the cache is active; this is
 * done in machine_init().
 */
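/*
 * A minimal C model of what memset below computes (illustration only;
 * it ignores the dcbz fast path, which has no portable C equivalent):
 *
 *      void *memset(void *s, int c, size_t n)
 *      {
 *              unsigned char *p = s;
 *
 *              while (n--)
 *                      *p++ = c;
 *              return s;
 *      }
 *
 * The assembly first replicates c into all four bytes of r4 (the two
 * rlwimi instructions), word-aligns the destination, and for c == 0
 * with the cache enabled clears whole cache lines with dcbz instead of
 * issuing stores, then finishes with word and byte tails.
 */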
_GLOBAL(memset)
        rlwimi r4,r4,8,16,23
        rlwimi r4,r4,16,0,15

        addi r6,r3,-4
        cmplwi 0,r5,4
        blt 7f
        stwu r4,4(r6)
        beqlr
        andi. r0,r6,3
        add r5,r0,r5
        subf r6,r0,r6
        cmplwi 0,r4,0
        bne 2f /* Use normal procedure if r4 is not zero */
_GLOBAL(memset_nocache_branch)
        b 2f /* Skip optimised block until cache is enabled */

        clrlwi r7,r6,32-LG_CACHELINE_BYTES
        add r8,r7,r5
        srwi r9,r8,LG_CACHELINE_BYTES
        addic. r9,r9,-1 /* total number of complete cachelines */
        ble 2f
        xori r0,r7,CACHELINE_MASK & ~3
        srwi. r0,r0,2
        beq 3f
        mtctr r0
4:      stwu r4,4(r6)
        bdnz 4b
3:      mtctr r9
        li r7,4
10:     dcbz r7,r6
        addi r6,r6,CACHELINE_BYTES
        bdnz 10b
        clrlwi r5,r8,32-LG_CACHELINE_BYTES
        addi r5,r5,4

2:      srwi r0,r5,2
        mtctr r0
        bdz 6f
1:      stwu r4,4(r6)
        bdnz 1b
6:      andi. r5,r5,3
7:      cmpwi 0,r5,0
        beqlr
        mtctr r5
        addi r6,r6,3
8:      stbu r4,1(r6)
        bdnz 8b
        blr

/*
 * This version uses dcbz on the complete cache lines in the
 * destination area to reduce memory traffic.  This requires that
 * the destination area is cacheable.
 * We only use this version if the source and dest don't overlap.
 * -- paulus.
 *
 * During early init, the cache might not be active yet, so dcbz cannot
 * be used.  We therefore jump to generic_memcpy, which doesn't use
 * dcbz.  This jump is replaced by a nop once the cache is active; this
 * is done in machine_init().
 */
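/*
 * Dispatch, in C form (illustration only, not part of the build):
 *
 *      void *memmove(void *dst, const void *src, size_t n)
 *      {
 *              if ((unsigned long)dst > (unsigned long)src)
 *                      return backwards_memcpy(dst, src, n);
 *              return memcpy(dst, src, n);     // i.e. fall through below
 *      }
 *
 * memcpy itself drops back to generic_memcpy when the regions overlap,
 * i.e. when src < dst + n && dst < src + n (the cmplw/crand sequence),
 * since dcbz would zero destination cache lines before the overlapping
 * source bytes in them have been read.
 */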
_GLOBAL(memmove)
        cmplw 0,r3,r4
        bgt backwards_memcpy
        /* fall through */

_GLOBAL(memcpy)
        b generic_memcpy
        add r7,r3,r5 /* test if the src & dst overlap */
        add r8,r4,r5
        cmplw 0,r4,r7
        cmplw 1,r3,r8
        crand 0,0,4 /* cr0.lt &= cr1.lt */
        blt generic_memcpy /* if regions overlap */

        addi r4,r4,-4
        addi r6,r3,-4
        neg r0,r3
        andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
        beq 58f

        cmplw 0,r5,r0 /* is this more than total to do? */
        blt 63f /* if not much to do */
        andi. r8,r0,3 /* get it word-aligned first */
        subf r5,r0,r5
        mtctr r8
        beq+ 61f
70:     lbz r9,4(r4) /* do some bytes */
        addi r4,r4,1
        addi r6,r6,1
        stb r9,3(r6)
        bdnz 70b
61:     srwi. r0,r0,2
        mtctr r0
        beq 58f
72:     lwzu r9,4(r4) /* do some words */
        stwu r9,4(r6)
        bdnz 72b

58:     srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
        clrlwi r5,r5,32-LG_CACHELINE_BYTES
        li r11,4
        mtctr r0
        beq 63f
53:
        dcbz r11,r6
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
#endif
#endif
#endif
        bdnz 53b

63:     srwi. r0,r5,2
        mtctr r0
        beq 64f
30:     lwzu r0,4(r4)
        stwu r0,4(r6)
        bdnz 30b

64:     andi. r0,r5,3
        mtctr r0
        beq+ 65f
        addi r4,r4,3
        addi r6,r6,3
40:     lbzu r0,1(r4)
        stbu r0,1(r6)
        bdnz 40b
65:     blr

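/*
 * Fallback copy used while the data cache is still disabled and when
 * memcpy above detects overlapping regions: no dcbz, no exception
 * table entries, just two words per loop iteration followed by the
 * word and byte tails (5: below word-aligns the destination first).
 */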
generic_memcpy:
        srwi. r7,r5,3
        addi r6,r3,-4
        addi r4,r4,-4
        beq 2f /* if less than 8 bytes to do */
        andi. r0,r6,3 /* get dest word aligned */
        mtctr r7
        bne 5f
1:      lwz r7,4(r4)
        lwzu r8,8(r4)
        stw r7,4(r6)
        stwu r8,8(r6)
        bdnz 1b
        andi. r5,r5,7
2:      cmplwi 0,r5,4
        blt 3f
        lwzu r0,4(r4)
        addi r5,r5,-4
        stwu r0,4(r6)
3:      cmpwi 0,r5,0
        beqlr
        mtctr r5
        addi r4,r4,3
        addi r6,r6,3
4:      lbzu r0,1(r4)
        stbu r0,1(r6)
        bdnz 4b
        blr
5:      subfic r0,r0,4
        mtctr r0
6:      lbz r7,4(r4)
        addi r4,r4,1
        stb r7,4(r6)
        addi r6,r6,1
        bdnz 6b
        subf r5,r0,r5
        rlwinm. r7,r5,32-3,3,31
        beq 2b
        mtctr r7
        b 1b

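/*
 * Backwards copy used by memmove when dst > src, so overlapping bytes
 * are read before they are overwritten.  Roughly, for illustration
 * only:
 *
 *      void *backwards_memcpy(void *dst, const void *src, size_t n)
 *      {
 *              unsigned char *d = (unsigned char *)dst + n;
 *              const unsigned char *s = (const unsigned char *)src + n;
 *
 *              while (n--)
 *                      *--d = *--s;
 *              return dst;
 *      }
 *
 * The assembly does the same eight bytes at a time, with word and byte
 * tails, and never touches dcbz since the regions may overlap.
 */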
_GLOBAL(backwards_memcpy)
        rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
        add r6,r3,r5
        add r4,r4,r5
        beq 2f
        andi. r0,r6,3
        mtctr r7
        bne 5f
1:      lwz r7,-4(r4)
        lwzu r8,-8(r4)
        stw r7,-4(r6)
        stwu r8,-8(r6)
        bdnz 1b
        andi. r5,r5,7
2:      cmplwi 0,r5,4
        blt 3f
        lwzu r0,-4(r4)
        subi r5,r5,4
        stwu r0,-4(r6)
3:      cmpwi 0,r5,0
        beqlr
        mtctr r5
4:      lbzu r0,-1(r4)
        stbu r0,-1(r6)
        bdnz 4b
        blr
5:      mtctr r0
6:      lbzu r7,-1(r4)
        stbu r7,-1(r6)
        bdnz 6b
        subf r5,r0,r5
        rlwinm. r7,r5,32-3,3,31
        beq 2b
        mtctr r7
        b 1b

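/*
 * Copy between user and kernel space: same cache-line strategy as
 * memcpy above, but every load and store is covered by an __ex_table
 * entry so that a fault lands in the fixup code further down instead
 * of oopsing.  Returns the number of bytes NOT copied (0 on success),
 * which is what the copy_to_user()/copy_from_user() wrappers expect.
 */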
_GLOBAL(__copy_tofrom_user)
        addi r4,r4,-4
        addi r6,r3,-4
        neg r0,r3
        andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
        beq 58f

        cmplw 0,r5,r0 /* is this more than total to do? */
        blt 63f /* if not much to do */
        andi. r8,r0,3 /* get it word-aligned first */
        mtctr r8
        beq+ 61f
70:     lbz r9,4(r4) /* do some bytes */
71:     stb r9,4(r6)
        addi r4,r4,1
        addi r6,r6,1
        bdnz 70b
61:     subf r5,r0,r5
        srwi. r0,r0,2
        mtctr r0
        beq 58f
72:     lwzu r9,4(r4) /* do some words */
73:     stwu r9,4(r6)
        bdnz 72b

        .section __ex_table,"a"
        .align 2
        .long 70b,100f
        .long 71b,101f
        .long 72b,102f
        .long 73b,103f
        .text

58:     srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
        clrlwi r5,r5,32-LG_CACHELINE_BYTES
        li r11,4
        beq 63f

        /* Here we decide how far ahead to prefetch the source */
        li r3,4
        cmpwi r0,1
        li r7,0
        ble 114f
        li r7,1
#if MAX_COPY_PREFETCH > 1
        /* Heuristically, for large transfers we prefetch
           MAX_COPY_PREFETCH cachelines ahead.  For small transfers
           we prefetch 1 cacheline ahead. */
        cmpwi r0,MAX_COPY_PREFETCH
        ble 112f
        li r7,MAX_COPY_PREFETCH
112:    mtctr r7
111:    dcbt r3,r4
        addi r3,r3,CACHELINE_BYTES
        bdnz 111b
#else
        dcbt r3,r4
        addi r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */

114:    subf r8,r7,r0
        mr r0,r7
        mtctr r8

53:     dcbt r3,r4
54:     dcbz r11,r6
        .section __ex_table,"a"
        .align 2
        .long 54b,105f
        .text
/* the main body of the cacheline loop */
        COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES_WITHEX(2)
        COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES_WITHEX(4)
        COPY_16_BYTES_WITHEX(5)
        COPY_16_BYTES_WITHEX(6)
        COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
        bdnz 53b
        cmpwi r0,0
        li r3,4
        li r7,0
        bne 114b

63:     srwi. r0,r5,2
        mtctr r0
        beq 64f
30:     lwzu r0,4(r4)
31:     stwu r0,4(r6)
        bdnz 30b

64:     andi. r0,r5,3
        mtctr r0
        beq+ 65f
40:     lbz r0,4(r4)
41:     stb r0,4(r6)
        addi r4,r4,1
        addi r6,r6,1
        bdnz 40b
65:     li r3,0
        blr

/* read fault, initial single-byte copy */
100:    li r9,0
        b 90f
/* write fault, initial single-byte copy */
101:    li r9,1
90:     subf r5,r8,r5
        li r3,0
        b 99f
/* read fault, initial word copy */
102:    li r9,0
        b 91f
/* write fault, initial word copy */
103:    li r9,1
91:     li r3,2
        b 99f

/*
 * This code handles faults in the cacheline loop and branches to either
 * 104f (if in the read part) or 105f (if in the write part), after
 * updating r5.
 */
        COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES_EXCODE(2)
        COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES_EXCODE(4)
        COPY_16_BYTES_EXCODE(5)
        COPY_16_BYTES_EXCODE(6)
        COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif

/* read fault in cacheline loop */
104:    li r9,0
        b 92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105:    li r9,1
92:     li r3,LG_CACHELINE_BYTES
        mfctr r8
        add r0,r0,r8
        b 106f
/* read fault in final word loop */
108:    li r9,0
        b 93f
/* write fault in final word loop */
109:    li r9,1
93:     andi. r5,r5,3
        li r3,2
        b 99f
/* read fault in final byte loop */
110:    li r9,0
        b 94f
/* write fault in final byte loop */
111:    li r9,1
94:     li r5,0
        li r3,0
/*
 * At this stage the number of bytes not copied is
 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
 */
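/*
 * For example (illustrative numbers): a fault in the final word loop
 * arrives here with r3 = 2, so with CTR = 5 words still outstanding
 * and r5 = 3 trailing bytes the count is (5 << 2) + 3 = 23 bytes not
 * copied; a write fault returns that directly, while a read fault
 * first retries byte by byte below.
 */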
99:     mfctr r0
106:    slw r3,r0,r3
        add. r3,r3,r5
        beq 120f /* shouldn't happen */
        cmpwi 0,r9,0
        bne 120f
/* for a read fault, first try to continue the copy one byte at a time */
        mtctr r3
130:    lbz r0,4(r4)
131:    stb r0,4(r6)
        addi r4,r4,1
        addi r6,r6,1
        bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132:    mfctr r3
        srwi. r0,r3,2
        li r9,0
        mtctr r0
        beq 113f
112:    stwu r9,4(r6)
        bdnz 112b
113:    andi. r0,r3,3
        mtctr r0
        beq 120f
114:    stb r9,4(r6)
        addi r6,r6,1
        bdnz 114b
120:    blr

        .section __ex_table,"a"
        .align 2
        .long 30b,108b
        .long 31b,109b
        .long 40b,110b
        .long 41b,111b
        .long 130b,132b
        .long 131b,120b
        .long 112b,120b
        .long 114b,120b
        .text