arch/tile: core support for Tilera 32-bit chips.
arch/tile/lib/memcpy_32.S
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This file shares the implementation of the userspace memcpy and
 * the kernel's memcpy, copy_to_user and copy_from_user.
 */

#include <arch/chip.h>

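/* MEMCPY_USE_WH64 enables the "write hint 64" path below: each 64-byte
 * destination cache line is claimed with wh64 right before it is
 * entirely overwritten, so its old contents never have to be fetched.
 * Chips without wh64 prefetch the destination lines instead.
 */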
#if CHIP_HAS_WH64() || defined(MEMCPY_TEST_WH64)
#define MEMCPY_USE_WH64
#endif


#include <linux/linkage.h>

/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
#define memcpy __memcpy_asm
#define __copy_to_user_inatomic __copy_to_user_inatomic_asm
#define __copy_from_user_inatomic __copy_from_user_inatomic_asm
#define __copy_from_user_zeroing __copy_from_user_zeroing_asm
#endif

#define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1
#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1
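/* These values are chosen so the cleanup code can classify the caller
 * cheaply from r29 alone: zero means plain memcpy, a negative value
 * means copy_to_user, and for the copy_from_user variants the low bit
 * distinguishes the plain (1) from the zeroing (2) flavor.
 */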

        .section .text.memcpy_common, "ax"
        .align 64

/* Use this to preface each bundle that can cause an exception so
 * the kernel can clean up properly. The special cleanup code should
 * not use these, since it knows what it is doing.
 */
#define EX \
        .pushsection __ex_table, "a"; \
        .word 9f, memcpy_common_fixup; \
        .popsection; \
        9
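
/* The macro is used as "EX: { bundle }": the trailing 9 above combines
 * with the colon at the use site to define local label 9 on the bundle
 * itself, and the __ex_table entry maps that address to
 * memcpy_common_fixup.
 */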


/* __copy_from_user_inatomic takes the kernel target address in r0,
 * the user source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
ENTRY(__copy_from_user_inatomic)
        .type __copy_from_user_inatomic, @function
        FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
          .text.memcpy_common, \
          .Lend_memcpy_common - __copy_from_user_inatomic)
        { movei r29, IS_COPY_FROM_USER; j memcpy_common }
        .size __copy_from_user_inatomic, . - __copy_from_user_inatomic

/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
 * any uncopiable bytes are zeroed in the target.
 */
ENTRY(__copy_from_user_zeroing)
        .type __copy_from_user_zeroing, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
        .size __copy_from_user_zeroing, . - __copy_from_user_zeroing

/* __copy_to_user_inatomic takes the user target address in r0,
 * the kernel source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
ENTRY(__copy_to_user_inatomic)
        .type __copy_to_user_inatomic, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_COPY_TO_USER; j memcpy_common }
        .size __copy_to_user_inatomic, . - __copy_to_user_inatomic

ENTRY(memcpy)
        .type memcpy, @function
        FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_MEMCPY }
        .size memcpy, . - memcpy
        /* Fall through */

        .type memcpy_common, @function
memcpy_common:
        /* On entry, r29 holds one of the IS_* macro values from above. */


        /* r0 is the dest, r1 is the source, r2 is the size. */

        /* Save aside original dest so we can return it at the end. */
        { sw sp, lr; move r23, r0; or r4, r0, r1 }

        /* Check for an empty size. */
        { bz r2, .Ldone; andi r4, r4, 3 }

        /* Save aside original values in case of a fault. */
        { move r24, r1; move r25, r2 }
        move r27, lr

        /* Check for an unaligned source or dest. */
        { bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 }

.Lcheck_aligned_copy_size:
        /* If we are copying < 256 bytes, branch to simple case. */
        { blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 }

        /* Copying >= 256 bytes, so jump to complex prefetching loop. */
        { andi r6, r1, 63; j .Lcopy_many }

/*
 *
 * Aligned 4 byte at a time copy loop
 *
 */

.Lcopy_8_loop:
        /* Copy two words at a time to hide load latency. */
EX:     { lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 }
EX:     { lw r4, r1; addi r1, r1, 4 }
EX:     { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
EX:     { sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_8_check:
        { bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 }

        /* Copy odd leftover word, if any. */
        { bnzt r4, .Lcheck_odd_stragglers }
EX:     { lw r3, r1; addi r1, r1, 4 }
EX:     { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }

.Lcheck_odd_stragglers:
        { bnz r2, .Lcopy_unaligned_few }

.Ldone:
        /* For memcpy return original dest address, else zero. */
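        /* (mz: r0 = r23 if r29 == 0, i.e. IS_MEMCPY; otherwise r0 = 0,
         * which equals r2 here and so reports no uncopiable bytes.)
         */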
        { mz r0, r29, r23; jrp lr }


/*
 *
 * Prefetching multiple cache line copy handler (for large transfers).
 *
 */

        /* Copy words until r1 is cache-line-aligned. */
.Lalign_loop:
EX:     { lw r3, r1; addi r1, r1, 4 }
        { andi r6, r1, 63 }
EX:     { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_many:
        { bnzt r6, .Lalign_loop; addi r9, r0, 63 }

        { addi r3, r1, 60; andi r9, r9, -64 }

#ifdef MEMCPY_USE_WH64
        /* No need to prefetch dst, we'll just do the wh64
         * right before we copy a line.
         */
#endif

EX:     { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, .; move r27, lr }
EX:     { lw r6, r3; addi r3, r3, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
EX:     { lw r7, r3; addi r3, r3, 64 }
#ifndef MEMCPY_USE_WH64
        /* Prefetch the dest */
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        /* Use a real load to cause a TLB miss if necessary. We aren't using
         * r28, so this should be fine.
         */
EX:     { lw r28, r9; addi r9, r9, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        { prefetch r9; addi r9, r9, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
        { prefetch r9; addi r9, r9, 64 }
#endif
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bz zero, .Lbig_loop2 }

/* On entry to this loop:
 * - r0 points to the start of dst line 0
 * - r1 points to start of src line 0
 * - r2 >= (256 - 60), only the first time the loop trips.
 * - r3 contains r1 + 128 + 60 [pointer to end of source line 2]
 *   This is our prefetch address. When we get near the end
 *   rather than prefetching off the end this is changed to point
 *   to some "safe" recently loaded address.
 * - r5 contains *(r1 + 60) [i.e. last word of source line 0]
 * - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
 * - r9 contains ((r0 + 63) & -64)
 *   [start of next dst cache line.]
 */

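/* Each trip through the loop below copies three 64-byte source lines
 * via .Lcopy_line / .Lcopy_line2, while r5, r6 and r7 are kept loaded
 * with the last word of lines further ahead so each copy can stall on
 * the corresponding register to know its prefetched line has arrived.
 */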
.Lbig_loop:
        { jal .Lcopy_line2; add r15, r1, r2 }

.Lbig_loop2:
        /* Copy line 0, first stalling until r5 is ready. */
EX:     { move r12, r5; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
EX:     { lw r5, r3; addi r3, r3, 64 }
        { jal .Lcopy_line }

        /* Copy line 1, first stalling until r6 is ready. */
EX:     { move r12, r6; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
EX:     { lw r6, r3; addi r3, r3, 64 }
        { jal .Lcopy_line }

        /* Copy line 2, first stalling until r7 is ready. */
EX:     { move r12, r7; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
        /* Prefetch several lines ahead. */
EX:     { lw r7, r3; addi r3, r3, 64 }
        /* Use up a caches-busy cycle by jumping back to the top of the
         * loop. Might as well get it out of the way now.
         */
        { j .Lbig_loop }


/* On entry:
 * - r0 points to the destination line.
 * - r1 points to the source line.
 * - r3 is the next prefetch address.
 * - r9 holds the last address used for wh64.
 * - r12 = WORD_15
 * - r16 = WORD_0.
 * - r17 == r1 + 16.
 * - r27 holds saved lr to restore.
 *
 * On exit:
 * - r0 is incremented by 64.
 * - r1 is incremented by 64, unless that would point to a word
 *   beyond the end of the source array, in which case it is redirected
 *   to point to an arbitrary word already in the cache.
 * - r2 is decremented by 64.
 * - r3 is unchanged, unless it points to a word beyond the
 *   end of the source array, in which case it is redirected
 *   to point to an arbitrary word already in the cache.
 *   Redirecting is OK since if we are that close to the end
 *   of the array we will not come back to this subroutine
 *   and use the contents of the prefetched address.
 * - r4 is nonzero iff r2 >= 64.
 * - r9 is incremented by 64, unless it points beyond the
 *   end of the last full destination cache line, in which
 *   case it is redirected to a "safe address" that can be
 *   clobbered (sp - 64)
 * - lr contains the value in r27.
 */

/* r26 unused */

.Lcopy_line:
        /* TODO: when r3 goes past the end, we would like to redirect it
         * to prefetch the last partial cache line (if any) just once, for the
         * benefit of the final cleanup loop. But we don't want to
         * prefetch that line more than once, or subsequent prefetches
         * will go into the RTF. But then .Lbig_loop should unconditionally
         * branch to top of loop to execute final prefetch, and its
         * nop should become a conditional branch.
         */

        /* We need two non-memory cycles here to cover the resources
         * used by the loads initiated by the caller.
         */
        { add r15, r1, r2 }
.Lcopy_line2:
        { slt_u r13, r3, r15; addi r17, r1, 16 }

        /* NOTE: this will stall for one cycle as L1 is busy. */

        /* Fill second L1D line. */
EX:     { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */

#ifdef MEMCPY_TEST_WH64
        /* Issue a fake wh64 that clobbers the destination words
         * with random garbage, for testing.
         */
        { movei r19, 64; crc32_32 r10, r2, r9 }
.Lwh64_test_loop:
EX:     { sw r9, r10; addi r9, r9, 4; addi r19, r19, -4 }
        { bnzt r19, .Lwh64_test_loop; crc32_32 r10, r10, r19 }
#elif CHIP_HAS_WH64()
        /* Prepare destination line for writing. */
EX:     { wh64 r9; addi r9, r9, 64 }
#else
        /* Prefetch dest line */
        { prefetch r9; addi r9, r9, 64 }
#endif
        /* Load seven words that are L1D hits to cover wh64 L2 usage. */

        /* Load the three remaining words from the last L1D line, which
         * we know has already filled the L1D.
         */
EX:     { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 } /* r4 = WORD_12 */
EX:     { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 } /* r8 = WORD_13 */
EX:     { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */

        /* Load the three remaining words from the first L1D line, first
         * stalling until it has filled by "looking at" r16.
         */
EX:     { lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
EX:     { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
EX:     { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */

        /* Load second word from the second L1D line, first
         * stalling until it has filled by "looking at" r17.
         */
EX:     { lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */

        /* Store last word to the destination line, potentially dirtying it
         * for the first time, which keeps the L2 busy for two cycles.
         */
EX:     { sw r10, r12 } /* store(WORD_15) */

        /* Use two L1D hits to cover the sw L2 access above. */
EX:     { lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
EX:     { lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */

        /* Fill third L1D line. */
EX:     { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */

        /* Store first L1D line. */
EX:     { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX:     { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX:     { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
#ifdef MEMCPY_USE_WH64
EX:     { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
#else
        /* Back r9 up to a cache line we are already storing to
         * if it gets past the end of the dest vector. Strictly speaking,
         * we don't need to back up to the start of a cache line, but it's
         * free and tidy, so why not?
         */
EX:     { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
#endif
        /* Store second L1D line. */
EX:     { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 } /* store(WORD_4) */
EX:     { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
EX:     { sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
EX:     { sw r0, r12; addi r0, r0, 4 } /* store(WORD_7) */

EX:     { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
EX:     { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
EX:     { lw r15, r1; move r1, r20 } /* r15 = WORD_11 */

        /* Store third L1D line. */
EX:     { sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
EX:     { sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
EX:     { sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
EX:     { sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */

        /* Store rest of fourth L1D line. */
EX:     { sw r0, r4; addi r0, r0, 4 } /* store(WORD_12) */
        {
        EX:     sw r0, r8 /* store(WORD_13) */
        addi r0, r0, 4
        /* Will r2 be >= 64 after we subtract 64 below? */
        shri r4, r2, 7
        }
        {
        EX:     sw r0, r11 /* store(WORD_14) */
        addi r0, r0, 8
        /* Record 64 bytes successfully copied. */
        addi r2, r2, -64
        }

        { jrp lr; move lr, r27 }

        /* Convey to the backtrace library that the stack frame is size
         * zero, and the real return address is on the stack rather than
         * in 'lr'.
         */
        { info 8 }

        .align 64
.Lcopy_unaligned_maybe_many:
        /* Skip the setup overhead if we aren't copying many bytes. */
        { slti_u r8, r2, 20; sub r4, zero, r0 }
        { bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 }
        { bz r4, .Ldest_is_word_aligned; add r18, r1, r2 }

/*
 *
 * unaligned 4 byte at a time copy handler.
 *
 */

        /* Copy single bytes until r0 == 0 mod 4, so we can store words. */
.Lalign_dest_loop:
EX:     { lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 }
EX:     { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
        { bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 }

        /* If source and dest are now *both* aligned, do an aligned copy. */
        { bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 }

.Ldest_is_word_aligned:

#if CHIP_HAS_DWORD_ALIGN()
EX:     { andi r8, r0, 63; lwadd_na r6, r1, 4 }
        { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }

/* This copies unaligned words until either there are fewer
 * than 4 bytes left to copy, or until the destination pointer
 * is cache-aligned, whichever comes first.
 *
 * On entry:
 * - r0 is the next store address.
 * - r1 points 4 bytes past the load address corresponding to r0.
 * - r2 >= 4
 * - r6 is the next aligned word loaded.
 */
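
/* The loop below leans on two instructions for unaligned sources:
 * lwadd_na, which loads the aligned word containing the (possibly
 * unaligned) address and post-increments the pointer, and dword_align,
 * which in effect does the shift-and-OR merge of two neighboring
 * aligned words that the !CHIP_HAS_DWORD_ALIGN() path below spells
 * out by hand.
 */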
.Lcopy_unaligned_src_words:
EX:     { lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 }
        /* stall */
        { dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 }
EX:     { swadd r0, r6, 4; addi r2, r2, -4 }
        { bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 }
        { bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

/* On entry:
 * - r0 is the next store address.
 * - r1 points 4 bytes past the load address corresponding to r0.
 * - r2 >= 4 (# of bytes left to store).
 * - r6 is the next aligned src word value.
 * - r9 = (r2 < 64U).
 * - r18 points one byte past the end of source memory.
 */
.Ldest_is_L2_line_aligned:

        {
        /* Less than a full cache line remains. */
        bnz r9, .Lcleanup_unaligned_words
        move r7, r6
        }

        /* r2 >= 64 */

        /* Kick off two prefetches, but don't go past the end. */
        { addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 }
        { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
        { mvz r3, r8, r1; addi r8, r3, 64 }
        { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
        { mvz r3, r8, r1; movei r17, 0 }

.Lcopy_unaligned_line:
        /* Prefetch another line. */
        { prefetch r3; addi r15, r1, 60; addi r3, r3, 64 }
        /* Fire off a load of the last word we are about to copy. */
EX:     { lw_na r15, r15; slt_u r8, r3, r18 }

EX:     { mvz r3, r8, r1; wh64 r0 }

/* This loop runs twice.
 *
 * On entry:
 * - r17 is even before the first iteration, and odd before
 *   the second. It is incremented inside the loop. Encountering
 *   an even value at the end of the loop makes it stop.
 */
.Lcopy_half_an_unaligned_line:
EX:     {
        /* Stall until the last byte is ready. In the steady state this
         * guarantees all words to load below will be in the L2 cache, which
         * avoids shunting the loads to the RTF.
         */
        move zero, r15
        lwadd_na r7, r1, 16
        }
EX:     { lwadd_na r11, r1, 12 }
EX:     { lwadd_na r14, r1, -24 }
EX:     { lwadd_na r8, r1, 4 }
EX:     { lwadd_na r9, r1, 4 }
EX:     {
        lwadd_na r10, r1, 8
        /* r16 = (r2 < 64), after we subtract 32 from r2 below. */
        slti_u r16, r2, 64 + 32
        }
EX:     { lwadd_na r12, r1, 4; addi r17, r17, 1 }
EX:     { lwadd_na r13, r1, 8; dword_align r6, r7, r1 }
EX:     { swadd r0, r6, 4; dword_align r7, r8, r1 }
EX:     { swadd r0, r7, 4; dword_align r8, r9, r1 }
EX:     { swadd r0, r8, 4; dword_align r9, r10, r1 }
EX:     { swadd r0, r9, 4; dword_align r10, r11, r1 }
EX:     { swadd r0, r10, 4; dword_align r11, r12, r1 }
EX:     { swadd r0, r11, 4; dword_align r12, r13, r1 }
EX:     { swadd r0, r12, 4; dword_align r13, r14, r1 }
EX:     { swadd r0, r13, 4; addi r2, r2, -32 }
        { move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line }

        { bzt r16, .Lcopy_unaligned_line; move r7, r6 }

/* On entry:
 * - r0 is the next store address.
 * - r1 points 4 bytes past the load address corresponding to r0.
 * - r2 >= 0 (# of bytes left to store).
 * - r7 is the next aligned src word value.
 */
.Lcleanup_unaligned_words:
        /* Handle any trailing bytes. */
        { bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 }
        { bzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

        /* Move r1 back to the point where it corresponds to r0. */
        { addi r1, r1, -4 }

#else /* !CHIP_HAS_DWORD_ALIGN() */

        /* Compute right/left shift counts and load initial source words. */
        { andi r5, r1, -4; andi r3, r1, 3 }
EX:     { lw r6, r5; addi r5, r5, 4; shli r3, r3, 3 }
EX:     { lw r7, r5; addi r5, r5, 4; sub r4, zero, r3 }

        /* Load and store one word at a time, using shifts and ORs
         * to correct for the misaligned src.
         */
.Lcopy_unaligned_src_loop:
        { shr r6, r6, r3; shl r8, r7, r4 }
EX:     { lw r7, r5; or r8, r8, r6; move r6, r7 }
EX:     { sw r0, r8; addi r0, r0, 4; addi r2, r2, -4 }
        { addi r5, r5, 4; slti_u r8, r2, 8 }
        { bzt r8, .Lcopy_unaligned_src_loop; addi r1, r1, 4 }

        { bz r2, .Lcopy_unaligned_done }
#endif /* !CHIP_HAS_DWORD_ALIGN() */

        /* Fall through */

/*
 *
 * 1 byte at a time copy handler.
 *
 */

.Lcopy_unaligned_few:
EX:     { lb_u r3, r1; addi r1, r1, 1 }
EX:     { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
        { bnzt r2, .Lcopy_unaligned_few }

.Lcopy_unaligned_done:

        /* For memcpy return original dest address, else zero. */
        { mz r0, r29, r23; jrp lr }

.Lend_memcpy_common:
        .size memcpy_common, .Lend_memcpy_common - memcpy_common

        .section .fixup,"ax"
memcpy_common_fixup:
        .type memcpy_common_fixup, @function

        /* Skip any bytes we already successfully copied.
         * r2 (num remaining) is correct, but r0 (dst) and r1 (src)
         * may not be quite right because of unrolling and prefetching.
         * So we need to recompute their values as the address just
         * after the last byte we are sure was successfully loaded and
         * then stored.
         */

        /* Determine how many bytes we successfully copied. */
        { sub r3, r25, r2 }

        /* Add this to the original r0 and r1 to get their new values. */
        { add r0, r23, r3; add r1, r24, r3 }

        { bzt r29, memcpy_fixup_loop }
        { blzt r29, copy_to_user_fixup_loop }

copy_from_user_fixup_loop:
        /* Try copying the rest one byte at a time, expecting a load fault. */
.Lcfu:  { lb_u r3, r1; addi r1, r1, 1 }
        { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
        { bnzt r2, copy_from_user_fixup_loop }

.Lcopy_from_user_fixup_zero_remainder:
        { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
        /* byte-at-a-time loop faulted, so zero the rest. */
        { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
1:      { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
        { bnzt r3, 1b }
2:      move lr, r27
        { move r0, r2; jrp lr }

copy_to_user_fixup_loop:
        /* Try copying the rest one byte at a time, expecting a store fault. */
        { lb_u r3, r1; addi r1, r1, 1 }
.Lctu:  { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
        { bnzt r2, copy_to_user_fixup_loop }
.Lcopy_to_user_fixup_done:
        move lr, r27
        { move r0, r2; jrp lr }

memcpy_fixup_loop:
        /* Try copying the rest one byte at a time. We expect a disastrous
         * fault to happen since we are in fixup code, but let it happen.
         */
        { lb_u r3, r1; addi r1, r1, 1 }
        { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
        { bnzt r2, memcpy_fixup_loop }
        /* This should be unreachable, we should have faulted again.
         * But be paranoid and handle it in case some interrupt changed
         * the TLB or something.
         */
        move lr, r27
        { move r0, r23; jrp lr }

        .size memcpy_common_fixup, . - memcpy_common_fixup

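/* The fixup loops above can themselves fault while retrying: a load
 * fault at .Lcfu bails out to .Lcopy_from_user_fixup_zero_remainder,
 * and a store fault at .Lctu bails out to .Lcopy_to_user_fixup_done.
 */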
        .section __ex_table,"a"
        .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
        .word .Lctu, .Lcopy_to_user_fixup_done