/*
 * Itanium 2-optimized version of memcpy and copy_user function
 *
 * Inputs:
 *	in0:	destination address
 *	in1:	source address
 *	in2:	number of bytes to copy
 * Output:
 *	for memcpy:    return dest
 *	for copy_user: return 0 on success,
 *		       or the number of bytes NOT copied if an error occurred.
 *
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
 */
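
/*
 * Caller-side view (illustrative sketch, not part of the original file):
 * the copy_user return convention documented above is typically consumed as
 *
 *	left = __copy_user(to, from, n);
 *	if (left)		// 'left' bytes at the end were NOT copied
 *		handle_fault(n - left, left);
 *
 * handle_fault() is a hypothetical helper, used here only to show the
 * return-value convention.
 */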
#include <asm/asmmacro.h>
#include <asm/page.h>

#define EK(y...) EX(y)

/* McKinley specific optimization */

#define retval r8
#define saved_pfs r31
#define saved_lc r10
#define saved_pr r11
#define saved_in0 r14
#define saved_in1 r15
#define saved_in2 r16

#define src0 r2
#define src1 r3
#define dst0 r17
#define dst1 r18
#define cnt r9

/* r19-r30 are temp for each code section */
#define PREFETCH_DIST 8
#define src_pre_mem r19
#define dst_pre_mem r20
#define src_pre_l2 r21
#define dst_pre_l2 r22
#define t1 r23
#define t2 r24
#define t3 r25
#define t4 r26
#define t5 t1	// alias!
#define t6 t2	// alias!
#define t7 t3	// alias!
#define n8 r27
#define t9 t5	// alias!
#define t10 t4	// alias!
#define t11 t7	// alias!
#define t12 t6	// alias!
#define t14 t10	// alias!
#define t13 r28
#define t15 r29
#define tmp r30

/* defines for long_copy block */
#define A 0
#define B (PREFETCH_DIST)
#define C (B + PREFETCH_DIST)
#define D (C + 1)
#define N (D + 1)
#define Nrot ((N + 7) & ~7)
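
/*
 * For reference, with PREFETCH_DIST = 8 these work out to A = 0, B = 8,
 * C = 16, D = 17, N = 18, and Nrot = 24 (N rounded up to the next multiple
 * of 8, as the rotating register allocation requires).
 */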

/* alias */
#define in0 r32
#define in1 r33
#define in2 r34

GLOBAL_ENTRY(memcpy)
	and r28=0x7,in0
	and r29=0x7,in1
	mov f6=f0
	mov retval=in0
	br.cond.sptk .common_code
	;;
END(memcpy)
GLOBAL_ENTRY(__copy_user)
	.prologue
// check dest alignment
	and r28=0x7,in0
	and r29=0x7,in1
	mov f6=f1
	mov saved_in0=in0	// save dest pointer
	mov saved_in1=in1	// save src pointer
	mov retval=r0		// initialize return value
	;;
.common_code:
	cmp.gt p15,p0=8,in2	// check for small size
	cmp.ne p13,p0=0,r28	// check dest alignment
	cmp.ne p14,p0=0,r29	// check src alignment
	add src0=0,in1
	sub r30=8,r28		// for .align_dest
	mov saved_in2=in2	// save len
	;;
	add dst0=0,in0
	add dst1=1,in0		// dest odd index
	cmp.le p6,p0 = 1,r30	// for .align_dest
(p15)	br.cond.dpnt .memcpy_short
(p13)	br.cond.dpnt .align_dest
(p14)	br.cond.dpnt .unaligned_src
	;;

// both dest and src are aligned on 8-byte boundary
.aligned_src:
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
	.save pr, saved_pr
	mov saved_pr=pr

	shr.u cnt=in2,7		// this many cache lines
	;;
	cmp.lt p6,p0=2*PREFETCH_DIST,cnt
	cmp.lt p7,p8=1,cnt
	.save ar.lc, saved_lc
	mov saved_lc=ar.lc
	.body
	add cnt=-1,cnt
	add src_pre_mem=0,in1	// prefetch src pointer
	add dst_pre_mem=0,in0	// prefetch dest pointer
	;;
(p7)	mov ar.lc=cnt		// prefetch count
(p8)	mov ar.lc=r0
(p6)	br.cond.dpnt .long_copy
	;;

.prefetch:
	lfetch.fault [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few .prefetch
	;;

.medium_copy:
	and tmp=31,in2		// copy length after iteration
	shr.u r29=in2,5		// number of 32-byte iterations
	add dst1=8,dst0		// 2nd dest pointer
	;;
	add cnt=-1,r29		// ctop iteration adjustment
	cmp.eq p10,p0=r29,r0	// do we really need to loop?
	add src1=8,src0		// 2nd src pointer
	cmp.le p6,p0=8,tmp
	;;
	cmp.le p7,p0=16,tmp
	mov ar.lc=cnt		// loop setup
	cmp.eq p16,p17 = r0,r0
	mov ar.ec=2
(p10)	br.dpnt.few .aligned_src_tail
	;;
	TEXT_ALIGN(32)
1:
	EX(.ex_handler, (p16) ld8 r34=[src0],16)
	EK(.ex_handler, (p16) ld8 r38=[src1],16)
	EX(.ex_handler, (p17) st8 [dst0]=r33,16)
	EK(.ex_handler, (p17) st8 [dst1]=r37,16)
	;;
	EX(.ex_handler, (p16) ld8 r32=[src0],16)
	EK(.ex_handler, (p16) ld8 r36=[src1],16)
	EX(.ex_handler, (p16) st8 [dst0]=r34,16)
	EK(.ex_handler, (p16) st8 [dst1]=r38,16)
	br.ctop.dptk.few 1b
	;;

.aligned_src_tail:
	EX(.ex_handler, (p6) ld8 t1=[src0])
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	EX(.ex_hndlr_s, (p7) ld8 t2=[src1],8)
	cmp.le p8,p0=24,tmp
	and r21=-8,tmp
	;;
	EX(.ex_hndlr_s, (p8) ld8 t3=[src1])
	EX(.ex_handler, (p6) st8 [dst0]=t1)	// store byte 1
	and in2=7,tmp				// remaining length
	EX(.ex_hndlr_d, (p7) st8 [dst1]=t2,8)	// store byte 2
	add src0=src0,r21			// setting up src pointer
	add dst0=dst0,r21			// setting up dest pointer
	;;
	EX(.ex_handler, (p8) st8 [dst1]=t3)	// store byte 3
	mov pr=saved_pr,-1
	br.dptk.many .memcpy_short
	;;

/* code taken from copy_page_mck */
.long_copy:
	.rotr v[2*PREFETCH_DIST]
	.rotp p[N]
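
	// Note on the software pipeline below (summary derived from the loop
	// bodies): p[A] gates the src-from-memory prefetch load, p[B] the
	// store that prefetches dst from memory, p[C] the src-from-L2
	// prefetch load, and p[D] the dst-L2 prefetch store plus the actual
	// copy.  With stage indices A=0, B=8, C=16, D=17, the memory
	// prefetches run 17 and 9 cache lines ahead of the copy and the L2
	// prefetch one line ahead.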

	mov src_pre_mem = src0
	mov pr.rot = 0x10000
	mov ar.ec = 1			// special unrolled loop

	mov dst_pre_mem = dst0

	add src_pre_l2 = 8*8, src0
	add dst_pre_l2 = 8*8, dst0
	;;
	add src0 = 8, src_pre_mem	// first t1 src
	mov ar.lc = 2*PREFETCH_DIST - 1
	shr.u cnt=in2,7			// number of lines
	add src1 = 3*8, src_pre_mem	// first t3 src
	add dst0 = 8, dst_pre_mem	// first t1 dst
	add dst1 = 3*8, dst_pre_mem	// first t3 dst
	;;
	and tmp=127,in2			// remaining bytes after this block
	add cnt = -(2*PREFETCH_DIST) - 1, cnt
	// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
	EX(.ex_hndlr_lcpy_1, (p[A]) ld8 v[A] = [src_pre_mem], 128)	// M0
	EK(.ex_hndlr_lcpy_1, (p[B]) st8 [dst_pre_mem] = v[B], 128)	// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq p16, p0 = r0, r0		// reset p16 to 1
	mov ar.lc = cnt
	mov ar.ec = N			// # of stages in pipeline
	;;
.line_copy:
	EX(.ex_handler, (p[D]) ld8 t2 = [src0], 3*8)			// M0
	EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8)			// M1
	EX(.ex_handler_lcpy, (p[B]) st8 [dst_pre_mem] = v[B], 128)	// M2 prefetch dst from memory
	EK(.ex_handler_lcpy, (p[D]) st8 [dst_pre_l2] = n8, 128)		// M3 prefetch dst from L2
	;;
	EX(.ex_handler_lcpy, (p[A]) ld8 v[A] = [src_pre_mem], 128)	// M0 prefetch src from memory
	EK(.ex_handler_lcpy, (p[C]) ld8 n8 = [src_pre_l2], 128)		// M1 prefetch src from L2
	EX(.ex_handler, (p[D]) st8 [dst0] = t1, 8)			// M2
	EK(.ex_handler, (p[D]) st8 [dst1] = t3, 8)			// M3
	;;
	EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8)
	EK(.ex_handler, (p[D]) ld8 t7 = [src1], 3*8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t2, 3*8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8)
	;;
	EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
	EK(.ex_handler, (p[D]) ld8 t10 = [src1], 8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t7, 3*8)
	;;
	EX(.ex_handler, (p[D]) ld8 t9 = [src0], 3*8)
	EK(.ex_handler, (p[D]) ld8 t11 = [src1], 3*8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t10, 8)
	;;
	EX(.ex_handler, (p[D]) ld8 t12 = [src0], 8)
	EK(.ex_handler, (p[D]) ld8 t14 = [src1], 8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t9, 3*8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t11, 3*8)
	;;
	EX(.ex_handler, (p[D]) ld8 t13 = [src0], 4*8)
	EK(.ex_handler, (p[D]) ld8 t15 = [src1], 4*8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t12, 8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t14, 8)
	;;
	EX(.ex_handler, (p[C]) ld8 t1 = [src0], 8)
	EK(.ex_handler, (p[C]) ld8 t3 = [src1], 8)
	EX(.ex_handler, (p[D]) st8 [dst0] = t13, 4*8)
	EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8)
	br.ctop.sptk .line_copy
	;;

	add dst0=-8,dst0
	add src0=-8,src0
	mov in2=tmp
	.restore sp
	br.sptk.many .medium_copy
	;;

#define BLOCK_SIZE 128*32
#define blocksize r23
#define curlen r24

// dest is on an 8-byte boundary, src is not.  We need to do
// ld8-ld8, shrp, then st8.  Max 8 bytes copied per cycle.
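//
// Illustrative sketch of that step (little-endian; 'a' is the source
// misalignment in bytes, 1..7): given two aligned loads
//	lo = ld8 [src & ~7]	hi = ld8 [(src & ~7) + 8]
// the 8 unaligned bytes at src are
//	(lo >> 8*a) | (hi << (64 - 8*a))
// which shrp computes in a single operation; e.g. a = 3 gives
// (lo >> 24) | (hi << 40).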
.unaligned_src:
	.prologue
	.save ar.pfs, saved_pfs
	alloc saved_pfs=ar.pfs,3,5,0,8
	.save ar.lc, saved_lc
	mov saved_lc=ar.lc
	.save pr, saved_pr
	mov saved_pr=pr
	.body
.4k_block:
	mov saved_in0=dst0	// need to save all input arguments
	mov saved_in2=in2
	mov blocksize=BLOCK_SIZE
	;;
	cmp.lt p6,p7=blocksize,in2
	mov saved_in1=src0
	;;
(p6)	mov in2=blocksize
	;;
	shr.u r21=in2,7		// this many cache lines
	shr.u r22=in2,4		// number of 16-byte iterations
	and curlen=15,in2	// copy length after iteration
	and r30=7,src0		// source alignment
	;;
	cmp.lt p7,p8=1,r21
	add cnt=-1,r21
	;;

	add src_pre_mem=0,src0	// prefetch src pointer
	add dst_pre_mem=0,dst0	// prefetch dest pointer
	and src0=-8,src0	// 1st src pointer
(p7)	mov ar.lc = cnt
(p8)	mov ar.lc = r0
	;;
	TEXT_ALIGN(32)
1:	lfetch.fault [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few 1b
	;;

	shladd dst1=r22,3,dst0	// 2nd dest pointer
	shladd src1=r22,3,src0	// 2nd src pointer
	cmp.eq p8,p9=r22,r0	// do we really need to loop?
	cmp.le p6,p7=8,curlen;	// have at least 8 bytes remaining?
	add cnt=-1,r22		// ctop iteration adjustment
	;;
	EX(.ex_handler, (p9) ld8 r33=[src0],8)	// loop primer
	EK(.ex_handler, (p9) ld8 r37=[src1],8)
(p8)	br.dpnt.few .noloop
	;;

// The jump address is calculated based on src alignment.  The COPYU
// macro below needs to confine its size to a power of two, so an entry
// can be calculated using shl instead of an expensive multiply.  The
// size is then hard-coded by the following #define to match the actual
// size.  This makes it somewhat tedious when the COPYU macro gets
// changed and this needs to be adjusted to match.
#define LOOP_SIZE 6
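// Worked example of the computation below: it assumes each COPYU expansion
// occupies 2^LOOP_SIZE = 64 bytes, so for source alignment a (1..7) the
// branch target works out to .jump_table + (a - 1) * 64, i.e. a = 1 lands
// on COPYU(8) and a = 7 on COPYU(56).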
1:
	mov r29=ip		// jmp_table thread
	mov ar.lc=cnt
	;;
	add r29=.jump_table - 1b - (.jmp1-.jump_table), r29
	shl r28=r30, LOOP_SIZE	// jmp_table thread
	mov ar.ec=2		// loop setup
	;;
	add r29=r29,r28		// jmp_table thread
	cmp.eq p16,p17=r0,r0
	;;
	mov b6=r29		// jmp_table thread
	;;
	br.cond.sptk.few b6

// for 8-15 byte case
// We will skip the loop, but need to replicate the side effect
// that the loop produces.
.noloop:
	EX(.ex_handler, (p6) ld8 r37=[src1],8)
	add src0=8,src0
(p6)	shl r25=r30,3
	;;
	EX(.ex_handler, (p6) ld8 r27=[src1])
(p6)	shr.u r28=r37,r25
(p6)	sub r26=64,r25
	;;
(p6)	shl r27=r27,r26
	;;
(p6)	or r21=r28,r27

.unaligned_src_tail:
/* check if we have more than blocksize to copy, if so go back */
	cmp.gt p8,p0=saved_in2,blocksize
	;;
(p8)	add dst0=saved_in0,blocksize
(p8)	add src0=saved_in1,blocksize
(p8)	sub in2=saved_in2,blocksize
(p8)	br.dpnt .4k_block
	;;

/* we have up to 15 bytes to copy in the tail.
 * part of the work is already done in the jump table code;
 * we are at the following state.
 * src side:
 *
 *   xxxxxx xx                   <----- r21 has xxxxxxxx already
 * -------- -------- --------
 * 0        8        16
 *          ^
 *          |
 *          src1
 *
 * dst
 * -------- -------- --------
 * ^
 * |
 * dst1
 */
	EX(.ex_handler, (p6) st8 [dst1]=r21,8)	// more than 8 bytes to copy
(p6)	add curlen=-8,curlen			// update length
	mov ar.pfs=saved_pfs
	;;
	mov ar.lc=saved_lc
	mov pr=saved_pr,-1
	mov in2=curlen		// remaining length
	mov dst0=dst1		// dest pointer
	add src0=src1,r30	// forward by src alignment
	;;

// 7 byte or smaller.
.memcpy_short:
	cmp.le p8,p9 = 1,in2
	cmp.le p10,p11 = 2,in2
	cmp.le p12,p13 = 3,in2
	cmp.le p14,p15 = 4,in2
	add src1=1,src0		// second src pointer
	add dst1=1,dst0		// second dest pointer
	;;

	EX(.ex_handler_short, (p8) ld1 t1=[src0],2)
	EK(.ex_handler_short, (p10) ld1 t2=[src1],2)
(p9)	br.ret.dpnt rp		// 0 byte copy
	;;

	EX(.ex_handler_short, (p8) st1 [dst0]=t1,2)
	EK(.ex_handler_short, (p10) st1 [dst1]=t2,2)
(p11)	br.ret.dpnt rp		// 1 byte copy

	EX(.ex_handler_short, (p12) ld1 t3=[src0],2)
	EK(.ex_handler_short, (p14) ld1 t4=[src1],2)
(p13)	br.ret.dpnt rp		// 2 byte copy
	;;

	cmp.le p6,p7 = 5,in2
	cmp.le p8,p9 = 6,in2
	cmp.le p10,p11 = 7,in2

	EX(.ex_handler_short, (p12) st1 [dst0]=t3,2)
	EK(.ex_handler_short, (p14) st1 [dst1]=t4,2)
(p15)	br.ret.dpnt rp		// 3 byte copy
	;;

	EX(.ex_handler_short, (p6) ld1 t5=[src0],2)
	EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
(p7)	br.ret.dpnt rp		// 4 byte copy
	;;

	EX(.ex_handler_short, (p6) st1 [dst0]=t5,2)
	EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
(p9)	br.ret.dptk rp		// 5 byte copy

	EX(.ex_handler_short, (p10) ld1 t7=[src0],2)
(p11)	br.ret.dptk rp		// 6 byte copy
	;;

	EX(.ex_handler_short, (p10) st1 [dst0]=t7,2)
	br.ret.dptk rp		// done all cases


/* Align dest to nearest 8-byte boundary.  We know we have at
 * least 7 bytes to copy, enough to crawl to 8-byte boundary.
 * Actual number of bytes to crawl depends on the dest alignment.
 * 7 bytes or less is taken care of at .memcpy_short

 * src0 - source even index
 * src1 - source odd index
 * dst0 - dest even index
 * dst1 - dest odd index
 * r30  - distance to 8-byte boundary
 */

.align_dest:
	add src1=1,in1			// source odd index
	cmp.le p7,p0 = 2,r30		// for .align_dest
	cmp.le p8,p0 = 3,r30		// for .align_dest
	EX(.ex_handler_short, (p6) ld1 t1=[src0],2)
	cmp.le p9,p0 = 4,r30		// for .align_dest
	cmp.le p10,p0 = 5,r30
	;;
	EX(.ex_handler_short, (p7) ld1 t2=[src1],2)
	EK(.ex_handler_short, (p8) ld1 t3=[src0],2)
	cmp.le p11,p0 = 6,r30
	EX(.ex_handler_short, (p6) st1 [dst0] = t1,2)
	cmp.le p12,p0 = 7,r30
	;;
	EX(.ex_handler_short, (p9) ld1 t4=[src1],2)
	EK(.ex_handler_short, (p10) ld1 t5=[src0],2)
	EX(.ex_handler_short, (p7) st1 [dst1] = t2,2)
	EK(.ex_handler_short, (p8) st1 [dst0] = t3,2)
	;;
	EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
	EK(.ex_handler_short, (p12) ld1 t7=[src0],2)
	cmp.eq p6,p7=r28,r29
	EX(.ex_handler_short, (p9) st1 [dst1] = t4,2)
	EK(.ex_handler_short, (p10) st1 [dst0] = t5,2)
	sub in2=in2,r30
	;;
	EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)
	EK(.ex_handler_short, (p12) st1 [dst0] = t7)
	add dst0=in0,r30		// setup arguments
	add src0=in1,r30
(p6)	br.cond.dptk .aligned_src
(p7)	br.cond.dpnt .unaligned_src
	;;

/* main loop body in jump table format */
#define COPYU(shift)								\
1:										\
	EX(.ex_handler, (p16) ld8 r32=[src0],8);	/* 1 */			\
	EK(.ex_handler, (p16) ld8 r36=[src1],8);				\
	(p17) shrp r35=r33,r34,shift;;			/* 1 */			\
	EX(.ex_handler, (p6) ld8 r22=[src1]);	/* common, prime for tail section */ \
	nop.m 0;								\
	(p16) shrp r38=r36,r37,shift;						\
	EX(.ex_handler, (p17) st8 [dst0]=r35,8);	/* 1 */			\
	EK(.ex_handler, (p17) st8 [dst1]=r39,8);				\
	br.ctop.dptk.few 1b;;							\
	(p7) add src1=-8,src1;		/* back out for <8 byte case */		\
	shrp r21=r22,r38,shift;		/* speculative work */			\
	br.sptk.few .unaligned_src_tail	/* branch out of jump table */		\
	;;
	TEXT_ALIGN(32)
.jump_table:
	COPYU(8)	// unaligned cases
.jmp1:
	COPYU(16)
	COPYU(24)
	COPYU(32)
	COPYU(40)
	COPYU(48)
	COPYU(56)

#undef A
#undef B
#undef C
#undef D

/*
 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
 * instruction failed in the bundle.  The exception algorithm is that we first
 * figure out the faulting address, then detect whether any progress has been
 * made on the copy; if so, redo the copy from the last known copied location
 * up to the faulting address (exclusive).  In the copy_from_user case, the
 * remaining bytes in the kernel buffer will be zeroed.
 *
 * Take copy_from_user as an example: there are multiple loads in a bundle,
 * and those loads could span two pages, so the faulting address is calculated
 * as page_round_down(max(src0, src1)).  This is based on the knowledge that
 * if we can access one byte in a page, we can access any byte in that page.
 *
 * predicates used in the exception handler:
 * p6-p7: direction
 * p10-p11: src faulting addr calculation
 * p12-p13: dst faulting addr calculation
 */
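
/*
 * Illustrative example of that rule (assuming 16KB pages, PAGE_SHIFT = 14,
 * with made-up addresses): if a copy_from_user load faults while
 * src0 = 0x20003ff8 and src1 = 0x20004008, then
 * page_round_down(max(src0, src1)) = 0x20004000, i.e. everything below the
 * start of the inaccessible page is treated as successfully readable.
 */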

#define A r19
#define B r20
#define C r21
#define D r22
#define F r28

#define memset_arg0 r32
#define memset_arg2 r33

#define saved_retval loc0
#define saved_rtlink loc1
#define saved_pfs_stack loc2

.ex_hndlr_s:
	add src0=8,src0
	br.sptk .ex_handler
	;;
.ex_hndlr_d:
	add dst0=8,dst0
	br.sptk .ex_handler
	;;
.ex_hndlr_lcpy_1:
	mov src1=src_pre_mem
	mov dst1=dst_pre_mem
	cmp.gtu p10,p11=src_pre_mem,saved_in1
	cmp.gtu p12,p13=dst_pre_mem,saved_in0
	;;
(p10)	add src0=8,saved_in1
(p11)	mov src0=saved_in1
(p12)	add dst0=8,saved_in0
(p13)	mov dst0=saved_in0
	br.sptk .ex_handler
.ex_handler_lcpy:
	// in the line_copy block, the preload addresses should always be
	// ahead of the other two src/dst pointers.  Furthermore, src1/dst1
	// should always be ahead of src0/dst0.
	mov src1=src_pre_mem
	mov dst1=dst_pre_mem
.ex_handler:
	mov pr=saved_pr,-1		// first restore pr, lc, and pfs
	mov ar.lc=saved_lc
	mov ar.pfs=saved_pfs
	;;
.ex_handler_short: // faults in these sections didn't change pr, lc, pfs
	cmp.ltu p6,p7=saved_in0, saved_in1	// get the copy direction
	cmp.ltu p10,p11=src0,src1
	cmp.ltu p12,p13=dst0,dst1
	fcmp.eq p8,p0=f6,f0			// is it memcpy?
	mov tmp = dst0
	;;
(p11)	mov src1 = src0		// pick the larger of the two
(p13)	mov dst0 = dst1		// make dst0 the smaller one
(p13)	mov dst1 = tmp		// and dst1 the larger one
	;;
(p6)	dep F = r0,dst1,0,PAGE_SHIFT	// usr dst rounded down to page boundary
(p7)	dep F = r0,src1,0,PAGE_SHIFT	// usr src rounded down to page boundary
	;;
(p6)	cmp.le p14,p0=dst0,saved_in0	// no progress has been made on store
(p7)	cmp.le p14,p0=src0,saved_in1	// no progress has been made on load
	mov retval=saved_in2
(p8)	ld1 tmp=[src1]			// force an oops for memcpy call
(p8)	st1 [dst1]=r0			// force an oops for memcpy call
(p14)	br.ret.sptk.many rp

/*
 * The remaining bytes to copy are calculated as:
 *
 * A =	(faulting_addr - orig_src)	-> len to faulting ld address
 *	or
 *	(faulting_addr - orig_dst)	-> len to faulting st address
 * B =	(cur_dst - orig_dst)		-> len copied so far
 * C =	A - B				-> len that needs to be copied
 * D =	orig_len - A			-> len that needs to be zeroed
 */
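/*
 * Worked example of those formulas (made-up numbers): orig_len = 100 and the
 * faulting address is 40 bytes past the start, so A = 40; if B = 32 bytes
 * were already stored, then C = A - B = 8 bytes are re-copied by the
 * recursive __copy_user call below, and for copy_from_user the trailing
 * D = 100 - 40 = 60 bytes of the kernel buffer are handed to memset to be
 * zeroed.
 */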
(p6)	sub A = F, saved_in0
(p7)	sub A = F, saved_in1
	clrrrb
	;;
	alloc saved_pfs_stack=ar.pfs,3,3,3,0
	cmp.lt p8,p0=A,r0
	sub B = dst0, saved_in0	// how many bytes copied so far
	;;
(p8)	mov A = 0;		// A shouldn't be negative, cap it
	;;
	sub C = A, B
	sub D = saved_in2, A
	;;
	cmp.gt p8,p0=C,r0	// anything left to copy?
	add memset_arg0=saved_in0, A
(p6)	mov memset_arg2=0	// copy_to_user should not call memset
(p7)	mov memset_arg2=D	// copy_from_user needs to have kbuf zeroed
	mov r8=0
	mov saved_retval = D
	mov saved_rtlink = b0

	add out0=saved_in0, B
	add out1=saved_in1, B
	mov out2=C
(p8)	br.call.sptk.few b0=__copy_user	// recursive call
	;;

	add saved_retval=saved_retval,r8	// above might return a non-zero value
	cmp.gt p8,p0=memset_arg2,r0		// anything to zero?
	mov out0=memset_arg0			// *s
	mov out1=r0				// c
	mov out2=memset_arg2			// n
(p8)	br.call.sptk.few b0=memset
	;;

	mov retval=saved_retval
	mov ar.pfs=saved_pfs_stack
	mov b0=saved_rtlink
	br.ret.sptk.many rp

/* end of McKinley specific optimization */
END(__copy_user)