#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * For SMP, we don't do lazy VMX switching because it just gets too
	 * horrendously complex, especially when a task switches from one CPU
	 * to another.  Instead we call giveup_altivec in switch_to.
	 * VRSAVE isn't dealt with here, that is done in the normal context
	 * switch code.  Note that we could rely on the vrsave value to
	 * eventually avoid saving all of the VREGs here...
	 */
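	/*
	 * A rough C sketch of the !SMP block below (illustrative only;
	 * 'prev' is a hypothetical local, not a name used elsewhere):
	 *
	 *	struct task_struct *prev = last_task_used_altivec;
	 *	if (prev) {
	 *		save vr0-vr31 and VSCR into prev->thread;
	 *		prev->thread.regs->msr &= ~MSR_VEC;
	 *	}
	 */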
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
55 | ||
56 | /* Hack: if we get an altivec unavailable trap with VRSAVE | |
57 | * set to all zeros, we assume this is a broken application | |
58 | * that fails to set it properly, and thus we switch it to | |
59 | * all 1's | |
60 | */ | |
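	/* (VRSAVE is a software-convention SPR: by ABI each bit flags one
	 * of vr0-vr31 as live.  Writing all 1's marks every vector
	 * register as in use, so VRSAVE-honouring save/restore paths will
	 * preserve the full set for such applications.)
	 */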
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
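	/* (Here r9 on 32-bit and r12 on 64-bit hold the saved MSR of the
	 * interrupted context; OR-ing in MSR_VEC makes VMX usable again
	 * as soon as the unavailable exception returns.) */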
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables VMX for use in the kernel on return.
 */
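/*
 * (Callers include the context-switch path and flush_altivec_to_thread(),
 * which needs a task's up-to-date vector state in its thread_struct.)
 */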
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
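	/*
	 * (On VSX-capable CPUs the VSX registers overlay the FP and VMX
	 * register files, so giving up VMX must clear MSR_VSX as well as
	 * MSR_VEC in the task's saved MSR; the feature section below is
	 * patched at boot to pick the right mask.)
	 */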
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the FP and VMX save paths (load_up_fpu/load_up_altivec),
 * but first check to see if they have been run already.
 */
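/*
 * (The beql+ pairs below are conditional branch-and-links: each calls
 * its load_up_* routine only while the corresponding MSR bit is still
 * clear, i.e. that state has not been loaded yet, and the callee
 * returns here via blr.)
 */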
_GLOBAL(load_up_vsx)
/* Load FP and VMX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables VSX for use in the kernel on return.
 */
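/*
 * (No explicit save is needed here because the VSX registers overlay
 * the FP and VMX register files; saving those two sets, as the FP and
 * VMX giveup paths do, already captures the full VSX state.)
 */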
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */
213 | ||
14cf11af PM |
214 | |
215 | /* | |
216 | * The routines below are in assembler so we can closely control the | |
217 | * usage of floating-point registers. These routines must be called | |
218 | * with preempt disabled. | |
219 | */ | |
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif
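/*
 * (Worked example of the constants above: IEEE 754 packs sign, biased
 * exponent and mantissa.  Single 0x3f800000 = sign 0, exponent 127
 * (bias 127), mantissa 0 -> 1.0 * 2^0 = 1.0, and 0x3f000000 has
 * exponent 126 -> 2^-1 = 0.5.  The doubles 0x3ff0...0 and 0x3fe0...0
 * encode the same two values with an 11-bit exponent and bias 1023.)
 */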
245 | ||
246 | .text | |
247 | /* | |
248 | * Internal routine to enable floating point and set FPSCR to 0. | |
249 | * Don't call it from C; it doesn't use the normal calling convention. | |
250 | */ | |
251 | fpenable: | |
252 | #ifdef CONFIG_PPC32 | |
253 | stwu r1,-64(r1) | |
254 | #else | |
255 | stdu r1,-64(r1) | |
256 | #endif | |
257 | mfmsr r10 | |
258 | ori r11,r10,MSR_FP | |
259 | mtmsr r11 | |
260 | isync | |
261 | stfd fr0,24(r1) | |
262 | stfd fr1,16(r1) | |
263 | stfd fr31,8(r1) | |
264 | LDCONST(fr1, fpzero) | |
265 | mffs fr31 | |
3a2c48cf | 266 | MTFSF_L(fr1) |
14cf11af PM |
267 | blr |
268 | ||
fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
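/*
 * (The four arithmetic routines below share one shape; in rough C,
 * with hypothetical float *dst, *a, *b mapping to r3, r4, r5:
 *
 *	for (i = 0; i < 4; i++)
 *		dst[i] = a[i] + b[i];
 *
 * vsubfp, vmaddfp and vnmsubfp differ only in the scalar operation
 * applied in the loop body.)
 */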
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
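/*
 * (The architected vrefp estimate only needs to be accurate to roughly
 * 1 part in 4096, so computing an exact 1.0/x with fdivs more than
 * satisfies the requirement.)
 */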
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
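/*
 * (Derivation of the update below: to refine r ~ 1/sqrt(s), apply
 * Newton's method to f(r) = 1/r^2 - s, whose positive root is
 * 1/sqrt(s):
 *
 *	r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s * r * r)
 *
 * which is exactly the fmuls/fnmsubs/fmadds sequence used in each
 * iteration; every pass roughly doubles the number of correct bits.)
 */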
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable