/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * In the eagerfpu case we always return true: the thread most likely
 * owns the FPU, and we are not going to set/clear CR0::TS anyway.
 *
 * In the lazy (non-eager) case, we can do a kernel_fpu_begin/end()
 * pair *ONLY* if that pair does nothing at all: the thread must not
 * own the FPU (so that we don't try to save the FPU state), and TS
 * must be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

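/*
 * Illustrative usage sketch - not part of this file, and the helper
 * names are hypothetical: a typical caller tests irq_fpu_usable()
 * before entering a kernel-mode SIMD section, with a scalar fallback
 * for contexts where the FPU cannot be used:
 *
 *	static void checksum_block(const u8 *buf, size_t len)
 *	{
 *		if (irq_fpu_usable()) {
 *			kernel_fpu_begin();
 *			checksum_block_sse(buf, len);
 *			kernel_fpu_end();
 *		} else {
 *			checksum_block_generic(buf, len);
 *		}
 *	}
 */
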
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

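/*
 * Usage note (sketch, not from this file): kernel_fpu_begin()/end()
 * delimit a non-preemptible region. The pair must not nest (the
 * in_kernel_fpu WARNings above catch that), and the code in between
 * must not sleep:
 *
 *	kernel_fpu_begin();
 *	... SIMD instructions only, no blocking calls ...
 *	kernel_fpu_end();
 */
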
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If we are in process context and not atomic, we can take a
	 * spurious DNA fault. Otherwise, doing clts() in process context
	 * requires disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);

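/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * save/restore pattern used by drivers such as VIA PadLock around
 * instructions that would fault if CR0::TS were left set:
 *
 *	int ts_state;
 *
 *	ts_state = irq_ts_save();
 *	... FPU-touching instructions ...
 *	irq_ts_restore(ts_state);
 */
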
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;

		/*
		 * Legacy FNSAVE image: default control word (all x87
		 * exceptions masked), clear status word, all tags empty:
		 */
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}

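/*
 * Illustrative call path (assumed, simplified): fpu__copy() is what
 * gives a child task its FPU context on fork, via the x86
 * arch_dup_task_struct():
 *
 *	int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 *	{
 *		*dst = *src;
 *
 *		return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 */
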
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(fpu);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(child_fpu);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

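/*
 * Illustrative sketch (assumed caller, simplified): the main user of
 * fpu__restore() is the #NM device-not-available trap handler, which
 * runs with interrupts disabled:
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		...
 *		fpu__restore();
 *	}
 */
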
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}

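/*
 * Illustrative call site (assumed, simplified): flush_thread() resets
 * the FPU state on execve():
 *
 *	void flush_thread(void)
 *	{
 *		struct task_struct *tsk = current;
 *		...
 *		fpu__clear(&tsk->thread.fpu);
 *	}
 */
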
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}

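/*
 * Illustrative sketch (assumed user-space view): regset handlers such
 * as the ones above back the ptrace FP-register requests, e.g.:
 *
 *	struct user_fpregs_struct fpregs;
 *
 *	ptrace(PTRACE_GETFPREGS, pid, NULL, &fpregs);
 *	... inspect or modify fpregs ...
 *	ptrace(PTRACE_SETFPREGS, pid, NULL, &fpregs);
 */
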
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

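/*
 * Worked example for the bit-twiddling above (illustrative): for an
 * i387 tag word of 0xfff0 - ST0/ST1 valid (tag 00), ST2..ST7 empty
 * (tag 11) - ~twd is 0x000f, the first fold yields 0x0005 (one '01'
 * pair per non-empty register), and the remaining folds pack that
 * down to 0x03: the FXSR tag byte with bits 0 and 1 set.
 */
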
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to
		 * unmasked status.  0x3f is the exception bits in these regs,
		 * 0x200 is the C1 bit, needed to tell apart the two stack
		 * fault cases, and 0x040 is the stack fault bit.  We should
		 * only be taking one exception at a time, so if this
		 * combination doesn't produce any single exception, then we
		 * have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences, since we won't be able
		 * to fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as
		 * there is only a single status/control register. Thus, to
		 * determine which unmasked exception was caught we must mask
		 * the exception mask bits at 0x1f80, and then use these to
		 * mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible we get a
	 * spurious trap, which is not an error.
	 */
	return 0;
}
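
/*
 * Worked example for the decoding above (illustrative): with only the
 * Invalid Operation exception unmasked in MXCSR (mask field 0x1f00,
 * i.e. IM clear) and the IE status flag set (bit 0), mxcsr is 0x1f01,
 * mxcsr >> 7 is 0x3e, so err = ~0x3e & 0x1f01 has bit 0 set and
 * fpu__exception_code() returns FPE_FLTINV.
 */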