/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is currently using the FPU state.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness (see the usage
 *     sketch after kernel_fpu_end() below).
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);
	else
		__fpregs_deactivate_hw();

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

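/*
 * Example (editorial sketch, not part of the original file): typical use
 * of the kernel-mode FPU API above. irq_fpu_usable() and the
 * kernel_fpu_begin()/end() pairing are the real API; the two xor_block_*()
 * helpers are hypothetical stand-ins.
 */
#if 0
static void example_xor_with_fpu(unsigned long *dst,
				 const unsigned long *src, size_t words)
{
	if (!irq_fpu_usable()) {
		/* Hypothetical integer fallback for atomic contexts: */
		xor_block_generic(dst, src, words);
		return;
	}

	kernel_fpu_begin();		/* disables preemption, saves fpregs */
	xor_block_sse(dst, src, words);	/* hypothetical SSE implementation */
	kernel_fpu_end();		/* restores fpregs, enables preemption */
}
#endif
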
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);

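/*
 * Example (editorial sketch): pairing irq_ts_save()/irq_ts_restore()
 * around FPU-touching instructions in atomic context, in the style of
 * the VIA PadLock crypto driver. do_fpu_work() is hypothetical.
 */
#if 0
static void example_atomic_fpu_use(void)
{
	int ts_state;

	ts_state = irq_ts_save();	/* clears CR0::TS if it was set */
	do_fpu_work();			/* hypothetical: no #NM fault now */
	irq_ts_restore(ts_state);	/* re-sets TS only if we cleared it */
}
#endif
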
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			if (use_eager_fpu())
				copy_kernel_to_fpregs(&fpu->state);
			else
				fpregs_deactivate(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

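/*
 * Editorial note: these are the x87 FNINIT defaults in the legacy
 * FNSAVE layout: control word 0x037f (all exceptions masked, 64-bit
 * precision, round to nearest), status word 0, and an all-ones tag
 * word (every register tagged empty). The 0xffff upper halves are the
 * unused high bits of the 16-bits-in-32-bit-slot fields.
 */
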
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);

		if (use_eager_fpu())
			copy_kernel_to_fpregs(&src_fpu->state);
		else
			fpregs_deactivate(src_fpu);
	}
	preempt_enable();

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);

			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		fpu->last_cpu = -1;
	} else {
		fpstate_init(&fpu->state);

		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}

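/*
 * Example (editorial sketch): a ptrace-style writer pairs
 * fpu__activate_fpstate_write() with a direct fpstate update on a
 * stopped child task. The new_fx argument is hypothetical.
 */
#if 0
static void example_poke_child_fpstate(struct task_struct *child,
				       const struct fxregs_state *new_fx)
{
	struct fpu *fpu = &child->thread.fpu;

	fpu__activate_fpstate_write(fpu);	/* unlazy or init the fpstate */
	memcpy(&fpu->state.fxsave, new_fx, sizeof(*new_fx));
}
#endif
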
/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'. Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switching code or the registers being lazily saved.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'. Ensure that no
	 * CPU thinks that its fpregs match the fpstate. This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	fpu->last_cpu = -1;
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin().
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date. Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary. Context switches can happen again.
	 */
	preempt_enable();
}

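/*
 * Example (editorial sketch): the begin/end pair around an in-place
 * update of the current task's fpstate. The specific xfeatures bit
 * being cleared here is only illustrative.
 */
#if 0
static void example_update_current_fpstate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpu__current_fpstate_write_begin();
	/* Context switches cannot overwrite fpu->state in this window: */
	fpu->state.xsave.header.xfeatures &= ~XFEATURE_MASK_BNDCSR;
	fpu__current_fpstate_write_end();
}
#endif
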
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	fpu->counter++;
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

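/*
 * Example (editorial sketch): fpu__restore() is the heart of the #NM
 * (device-not-available) trap path; the real caller is
 * do_device_not_available() in traps.c, shown here heavily simplified.
 */
#if 0
static void example_device_not_available(void)
{
	/* The trap handler runs with interrupts off, so no preemption: */
	fpu__restore(&current->thread.fpu);
}
#endif
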
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}

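/*
 * Worked example (editorial note): only the low six bits of 'err' (the
 * exception flag positions) are tested above. With the power-on
 * MXCSR_DEFAULT of 0x1f80, all mask bits are set and no flag bits are
 * set, so ~(0x1f80 >> 7) = ~0x3f clears every flag position and the
 * function returns 0 (a spurious trap). If divide-by-zero is unmasked
 * (ZM, bit 0x0200, clear) and its flag (ZE, bit 0x0004) is set, e.g.
 * mxcsr = 0x1d84, then mxcsr >> 7 = 0x3b and ~0x3b keeps flag bit
 * 0x004 visible, so 'err & 0x004' matches and FPE_FLTDIV is returned.
 */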