/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;
/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active
extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif
#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
#define X87_FSW_ES (1 << 7)	/* Exception Summary */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}
#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}
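/*
 * Usage sketch (illustrative only; the field name is hypothetical):
 * the signal setup code saves straight into the user-space frame and
 * treats a fault as an error:
 *
 *	if (fxsave_user(&frame->fxstate))
 *		return -EFAULT;
 *
 * On a fault the image is cleared above, so user space never sees a
 * partially written fxsave area.  See save_i387_xstate() for the real
 * signal-frame path.
 */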
static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
		asm volatile("rex64/fxsave %0"
			     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}
#else /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif /* CONFIG_X86_64 */
/*
 * We need a safe address that is cheap to find and that is already
 * in L1 during context switch.  The best choices are unfortunately
 * different for UP and SMP.
 */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
#endif
/*
 * These must be called with preempt disabled.
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return;
	}

	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
		asm volatile("fnclex");

	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	 * is pending.  Clear the x87 state here by setting it to fixed
	 * values.  safe_address is an arbitrary variable that is cheap to
	 * reference and likely to be in L1.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
}
static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
}
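/*
 * Caller sketch (illustrative): fpu_save_init()/__save_init_fpu() must
 * run with preemption off, so a typical caller looks like:
 *
 *	preempt_disable();
 *	__save_init_fpu(tsk);
 *	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 *	stts();
 *	preempt_enable();
 *
 * which is exactly the pattern save_init_fpu() below packages up.
 */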
static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}
static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}
static inline int restore_fpu_checking(struct task_struct *tsk)
{
	return fpu_restore_checking(&tsk->thread.fpu);
}
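/*
 * Usage sketch (illustrative): a non-zero return means the restore
 * faulted on a corrupt or unrestorable image, so callers typically do
 *
 *	if (restore_fpu_checking(tsk))
 *		...drop the FPU state and reinitialize or signal the task...
 *
 * rather than letting the task run with a half-restored FPU.
 */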
/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);
static inline void __unlazy_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		__save_init_fpu(tsk);
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	} else
		tsk->fpu_counter = 0;
}
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: TS_USEDFPU must be clear (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	return !(current_thread_info()->status & TS_USEDFPU) &&
		(read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode_vm(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
static inline bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();

	WARN_ON_ONCE(!irq_fpu_usable());
	preempt_disable();
	if (me->status & TS_USEDFPU) {
		__save_init_fpu(me->task);
		me->status &= ~TS_USEDFPU;
		/* We do 'stts()' in kernel_fpu_end() */
	} else
		clts();
}
static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}
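/*
 * Usage sketch (illustrative): kernel code that wants to use FPU/SSE
 * registers brackets the region, checking usability first when it can
 * be reached from interrupt context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		...use SSE/FPU registers...
 *		kernel_fpu_end();
 *	} else {
 *		...fall back to an integer-only implementation...
 *	}
 *
 * No sleeping between begin/end: preemption is disabled throughout.
 */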
/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions get
 * used from interrupt context as well. To prevent these kernel instructions
 * in interrupt context from interacting wrongly with other user/kernel fpu
 * usage, we should use them only within irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA
	 * fault. Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
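/*
 * Usage sketch (illustrative), e.g. around a VIA padlock instruction:
 *
 *	int ts = irq_ts_save();
 *	...execute the padlock instruction...
 *	irq_ts_restore(ts);
 *
 * A CR0.TS that was set on entry is set again on exit, so the spurious
 * DNA fault cannot leak into other user/kernel FPU usage.
 */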
/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 *
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe, though.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline int user_has_fpu(void)
{
	return current_thread_info()->status & TS_USEDFPU;
}
static inline void user_fpu_end(void)
{
	preempt_disable();
	current_thread_info()->status &= ~TS_USEDFPU;
	stts();
	preempt_enable();
}
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu()) {
		clts();
		current_thread_info()->status |= TS_USEDFPU;
	}
	preempt_enable();
}
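/*
 * Ordering sketch (illustrative), following the NOTE above - save side:
 *
 *	__save_init_fpu(current);	...save the FP state first...
 *	user_fpu_end();			...only then give up ownership...
 *
 * restore side:
 *
 *	user_fpu_begin();		...claim ownership first...
 *	restore_fpu_checking(current);	...then restore the registers...
 */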
/*
 * These disable preemption on their own and are therefore safe to call
 * from preemptible context.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!(task_thread_info(tsk)->status & TS_USEDFPU));
	preempt_disable();
	__save_init_fpu(tsk);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
	stts();
	preempt_enable();
}
static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}
static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}
/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}
static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}
static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}
static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}
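/*
 * Lifecycle sketch (illustrative): on fork, for instance, the child's
 * state area is allocated and then cloned from the parent:
 *
 *	int err = fpu_alloc(&dst->thread.fpu);
 *	if (err)
 *		return err;
 *	fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 *
 * and fpu_free() releases the kmem_cache object at task teardown.
 */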
extern void fpu_finit(struct fpu *fpu);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */