/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
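/*
 * 0x37f is the x87 default control word set by FNINIT/reset: all x87
 * exceptions masked, 64-bit (extended) precision, round-to-nearest.
 * MXCSR_DEFAULT is the corresponding SSE default with all exceptions
 * masked.
 */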
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

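/*
 * For illustration: a use such as
 *
 *	err = user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 *
 * emits the FXRSTOR bracketed by STAC/CLAC (for SMAP), plus an exception
 * table entry: if the instruction at label 1 faults, the fixup at label 3
 * sets err to -1 and jumps past it; otherwise err stays 0. check_insn()
 * is the same pattern without the STAC/CLAC pair, for kernel buffers.
 */
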
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (config_enabled(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (config_enabled(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
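/*
 * The instructions are emitted as raw opcode bytes so that old binutils
 * without XSAVE support can still assemble this file. The last byte is
 * the ModRM byte: 0x27 is /4 with (%rdi)/(%edi) (XSAVE), 0x37 is /6
 * (XSAVEOPT), 0x2f is /5 (XRSTOR for 0f,ae and XSAVES for 0f,c7), and
 * 0x1f is /3 (XRSTORS).
 */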

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

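/*
 * XSTATE_OP() hard-codes the operand convention of the XSAVE instruction
 * family: the save area pointer goes in %rdi/%edi ("D") and the 64-bit
 * component mask is split across %eax ("a", low half) and %edx ("d",
 * high half). A typical invocation, as used below, is:
 *
 *	XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 *
 * err is 0 on success and -2 if the instruction faulted.
 */
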
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports both a
 * compact format and supervisor states, in addition to XSAVEOPT's
 * modified-state optimization.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because
 * XSAVEOPT supports the modified-state optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need it here as the address
 * of the instruction that might fault.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compact XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time, when the x86 caps are not
 * yet set up and alternatives cannot be used.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time, when the x86 caps are not
 * yet set up and alternatives cannot be used.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization, because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted format xsave area, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from the user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
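/*
 * Illustrative sketch only (not the actual fpu/signal.c code): a signal
 * restore path that trusts the user buffer could use this as
 *
 *	if (copy_user_to_xregs(buf_fx, xfeatures_mask))
 *		return -EFAULT;
 *
 * with buf_fx pointing at the user-space xregs_state and xfeatures_mask
 * selecting which state components to restore.
 */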

/*
 * This must be called with preemption disabled. It returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving: FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved by
 * switch_fpu_prepare() on the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}
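
/*
 * In lazy FPU mode CR0.TS is the hardware's "FPU not owned" flag: clts()
 * clears it so FPU instructions run normally, stts() sets it so that the
 * next FPU instruction raises #NM and the kernel gets a chance to restore
 * the right context. With eager FPU switching both helpers are no-ops.
 */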

/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, or if the past 5 consecutive
	 * context switches used the FPU.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * Misc helper functions:
 */

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(&new_fpu->state);
}

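/*
 * For illustration, a sketch (simplified, not the exact __switch_to()
 * code) of how the two halves are paired during a context switch:
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	...	switch stacks, segments, TLS, etc. ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */
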
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the saved state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
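
/*
 * For illustration: XCR0 (index XCR_XFEATURE_ENABLED_MASK, i.e. 0) holds
 * the xfeatures the OS has enabled, so
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *
 * returns a bitmap in the same format as the xstate component masks used
 * above (bit 0 = x87, bit 1 = SSE, bit 2 = AVX/YMM, ...).
 */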

#endif /* _ASM_X86_FPU_INTERNAL_H */