x86/fpu: Remove fpu_allocated()
arch/x86/kernel/i387.c
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/user.h>

static DEFINE_PER_CPU(bool, in_kernel_fpu);

void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return true;

	return !__thread_has_fpu(current) &&
		(read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
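
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a typical caller tests irq_fpu_usable() before using the FPU
 * from kernel code:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX-accelerated code ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback path ...
 *	}
 */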

void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else {
		this_cpu_write(fpu_owner_task, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);
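
/*
 * Editorial note: these are the low-level halves; most callers should
 * use the kernel_fpu_begin()/kernel_fpu_end() wrappers (found in
 * asm/i387.h at this point in the tree), which also disable and
 * re-enable preemption around the pair.
 */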

/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct task_struct *tsk)
{
	WARN_ON(tsk != current);

	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			__save_init_fpu(tsk);
			__thread_fpu_end(tsk);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
static struct i387_fxsave_struct fx_scratch;

static void mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;

	if (cpu_has_fxsr) {
		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : "+m" (fx_scratch));
		mask = fx_scratch.mxcsr_mask;
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}
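
/*
 * Editorial note: mxcsr_mask in the fxsave image reports which MXCSR
 * bits the CPU implements; a reported mask of 0 means the architectural
 * default 0x0000ffbf applies, i.e. everything except bit 6 (DAZ), which
 * the earliest SSE CPUs lack.
 */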

static void init_thread_xstate(void)
{
	/*
	 * Note that xstate_size might be overwritten later during
	 * xsave_init().
	 */

	if (!cpu_has_fpu) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
	else
		xstate_size = sizeof(struct i387_fsave_struct);
}

/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */
void fpu_init(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

#ifndef CONFIG_MATH_EMULATION
	if (!cpu_has_fpu) {
		pr_emerg("No FPU found and no math emulation present\n");
		pr_emerg("Giving up\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
	if (cpu_has_fxsr)
		cr4_mask |= X86_CR4_OSFXSR;
	if (cpu_has_xmm)
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!cpu_has_fpu)
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/*
	 * init_thread_xstate is only called once to avoid overriding
	 * xstate_size during boot time or during CPU hotplug.
	 */
	if (xstate_size == 0)
		init_thread_xstate();

	mxcsr_feature_mask_init();
	xsave_init();
	eager_fpu_init();
}
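
/*
 * Editorial note: xsave_init() may grow xstate_size once the XSAVE
 * feature set is known (see the comment in init_thread_xstate()), and
 * eager_fpu_init() then picks eager vs. lazy FPU restore - hence both
 * run last.
 */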

void fpu_finit(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		/* Architectural reset values, with all registers tagged empty: */
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpu_finit);

/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct task_struct *curr)
{
	int ret;

	if (WARN_ON_ONCE(curr != current))
		return -EINVAL;
	if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&curr->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&curr->thread.fpu);

	/* Safe to do for the current task: */
	curr->flags |= PF_USED_MATH;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);
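
/*
 * Editorial note: the typical caller is the device-not-available trap
 * path (math_state_restore()), which allocates FPU state the first time
 * a task actually executes an FPU instruction.
 */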

/*
 * A stopped (ptraced) task is going to have its FPU state accessed for
 * the first time: initialize the state, set the mxcsr to its default
 * value at reset if we support XMM instructions, and then remember that
 * the task has used the FPU.
 */
static int fpu__unlazy_stopped(struct task_struct *child)
{
	int ret;

	if (WARN_ON_ONCE(child == current))
		return -EINVAL;

	if (child->flags & PF_USED_MATH) {
		task_disable_lazy_fpu_restore(child);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&child->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&child->thread.fpu);

	/* Safe to do for stopped child tasks: */
	child->flags |= PF_USED_MATH;

	return 0;
}
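
/*
 * Editorial note: unlike fpstate_alloc_init(), which insists on current,
 * this helper serves the ptrace/regset accessors below, which operate on
 * a stopped child task.
 */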

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);
	return ret;
}
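
/*
 * Editorial note: the masking above keeps a later XRSTOR from faulting -
 * the hardware raises #GP if xstate_bv sets a feature bit outside XCR0
 * (pcntxt_mask here) or if the reserved header bytes are non-zero.
 */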

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
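
/*
 * Worked example (editorial): st0 valid, st1..st7 empty gives an i387
 * tag word of 0xfffc (pairs: 11 11 11 11 11 11 11 00). In a 16-bit view:
 *
 *	~0xfffc                     = 0x0003
 *	(tmp | (tmp >> 1)) & 0x5555 = 0x0001
 *	(tmp | (tmp >> 1)) & 0x3333 = 0x0001
 *	(tmp | (tmp >> 2)) & 0x0f0f = 0x0001
 *	(tmp | (tmp >> 4)) & 0x00ff = 0x0001
 *
 * i.e. an FXSR tag byte of 0x01: only bit 0 (st0) is marked non-empty.
 */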

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

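/*
 * Editorial note: these values match the hardware encodings - the i387
 * tag word stores two bits per register (00 valid, 01 zero, 10 special,
 * 11 empty), while the FXSR tag byte keeps one bit per register
 * (1 = non-empty).
 */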
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
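
/*
 * Editorial note: _fpreg is the packed 10-byte i387 register image and
 * _fpxreg the 16-byte slot of the fxsave layout, so the loop above
 * copies sizeof(to[0]) == 10 bytes per register.
 */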

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

static int __init no_387(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FPU);
	return 1;
}

__setup("no387", no_387);

/*
 * Set the X86_FEATURE_FPU CPU-capability bit based on
 * trying to execute an actual sequence of FPU instructions:
 */
void fpu__detect(struct cpuinfo_x86 *c)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	/* fninit loads FSW = 0 and FCW = 0x037f; 0x037f & 0x103f == 0x003f */
	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
		set_cpu_cap(c, X86_FEATURE_FPU);
	else
		clear_cpu_cap(c, X86_FEATURE_FPU);

	/* The final cr0 value is set in fpu_init() */
}