x86-64, fpu: Disable preemption when using TS_USEDFPU
arch/x86/kernel/i387.c

/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/user.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_xstate_ia32 save_i387_xstate
# define restore_i387_xstate_ia32 restore_i387_xstate
# define _fpstate_ia32 _fpstate
# define _xstate_ia32 _xstate
# define sig_xstate_ia32_size sig_xstate_size
# define fx_sw_reserved_ia32 fx_sw_reserved
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
#endif

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP (boot_cpu_data.hard_math)
#else
# define HAVE_HWFP 1
#endif

static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;

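/*
 * Probe which MXCSR bits the CPU implements by doing an FXSAVE into a
 * scratch area; a reported mask of zero means the historical default
 * mask of 0x0000ffbf applies.
 */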
void __cpuinit mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;

	clts();
	if (cpu_has_fxsr) {
		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : : "m" (fx_scratch));
		mask = fx_scratch.mxcsr_mask;
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
	stts();
}

static void __cpuinit init_thread_xstate(void)
{
	/*
	 * Note that xstate_size might be overwritten later during
	 * xsave_init().
	 */

	if (!HAVE_HWFP) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
	else
		xstate_size = sizeof(struct i387_fsave_struct);
#endif
}

/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */
void __cpuinit fpu_init(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

	if (cpu_has_fxsr)
		cr4_mask |= X86_CR4_OSFXSR;
	if (cpu_has_xmm)
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		set_in_cr4(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!HAVE_HWFP)
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	if (!smp_processor_id())
		init_thread_xstate();

	mxcsr_feature_mask_init();
	/* clean state in init */
	current_thread_info()->status = 0;
	clear_used_math();
}

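/*
 * Reset @fpu to the hardware's power-on FPU defaults (the equivalent
 * of FINIT, plus the default MXCSR when SSE is available).
 */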
void fpu_finit(struct fpu *fpu)
{
#ifdef CONFIG_X86_32
	if (!HAVE_HWFP) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}
#endif

	if (cpu_has_fxsr) {
		struct i387_fxsave_struct *fx = &fpu->state->fxsave;

		memset(fx, 0, xstate_size);
		fx->cwd = 0x37f;
		if (cpu_has_xmm)
			fx->mxcsr = MXCSR_DEFAULT;
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		memset(fp, 0, xstate_size);
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpu_finit);

/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
 * remember the current task has used the FPU.
 */
int init_fpu(struct task_struct *tsk)
{
	int ret;

	if (tsk_used_math(tsk)) {
		if (HAVE_HWFP && tsk == current)
			unlazy_fpu(tsk);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&tsk->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&tsk->thread.fpu);

	set_stopped_child_used_math(tsk);
	return 0;
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

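/*
 * ptrace/coredump regset handlers for the FXSAVE image: the state is
 * materialized via init_fpu() and sanitized before it is copied out or
 * updated in place.
 */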
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * Update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

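/*
 * The xstate regset exposes the full XSAVE image, including the
 * software-reserved bytes that describe the layout to user space.
 */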
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.state->xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct xsave_hdr_struct *xsave_hdr;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->xsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;

	xsave_hdr->xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

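/*
 * Compress the 16-bit i387 tag word (two bits per register) into the
 * FXSR format, which keeps a single valid/empty bit per register.
 */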
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

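/*
 * Expand the FXSR tag byte back into a full i387 tag word by
 * classifying each register from its saved exponent and significand.
 */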
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

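/*
 * Build the legacy user_i387_ia32_struct image (the 32-bit ptrace and
 * signal ABI format) from the task's FXSAVE area.
 */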
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	if (tsk == current) {
		/*
		 * This should really be ds/cs at FPU exception time, but
		 * that information is not available in 64-bit mode.
		 */
		asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
		asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
	} else {
		struct pt_regs *regs = task_pt_regs(tsk);

		env->fos = 0xffff0000 | tsk->thread.ds;
		env->fcs = regs->cs;
	}
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

static void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

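/*
 * Legacy 32-bit i387 regset: reported in user_i387_ia32_struct format,
 * converted from the FXSAVE image when the CPU has FXSR.
 */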
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if (!HAVE_HWFP)
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);
	}

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!HAVE_HWFP)
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0, -1);
	}

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * Signal frame handlers.
 */

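/*
 * Note the return convention of the helpers below: the save_* routines
 * return 1 on success and -1 on failure, while the restore_* routines
 * return 0 on success and non-zero on failure.
 */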
static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;
	struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;

	fp->status = fp->swd;
	if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
		return -1;
	return 1;
}

static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
	struct user_i387_ia32_struct env;
	int err = 0;

	convert_from_fxsr(&env, tsk);
	if (__copy_to_user(buf, &env, sizeof(env)))
		return -1;

	err |= __put_user(fx->swd, &buf->status);
	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
	if (err)
		return -1;

	if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
		return -1;
	return 1;
}

static int save_i387_xsave(void __user *buf)
{
	struct task_struct *tsk = current;
	struct _fpstate_ia32 __user *fx = buf;
	int err = 0;

	sanitize_i387_state(tsk);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the bit
	 * vector while saving the state to the user context.
	 * This lets us capture any changes (during sigreturn) to
	 * the FP/SSE bits made by legacy applications which don't touch
	 * xstate_bv in the xsave header.
	 *
	 * xsave-aware applications can change the xstate_bv in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	if (save_i387_fxsave(fx) < 0)
		return -1;

	err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
			     sizeof(struct _fpx_sw_bytes));
	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *) (buf + sig_xstate_ia32_size
					    - FP_XSTATE_MAGIC2_SIZE));
	if (err)
		return -1;

	return 1;
}

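/*
 * Save the current FPU state to a 32-bit signal frame.  Returns 0 when
 * the task has no FPU state to save, 1 on success and negative on error.
 */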
int save_i387_xstate_ia32(void __user *buf)
{
	struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
	struct task_struct *tsk = current;

	if (!used_math())
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
		return -EACCES;
	/*
	 * This will cause a "finit" to be triggered by the next
	 * attempted FPU operation by the 'current' process.
	 */
	clear_used_math();

	if (!HAVE_HWFP) {
		return fpregs_soft_get(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, fp) ? -1 : 1;
	}

	unlazy_fpu(tsk);

	if (cpu_has_xsave)
		return save_i387_xsave(fp);
	if (cpu_has_fxsr)
		return save_i387_fxsave(fp);
	else
		return save_i387_fsave(fp);
}

static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;

	return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
				sizeof(struct i387_fsave_struct));
}

static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
			       unsigned int size)
{
	struct task_struct *tsk = current;
	struct user_i387_ia32_struct env;
	int err;

	err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
			       size);
	/* mxcsr reserved bits must be masked to zero for security reasons */
	tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
	if (err || __copy_from_user(&env, buf, sizeof(env)))
		return 1;
	convert_to_fxsr(tsk, &env);

	return 0;
}

static int restore_i387_xsave(void __user *buf)
{
	struct _fpx_sw_bytes fx_sw_user;
	struct _fpstate_ia32 __user *fx_user =
			((struct _fpstate_ia32 __user *) buf);
	struct i387_fxsave_struct __user *fx =
		(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
	struct xsave_hdr_struct *xsave_hdr =
				&current->thread.fpu.state->xsave.xsave_hdr;
	u64 mask;
	int err;

	if (check_for_xstate(fx, buf, &fx_sw_user))
		goto fx_only;

	mask = fx_sw_user.xstate_bv;

	err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);

	xsave_hdr->xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

	/*
	 * Init the state that is not present in the memory layout
	 * and enabled by the OS.
	 */
	mask = ~(pcntxt_mask & ~mask);
	xsave_hdr->xstate_bv &= mask;

	return err;
fx_only:
	/*
	 * Couldn't find the extended state information in the memory
	 * layout. Restore the FP/SSE and init the other extended state
	 * enabled by the OS.
	 */
	xsave_hdr->xstate_bv = XSTATE_FPSSE;
	return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
}

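/*
 * Restore the FPU state from a 32-bit signal frame.  A NULL @buf means
 * the frame carries no FPU state, so the task's FPU state is simply
 * cleared.
 */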
int restore_i387_xstate_ia32(void __user *buf)
{
	int err;
	struct task_struct *tsk = current;
	struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;

	if (HAVE_HWFP)
		clear_fpu(tsk);

	if (!buf) {
		if (used_math()) {
			clear_fpu(tsk);
			clear_used_math();
		}

		return 0;
	} else
		if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
			return -EACCES;

	if (!used_math()) {
		err = init_fpu(tsk);
		if (err)
			return err;
	}

	if (HAVE_HWFP) {
		if (cpu_has_xsave)
			err = restore_i387_xsave(buf);
		else if (cpu_has_fxsr)
			err = restore_i387_fxsave(fp, sizeof(struct
							     i387_fxsave_struct));
		else
			err = restore_i387_fsave(fp);
	} else {
		err = fpregs_soft_set(current, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      NULL, fp) != 0;
	}
	set_used_math();

	return err;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */