/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

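/*
 * Per-CPU scratch slot: the 64-bit SYSCALL entry path (entry_64.S) stashes
 * the user RSP here while it switches to the kernel stack, since SYSCALL
 * itself does not switch stacks.
 */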
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
               regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))
                return;

        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->size);
                        BUG();
                }
#endif
        }
}

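/*
 * Install a 32-bit TLS descriptor with base @addr into slot @tls of the
 * task's GDT TLS array (a flat 4 GB, 32-bit, user-accessible segment).
 */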
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

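/* Return the segment base recorded in TLS slot @tls of the task's GDT. */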
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

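/*
 * Set up the kernel stack and register state for a new task. Kernel
 * threads get a synthesized pt_regs that makes ret_from_fork call the
 * requested function; user threads inherit a copy of the parent's
 * registers, an optional new stack pointer, and optionally a new TLS.
 */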
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
        set_tsk_thread_flag(p, TIF_FORK);
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->sp = (unsigned long)childregs;
                childregs->ss = __KERNEL_DS;
                childregs->bx = sp; /* function */
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
                childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                return 0;
        }
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (is_ia32_task())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

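/*
 * Reset the register state so that the next return to user mode begins
 * execution at @new_ip with stack @new_sp under the given segments.
 */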
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip = new_ip;
        regs->sp = new_sp;
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here; set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;
        fpu_switch_t fpu_switch;

        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

        /*
         * We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, prev_fsindex);
        savesegment(gs, prev_gsindex);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here. This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must be
         * done before fpu__restore(), so the TS bit is up to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT. The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         *
         * These are even more complicated than DS and ES: they have
         * 64-bit bases that are controlled by arch_prctl. The bases
         * don't necessarily match the selectors, as user code can do
         * any number of things to cause them to be inconsistent.
         *
         * We don't promise to preserve the bases if the selectors are
         * nonzero. We also don't promise to preserve the base if the
         * selector is zero and the base doesn't match whatever was
         * most recently passed to ARCH_SET_FS/GS. (If/when the
         * FSGSBASE instructions are enabled, we'll need to offer
         * stronger guarantees.)
         *
         * As an invariant,
         * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) is
         * impossible.
         */
        if (next->fsindex) {
                /* Loading a nonzero value into FS sets the index and base. */
                loadsegment(fs, next->fsindex);
        } else {
                if (next->fs) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_fsindex)
                                loadsegment(fs, 0);
                        wrmsrl(MSR_FS_BASE, next->fs);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 */
                                loadsegment(fs, __USER_DS);
                                loadsegment(fs, 0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_FS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->fs || prev_fsindex)
                                        loadsegment(fs, 0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_fsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_fsindex)
                prev->fs = 0;
        prev->fsindex = prev_fsindex;

        if (next->gsindex) {
                /* Loading a nonzero value into GS sets the index and base. */
                load_gs_index(next->gsindex);
        } else {
                if (next->gs) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_gsindex)
                                load_gs_index(0);
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 *
                                 * This contains a pointless SWAPGS pair.
                                 * Fixing it would involve an explicit check
                                 * for Xen or a new pvop.
                                 */
                                load_gs_index(__USER_DS);
                                load_gs_index(0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_GS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->gs || prev_gsindex)
                                        load_gs_index(0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_gsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_gsindex)
                prev->gs = 0;
        prev->gsindex = prev_gsindex;

        switch_fpu_finish(next_fpu, fpu_switch);

        /*
         * Switch the per-CPU current task pointer (what used to be the
         * PDA on x86-64).
         */
        this_cpu_write(current_task, next_p);

        /* Reload sp0. This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps.
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL. We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor. As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths. Instead, we ensure that SS is never NULL in
                 * system call context. We do this by replacing NULL SS
                 * selectors at every context switch. SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt. Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes. Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        return prev_p;
}

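/*
 * Mark the current task as a native 64-bit process: clear all
 * compat/x32 thread flags and the mm's compat marker.
 */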
void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64-bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /*
         * TBD: this overwrites the user's setup; there should be two
         * separate bits. But 64-bit processes have always behaved this
         * way, so it's not too bad. The main problem is just that
         * 32-bit children are affected again.
         */
        current->personality &= ~READ_IMPLIES_EXEC;
}

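/*
 * Mark the current task as a 32-bit compat (ia32) or x32 process and
 * set up the thread state that in_compat_syscall() and the syscall
 * entry path rely on.
 */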
void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32-bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /*
                 * in_compat_syscall() uses the presence of the x32
                 * syscall bit flag to determine compat status.
                 */
                current_thread_info()->status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current_thread_info()->status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

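/*
 * Get or set the FS/GS base for @task. Small bases are installed via a
 * GDT TLS slot (cheaper to switch), large ones via the FS/GS base MSRs.
 *
 * A minimal userspace sketch of the ABI this implements (hypothetical
 * example, not part of this file; it avoids ARCH_SET_FS because glibc
 * keeps its TLS behind FS, while GS is unused by the 64-bit user ABI):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned long fsbase;
 *
 *		// For GET, addr is a pointer the base is written to.
 *		syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase);
 *		// For SET, addr is the base itself; a value <= 0xffffffff
 *		// exercises the GDT TLS-slot path below.
 *		syscall(SYS_arch_prctl, ARCH_SET_GS, 0x100000UL);
 *		printf("fs base: %#lx\n", fsbase);
 *		return 0;
 *	}
 */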
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /*
                 * Handle small bases via the GDT because that's faster
                 * to switch.
                 */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /*
                 * Not strictly needed for fs, but do it for symmetry
                 * with gs.
                 */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /*
                 * Handle small bases via the GDT because that's faster
                 * to switch.
                 */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /*
                                 * Set the selector to 0 to not confuse
                                 * __switch_to.
                                 */
                                loadsegment(fs, 0);
                                ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

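/* Report the saved user stack pointer for @task (e.g. for /proc stat). */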
unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}