x86/mm/pkeys: Do not skip PKRU register if debug registers are not used
arch/x86/kernel/process_64.c
/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

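/*
 * Per-CPU scratch slot used by the SYSCALL entry path to stash the user
 * RSP before the kernel stack is set up (see entry_64.S).
 */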
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
               regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
              (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

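        /*
         * Unlike the debug registers above, PKRU is printed whenever the
         * CPU supports protection keys, even if it still holds its
         * default value.
         */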
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->size);
                        BUG();
                }
#endif
        }
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
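        /*
         * TIF_FORK makes the switch_to() code send this child through
         * ret_from_fork the first time it is switched in.
         */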
        set_tsk_thread_flag(p, TIF_FORK);
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->sp = (unsigned long)childregs;
                childregs->ss = __KERNEL_DS;
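                /*
                 * ret_from_fork picks up a kernel thread's function from
                 * %rbx and its argument from %rbp (see entry_64.S).
                 */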
                childregs->bx = sp;     /* function */
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
                childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                return 0;
        }
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip = new_ip;
        regs->sp = new_sp;
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
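        /*
         * Force the return to user space to take the full IRET path so
         * that the register state set up above is actually restored.
         */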
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;
        fpu_switch_t fpu_switch;

        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, prev_fsindex);
        savesegment(gs, prev_gsindex);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here. This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must be
         * done before fpu__restore(), so the TS bit is up to date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT. The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         *
         * These are even more complicated than DS and ES: they have
         * 64-bit bases that are controlled by arch_prctl. The bases
         * don't necessarily match the selectors, as user code can do
         * any number of things to cause them to be inconsistent.
         *
         * We don't promise to preserve the bases if the selectors are
         * nonzero. We also don't promise to preserve the base if the
         * selector is zero and the base doesn't match whatever was
         * most recently passed to ARCH_SET_FS/GS. (If/when the
         * FSGSBASE instructions are enabled, we'll need to offer
         * stronger guarantees.)
         *
         * As an invariant,
         * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
         * impossible.
         */
        if (next->fsindex) {
                /* Loading a nonzero value into FS sets the index and base. */
                loadsegment(fs, next->fsindex);
        } else {
                if (next->fsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_fsindex)
                                loadsegment(fs, 0);
                        wrmsrl(MSR_FS_BASE, next->fsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 */
                                loadsegment(fs, __USER_DS);
                                loadsegment(fs, 0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_FS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->fsbase || prev_fsindex)
                                        loadsegment(fs, 0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_fsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_fsindex)
                prev->fsbase = 0;
        prev->fsindex = prev_fsindex;

        if (next->gsindex) {
                /* Loading a nonzero value into GS sets the index and base. */
                load_gs_index(next->gsindex);
        } else {
                if (next->gsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_gsindex)
                                load_gs_index(0);
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 *
                                 * This contains a pointless SWAPGS pair.
                                 * Fixing it would involve an explicit check
                                 * for Xen or a new pvop.
                                 */
                                load_gs_index(__USER_DS);
                                load_gs_index(0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_GS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->gsbase || prev_gsindex)
                                        load_gs_index(0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_gsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_gsindex)
                prev->gsbase = 0;
        prev->gsindex = prev_gsindex;

        switch_fpu_finish(next_fpu, fpu_switch);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);

        /* Reload esp0 and ss1. This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL. We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor. As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths. Instead, we ensure that SS is never NULL in
                 * system call context. We do this by replacing NULL SS
                 * selectors at every context switch. SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt. Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes. Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

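        /*
         * The returned task ends up as the 'prev'/last argument seen by
         * the context that resumes in next_p (see the switch_to() macro).
         */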
        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64-bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /* in_compat_syscall() uses the presence of the x32
                   syscall bit flag to determine compat status */
                current_thread_info()->status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current_thread_info()->status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = addr;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = addr;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
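
/*
 * Userspace reaches do_arch_prctl() through the arch_prctl(2) syscall.
 * A minimal, hypothetical userspace sketch, assuming glibc's syscall()
 * wrapper and the ARCH_* constants from <asm/prctl.h>:
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);   (read the FS base)
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, base);    (set the GS base)
 */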

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}