x86-64: move unlazy_fpu() into lazy cpu state part of context switch
[deliverable/linux.git] / arch / x86 / kernel / process_64.c
CommitLineData
1da177e4 1/*
1da177e4
LT
2 * Copyright (C) 1995 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * Gareth Hughes <gareth@valinux.com>, May 2000
6612538c 6 *
1da177e4
LT
7 * X86-64 port
8 * Andi Kleen.
76e4f660
AR
9 *
10 * CPU hotplug support - ashok.raj@intel.com
1da177e4
LT
11 */
12
13/*
14 * This file handles the architecture-dependent parts of process handling..
15 */
16
42059429 17#include <linux/stackprotector.h>
76e4f660 18#include <linux/cpu.h>
1da177e4
LT
19#include <linux/errno.h>
20#include <linux/sched.h>
6612538c 21#include <linux/fs.h>
1da177e4
LT
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/elfcore.h>
25#include <linux/smp.h>
26#include <linux/slab.h>
27#include <linux/user.h>
1da177e4 28#include <linux/interrupt.h>
6612538c 29#include <linux/utsname.h>
1da177e4 30#include <linux/delay.h>
6612538c 31#include <linux/module.h>
1da177e4 32#include <linux/ptrace.h>
95833c83 33#include <linux/notifier.h>
c6fd91f0 34#include <linux/kprobes.h>
1eeb66a1 35#include <linux/kdebug.h>
02290683 36#include <linux/tick.h>
529e25f6 37#include <linux/prctl.h>
7de08b4e
GP
38#include <linux/uaccess.h>
39#include <linux/io.h>
8b96f011 40#include <linux/ftrace.h>
48ec4d95 41#include <linux/dmi.h>
1da177e4 42
1da177e4
LT
43#include <asm/pgtable.h>
44#include <asm/system.h>
1da177e4
LT
45#include <asm/processor.h>
46#include <asm/i387.h>
47#include <asm/mmu_context.h>
1da177e4 48#include <asm/prctl.h>
1da177e4
LT
49#include <asm/desc.h>
50#include <asm/proto.h>
51#include <asm/ia32.h>
95833c83 52#include <asm/idle.h>
bbc1f698 53#include <asm/syscalls.h>
bf53de90 54#include <asm/ds.h>
1da177e4
LT
55
asmlinkage extern void ret_from_fork(void);

/* Per-CPU pointer to the task currently running on this CPU. */
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

/* Per-CPU slot holding the current task's user stack pointer
   (written in start_thread(), switched in __switch_to()). */
DEFINE_PER_CPU(unsigned long, old_rsp);
/* Per-CPU flag: non-zero while this CPU is inside the idle loop. */
static DEFINE_PER_CPU(unsigned char, is_idle);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

/* Notifier chain fired with IDLE_START/IDLE_END around idle periods. */
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
95833c83
AK
67
/* Add @n to the chain notified on idle entry/exit (see enter_idle()). */
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
73
/* Remove @n from the idle notifier chain. */
void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
95833c83 79
95833c83
AK
/*
 * Mark this CPU as idle and fire the IDLE_START notifiers.
 * Called from the idle loop with interrupts disabled (see cpu_idle()).
 */
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
85
static void __exit_idle(void)
{
	/*
	 * Atomically test-and-clear the idle flag so that IDLE_END is
	 * delivered exactly once even if both the waking interrupt and
	 * the idle loop itself call in here.
	 */
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
92
93/* Called from interrupts to signify idle end */
94void exit_idle(void)
95{
a15da49d
AK
96 /* idle loop has pid 0 */
97 if (current->pid)
95833c83
AK
98 return;
99 __exit_idle();
100}
101
#ifndef CONFIG_SMP
/* On UP builds CPU hotplug is impossible, so an offline CPU is a bug. */
static inline void play_dead(void)
{
	BUG();
}
#endif
76e4f660 108
1da177e4
LT
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		/* need_resched() is set: leave nohz mode and reschedule. */
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
160
/* Prints also some state that isn't saved in the pt_regs.
 * @all: when non-zero, additionally dump segment bases, control
 *       registers and debug registers.
 */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;
	const char *board;

	printk("\n");
	print_modules();
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version, board);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	/* Segment selectors are not in pt_regs; read them live. */
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
229
230void show_regs(struct pt_regs *regs)
231{
8092c654 232 printk(KERN_INFO "CPU %d:", smp_processor_id());
e2ce07c8 233 __show_regs(regs, 1);
bc850d6b 234 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
1da177e4
LT
235}
236
1da177e4
LT
237void release_thread(struct task_struct *dead_task)
238{
239 if (dead_task->mm) {
240 if (dead_task->mm->context.size) {
241 printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
242 dead_task->comm,
243 dead_task->mm->context.ldt,
244 dead_task->mm->context.size);
245 BUG();
246 }
247 }
248}
249
/* Install a flat 32-bit TLS segment with base @addr into GDT slot @tls
 * of task @t. */
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,		/* with page granularity: 4GB */
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}
263
/* Return the segment base stored in TLS slot @tls of task @t. */
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
268
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	/* Flush any lazy FPU state into the task struct before it is
	   duplicated, so the child starts from a consistent image. */
	unlazy_fpu(tsk);
}
277
/*
 * Set up the kernel stack, register image and per-thread state of the
 * new task @p during fork/clone. Returns 0 or -errno.
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	/* The child's pt_regs frame sits at the top of its kernel stack. */
	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	/* The child observes a 0 return value from fork/clone. */
	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)	/* kernel thread: run on its own kernel stack */
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	/* Snapshot the parent's live segment selectors for the child. */
	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	/* Duplicate the parent's I/O permission bitmap, if it has one. */
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}

	/* The child does not inherit BTS/DS tracing or debugctl state. */
	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
	p->thread.ds_ctx = NULL;

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	err = 0;
out:
	/* Error path: release the I/O bitmap allocated above, if any. */
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
349
/*
 * Prepare the register/segment state of the current task to begin
 * executing a freshly exec'ed 64-bit program at @new_ip/@new_sp.
 */
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	/* Reset all data segment selectors for the new image. */
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* IF set: start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
370
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
494
495/*
496 * sys_execve() executes a new program.
497 */
6612538c 498asmlinkage
1da177e4 499long sys_execve(char __user *name, char __user * __user *argv,
5d119b2c 500 char __user * __user *envp, struct pt_regs *regs)
1da177e4
LT
501{
502 long error;
7de08b4e 503 char *filename;
1da177e4
LT
504
505 filename = getname(name);
506 error = PTR_ERR(filename);
5d119b2c 507 if (IS_ERR(filename))
1da177e4 508 return error;
5d119b2c 509 error = do_execve(filename, argv, envp, regs);
1da177e4
LT
510 putname(filename);
511 return error;
512}
513
/* Configure the current task as a native 64-bit process at exec time. */
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit childs are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
527
a88cde13
AK
528asmlinkage long
529sys_clone(unsigned long clone_flags, unsigned long newsp,
530 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
1da177e4
LT
531{
532 if (!newsp)
65ea5b03 533 newsp = regs->sp;
1da177e4
LT
534 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
535}
536
1da177e4
LT
537unsigned long get_wchan(struct task_struct *p)
538{
539 unsigned long stack;
7de08b4e 540 u64 fp, ip;
1da177e4
LT
541 int count = 0;
542
7de08b4e
GP
543 if (!p || p == current || p->state == TASK_RUNNING)
544 return 0;
57eafdc2 545 stack = (unsigned long)task_stack_page(p);
e1e23bb0 546 if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
1da177e4 547 return 0;
faca6227 548 fp = *(u64 *)(p->thread.sp);
7de08b4e 549 do {
a88cde13 550 if (fp < (unsigned long)stack ||
e1e23bb0 551 fp >= (unsigned long)stack+THREAD_SIZE)
7de08b4e 552 return 0;
65ea5b03
PA
553 ip = *(u64 *)(fp+8);
554 if (!in_sched_functions(ip))
555 return ip;
7de08b4e
GP
556 fp = *(u64 *)fp;
557 } while (count++ < 16);
1da177e4
LT
558 return 0;
559}
560
/*
 * Get/set the FS or GS segment base of @task.
 * @code: ARCH_SET_FS/ARCH_SET_GS store @addr as the new base;
 *        ARCH_GET_FS/ARCH_GET_GS write the base to user pointer @addr.
 * Returns 0 or -errno.
 */
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			/* 64-bit base: selector 0 plus the GS-base MSR. */
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			/* MSR holds the base only while a selector is set. */
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1da177e4
LT
655
/* arch_prctl(2) syscall entry: operate on the current task. */
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
660
This page took 0.570521 seconds and 5 git commands to generate.