x86: process.c, remove useless headers
[deliverable/linux.git] / arch / x86 / kernel / process_64.c
CommitLineData
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
42059429 17#include <linux/stackprotector.h>
76e4f660 18#include <linux/cpu.h>
1da177e4
LT
19#include <linux/errno.h>
20#include <linux/sched.h>
6612538c 21#include <linux/fs.h>
1da177e4
LT
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/elfcore.h>
25#include <linux/smp.h>
26#include <linux/slab.h>
27#include <linux/user.h>
1da177e4 28#include <linux/interrupt.h>
6612538c 29#include <linux/utsname.h>
1da177e4 30#include <linux/delay.h>
6612538c 31#include <linux/module.h>
1da177e4 32#include <linux/ptrace.h>
95833c83 33#include <linux/notifier.h>
c6fd91f0 34#include <linux/kprobes.h>
1eeb66a1 35#include <linux/kdebug.h>
02290683 36#include <linux/tick.h>
529e25f6 37#include <linux/prctl.h>
7de08b4e
GP
38#include <linux/uaccess.h>
39#include <linux/io.h>
8b96f011 40#include <linux/ftrace.h>
48ec4d95 41#include <linux/dmi.h>
1da177e4 42
1da177e4
LT
43#include <asm/pgtable.h>
44#include <asm/system.h>
1da177e4
LT
45#include <asm/processor.h>
46#include <asm/i387.h>
47#include <asm/mmu_context.h>
1da177e4 48#include <asm/prctl.h>
1da177e4
LT
49#include <asm/desc.h>
50#include <asm/proto.h>
51#include <asm/ia32.h>
95833c83 52#include <asm/idle.h>
bbc1f698 53#include <asm/syscalls.h>
bf53de90 54#include <asm/ds.h>
1da177e4
LT
55
56asmlinkage extern void ret_from_fork(void);
57
c6f5e0ac
BG
58DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
59EXPORT_PER_CPU_SYMBOL(current_task);
60
3d1e42a7 61DEFINE_PER_CPU(unsigned long, old_rsp);
c2558e0e 62static DEFINE_PER_CPU(unsigned char, is_idle);
3d1e42a7 63
1da177e4
LT
64unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
65
e041c683 66static ATOMIC_NOTIFIER_HEAD(idle_notifier);
95833c83
AK
67
/*
 * Register a callback on the atomic idle notifier chain.  Registered
 * callbacks receive IDLE_START / IDLE_END events from enter_idle() /
 * __exit_idle() below.
 */
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

/* Remove a previously registered idle notifier. */
void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
95833c83 79
95833c83
AK
/* Mark this CPU idle and broadcast IDLE_START to the notifier chain. */
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

/*
 * Leave the idle state.  The per-cpu is_idle flag is test-and-cleared
 * atomically so that IDLE_END is broadcast exactly once per idle period,
 * even though this can be reached both from the idle loop itself and
 * from exit_idle() in interrupt context.
 */
static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
92
93/* Called from interrupts to signify idle end */
94void exit_idle(void)
95{
a15da49d
AK
96 /* idle loop has pid 0 */
97 if (current->pid)
95833c83
AK
98 return;
99 __exit_idle();
100}
101
#ifndef CONFIG_SMP
/*
 * On UP kernels a CPU can never go offline, so reaching play_dead()
 * from the idle loop indicates a logic error.
 */
static inline void play_dead(void)
{
	BUG();
}
#endif
76e4f660 108
1da177e4
LT
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* TS_POLLING: the scheduler may set TIF_NEED_RESCHED without an IPI */
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			/* order the need_resched() test against remote stores */
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
160
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;
	const char *board;

	printk("\n");
	print_modules();
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version, board);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	/* Segment selectors aren't in pt_regs; read them from the CPU. */
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	/* Short form: stop before the control/debug register dump. */
	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
229
/* Full register dump plus a stack backtrace starting just above pt_regs. */
void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs, 1);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
236
1da177e4
LT
237void release_thread(struct task_struct *dead_task)
238{
239 if (dead_task->mm) {
240 if (dead_task->mm->context.size) {
241 printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
242 dead_task->comm,
243 dead_task->mm->context.ldt,
244 dead_task->mm->context.size);
245 BUG();
246 }
247 }
248}
249
250static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
251{
6612538c 252 struct user_desc ud = {
1da177e4
LT
253 .base_addr = addr,
254 .limit = 0xfffff,
255 .seg_32bit = 1,
256 .limit_in_pages = 1,
257 .useable = 1,
258 };
ade1af77 259 struct desc_struct *desc = t->thread.tls_array;
1da177e4 260 desc += tls;
80fbb69a 261 fill_ldt(desc, &ud);
1da177e4
LT
262}
263
/* Return the segment base stored in slot @tls of the task's TLS array. */
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
268
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.  Flushing the lazy FPU state to the
 * task struct ensures copy_thread() duplicates up-to-date state.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
277
/*
 * Set up the kernel-side thread state of a freshly forked child:
 * child pt_regs at the top of its kernel stack, inherited segment
 * state, an optional private I/O bitmap copy, and an optional new
 * TLS (CLONE_SETTLS).  Returns 0 or -errno.
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	/* pt_regs live at the very top of the child's kernel stack. */
	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	/* Child observes a 0 return value from fork/clone. */
	childregs->ax = 0;
	childregs->sp = sp;
	/* sp == ~0UL means "kernel thread": run on the kernel stack. */
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	/* Make the child return through ret_from_fork. */
	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	/* Give the child its own copy of the parent's I/O permission bitmap. */
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}

	ds_copy_thread(p, me);

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	err = 0;
out:
	/* On failure, release the bitmap copy allocated above. */
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
348
513ad84b
IM
/*
 * Reset user-visible CPU state for exec(): clear all data segment
 * selectors, point the registers at the new program's entry and stack,
 * and drop any FPU/extended state inherited from the old image.
 */
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* EFLAGS.IF — start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
369
1da177e4
LT
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	/* Same dance for GS (kernel GS base MSR instead of FS base). */
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
493
/*
 * sys_execve() executes a new program.
 * Copies the pathname in from user space, then hands off to the
 * generic do_execve().  Returns 0 on success or a negative errno
 * (including a failed getname() translated via PTR_ERR).
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}
512
/* Put an exec'ing task into native 64-bit mode. */
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit childs are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
526
a88cde13
AK
527asmlinkage long
528sys_clone(unsigned long clone_flags, unsigned long newsp,
529 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
1da177e4
LT
530{
531 if (!newsp)
65ea5b03 532 newsp = regs->sp;
1da177e4
LT
533 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
534}
535
1da177e4
LT
/*
 * Best-effort "where is this task sleeping?" lookup for /proc.
 * Walks the sleeping task's saved frame-pointer chain (at most 16
 * frames) and returns the first return address outside the scheduler;
 * returns 0 for running/current tasks or if the walk leaves the stack.
 * NOTE(review): inherently racy against the task waking up — callers
 * treat the result as advisory only.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	/* Saved sp must lie within the task's own kernel stack. */
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);	/* return address sits above saved fp */
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;	/* follow the frame-pointer chain */
	} while (count++ < 16);
	return 0;
}
559
/*
 * Get or set the FS/GS segment bases of @task (ARCH_SET_FS/GS,
 * ARCH_GET_FS/GS).  For SET, @addr is the new base: bases that fit in
 * 32 bits go through a GDT/TLS slot (cheaper to switch), larger ones
 * through the FS/GS base MSRs.  For GET, @addr is a user pointer the
 * current base is written to.  When @task == current the hardware
 * state is updated immediately as well.  Returns 0 or -errno.
 */
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			/* Live MSR value only valid while selector is 0. */
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1da177e4
LT
654
/* arch_prctl() system call entry: operate on the calling task. */
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
659
This page took 0.673883 seconds and 5 git commands to generate.