Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86-64/kernel/process.c | |
3 | * | |
4 | * Copyright (C) 1995 Linus Torvalds | |
5 | * | |
6 | * Pentium III FXSR, SSE support | |
7 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
8 | * | |
9 | * X86-64 port | |
10 | * Andi Kleen. | |
76e4f660 AR |
11 | * |
12 | * CPU hotplug support - ashok.raj@intel.com | |
1da177e4 LT |
13 | * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $ |
14 | */ | |
15 | ||
16 | /* | |
17 | * This file handles the architecture-dependent parts of process handling.. | |
18 | */ | |
19 | ||
20 | #include <stdarg.h> | |
21 | ||
76e4f660 | 22 | #include <linux/cpu.h> |
1da177e4 LT |
23 | #include <linux/errno.h> |
24 | #include <linux/sched.h> | |
25 | #include <linux/kernel.h> | |
26 | #include <linux/mm.h> | |
27 | #include <linux/elfcore.h> | |
28 | #include <linux/smp.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/user.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/a.out.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/delay.h> | |
1da177e4 LT |
35 | #include <linux/ptrace.h> |
36 | #include <linux/utsname.h> | |
37 | #include <linux/random.h> | |
95833c83 | 38 | #include <linux/notifier.h> |
c6fd91f0 | 39 | #include <linux/kprobes.h> |
1da177e4 LT |
40 | |
41 | #include <asm/uaccess.h> | |
42 | #include <asm/pgtable.h> | |
43 | #include <asm/system.h> | |
44 | #include <asm/io.h> | |
45 | #include <asm/processor.h> | |
46 | #include <asm/i387.h> | |
47 | #include <asm/mmu_context.h> | |
48 | #include <asm/pda.h> | |
49 | #include <asm/prctl.h> | |
50 | #include <asm/kdebug.h> | |
51 | #include <asm/desc.h> | |
52 | #include <asm/proto.h> | |
53 | #include <asm/ia32.h> | |
95833c83 | 54 | #include <asm/idle.h> |
1da177e4 LT |
55 | |
56 | asmlinkage extern void ret_from_fork(void); | |
57 | ||
58 | unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED; | |
59 | ||
1da177e4 LT |
60 | unsigned long boot_option_idle_override = 0; |
61 | EXPORT_SYMBOL(boot_option_idle_override); | |
62 | ||
63 | /* | |
64 | * Power management idle function, if any. | |
65 | */ | |
66 | void (*pm_idle)(void); | |
67 | static DEFINE_PER_CPU(unsigned int, cpu_idle_state); | |
68 | ||
e041c683 | 69 | static ATOMIC_NOTIFIER_HEAD(idle_notifier); |
95833c83 AK |
70 | |
71 | void idle_notifier_register(struct notifier_block *n) | |
72 | { | |
e041c683 | 73 | atomic_notifier_chain_register(&idle_notifier, n); |
95833c83 AK |
74 | } |
75 | EXPORT_SYMBOL_GPL(idle_notifier_register); | |
76 | ||
77 | void idle_notifier_unregister(struct notifier_block *n) | |
78 | { | |
e041c683 | 79 | atomic_notifier_chain_unregister(&idle_notifier, n); |
95833c83 AK |
80 | } |
81 | EXPORT_SYMBOL(idle_notifier_unregister); | |
82 | ||
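
The two exports above are the producer side of a small notification API: the enter_idle()/exit_idle() helpers below fire IDLE_START and IDLE_END events on the idle_notifier chain. A minimal, hedged sketch of a consumer follows; the names my_idle_callback, my_idle_nb and my_idle_watch_init are hypothetical, only idle_notifier_register(), IDLE_START and IDLE_END come from this file and <asm/idle.h>.

```c
/* Hedged sketch of an idle-notifier consumer; callback and struct names are
 * hypothetical, the registration API is the one exported above. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/idle.h>

static int my_idle_callback(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	if (action == IDLE_START)
		printk(KERN_DEBUG "CPU %d entering idle\n", smp_processor_id());
	else if (action == IDLE_END)
		printk(KERN_DEBUG "CPU %d leaving idle\n", smp_processor_id());
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_callback,
};

static int __init my_idle_watch_init(void)
{
	idle_notifier_register(&my_idle_nb);
	return 0;
}
__initcall(my_idle_watch_init);
```
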
83 | enum idle_state { CPU_IDLE, CPU_NOT_IDLE }; | |
84 | static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE; | |
85 | ||
86 | void enter_idle(void) | |
87 | { | |
88 | __get_cpu_var(idle_state) = CPU_IDLE; | |
e041c683 | 89 | atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); |
95833c83 AK |
90 | } |
91 | ||
92 | static void __exit_idle(void) | |
93 | { | |
94 | __get_cpu_var(idle_state) = CPU_NOT_IDLE; | |
e041c683 | 95 | atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); |
95833c83 AK |
96 | } |
97 | ||
98 | /* Called from interrupts to signify idle end */ | |
99 | void exit_idle(void) | |
100 | { | |
101 | if (current->pid | read_pda(irqcount)) | |
102 | return; | |
103 | __exit_idle(); | |
104 | } | |
105 | ||
1da177e4 LT |
106 | /* |
107 | * We use this if we don't have any better | |
108 | * idle routine.. | |
109 | */ | |
cdb04527 | 110 | static void default_idle(void) |
1da177e4 | 111 | { |
64c7c8f8 NP |
112 | local_irq_enable(); |
113 | ||
2d52ede9 AK |
114 | clear_thread_flag(TIF_POLLING_NRFLAG); |
115 | smp_mb__after_clear_bit(); | |
116 | while (!need_resched()) { | |
117 | local_irq_disable(); | |
118 | if (!need_resched()) | |
119 | safe_halt(); | |
120 | else | |
121 | local_irq_enable(); | |
1da177e4 | 122 | } |
2d52ede9 | 123 | set_thread_flag(TIF_POLLING_NRFLAG); |
1da177e4 LT |
124 | } |
125 | ||
126 | /* | |
127 | * On SMP it's slightly faster (but much more power-consuming!) | |
128 | * to poll the ->need_resched flag instead of waiting for the | |
129 | * cross-CPU IPI to arrive. Use this option with caution. | |
130 | */ | |
131 | static void poll_idle (void) | |
132 | { | |
1da177e4 LT |
133 | local_irq_enable(); |
134 | ||
64c7c8f8 NP |
135 | asm volatile( |
136 | "2:" | |
137 | "testl %0,%1;" | |
138 | "rep; nop;" | |
139 | "je 2b;" | |
140 | : : | |
141 | "i" (_TIF_NEED_RESCHED), | |
142 | "m" (current_thread_info()->flags)); | |
1da177e4 LT |
143 | } |
144 | ||
145 | void cpu_idle_wait(void) | |
146 | { | |
147 | unsigned int cpu, this_cpu = get_cpu(); | |
148 | cpumask_t map; | |
149 | ||
150 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | |
151 | put_cpu(); | |
152 | ||
153 | cpus_clear(map); | |
154 | for_each_online_cpu(cpu) { | |
155 | per_cpu(cpu_idle_state, cpu) = 1; | |
156 | cpu_set(cpu, map); | |
157 | } | |
158 | ||
159 | __get_cpu_var(cpu_idle_state) = 0; | |
160 | ||
161 | wmb(); | |
162 | do { | |
163 | ssleep(1); | |
164 | for_each_online_cpu(cpu) { | |
a88cde13 AK |
165 | if (cpu_isset(cpu, map) && |
166 | !per_cpu(cpu_idle_state, cpu)) | |
1da177e4 LT |
167 | cpu_clear(cpu, map); |
168 | } | |
169 | cpus_and(map, map, cpu_online_map); | |
170 | } while (!cpus_empty(map)); | |
171 | } | |
172 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | |
173 | ||
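
cpu_idle_wait() exists so that code which changes the pm_idle pointer can be reasonably sure no CPU is still running the routine it just replaced: the call only returns once every online CPU has passed through the top of its idle loop and cleared its cpu_idle_state flag. A hedged sketch of that pattern follows; my_new_idle() and install_my_idle() are hypothetical stand-ins for a real idle driver, while pm_idle and cpu_idle_wait() are the symbols defined in this file.

```c
/* Sketch only: swapping in a new idle routine and draining the old one. */
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/system.h>

extern void (*pm_idle)(void);

static void my_new_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

static void install_my_idle(void)
{
	pm_idle = my_new_idle;
	/* Wait until every online CPU has gone through its idle loop once,
	 * so the routine we just replaced should no longer be executing
	 * anywhere. */
	cpu_idle_wait();
}
```
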
76e4f660 AR |
174 | #ifdef CONFIG_HOTPLUG_CPU |
175 | DECLARE_PER_CPU(int, cpu_state); | |
176 | ||
177 | #include <asm/nmi.h> | |
1fa744e6 | 178 | /* We halt the CPU with physical CPU hotplug */ |
76e4f660 AR |
179 | static inline void play_dead(void) |
180 | { | |
181 | idle_task_exit(); | |
182 | wbinvd(); | |
183 | mb(); | |
184 | /* Ack it */ | |
185 | __get_cpu_var(cpu_state) = CPU_DEAD; | |
186 | ||
1fa744e6 | 187 | local_irq_disable(); |
76e4f660 | 188 | while (1) |
1fa744e6 | 189 | halt(); |
76e4f660 AR |
190 | } |
191 | #else | |
192 | static inline void play_dead(void) | |
193 | { | |
194 | BUG(); | |
195 | } | |
196 | #endif /* CONFIG_HOTPLUG_CPU */ | |
197 | ||
1da177e4 LT |
198 | /* |
199 | * The idle thread. There's no useful work to be | |
200 | * done, so just try to conserve power and have a | |
201 | * low exit latency (ie sit in a loop waiting for | |
202 | * somebody to say that they'd like to reschedule) | |
203 | */ | |
204 | void cpu_idle (void) | |
205 | { | |
64c7c8f8 NP |
206 | set_thread_flag(TIF_POLLING_NRFLAG); |
207 | ||
1da177e4 LT |
208 | /* endless idle loop with no priority at all */ |
209 | while (1) { | |
210 | while (!need_resched()) { | |
211 | void (*idle)(void); | |
212 | ||
213 | if (__get_cpu_var(cpu_idle_state)) | |
214 | __get_cpu_var(cpu_idle_state) = 0; | |
215 | ||
216 | rmb(); | |
217 | idle = pm_idle; | |
218 | if (!idle) | |
219 | idle = default_idle; | |
76e4f660 AR |
220 | if (cpu_is_offline(smp_processor_id())) |
221 | play_dead(); | |
95833c83 | 222 | enter_idle(); |
1da177e4 | 223 | idle(); |
95833c83 | 224 | __exit_idle(); |
1da177e4 LT |
225 | } |
226 | ||
5bfb5d69 | 227 | preempt_enable_no_resched(); |
1da177e4 | 228 | schedule(); |
5bfb5d69 | 229 | preempt_disable(); |
1da177e4 LT |
230 | } |
231 | } | |
232 | ||
233 | /* | |
234 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | |
235 | * which can obviate the IPI used to trigger a check of need_resched. | |
236 | * We execute MONITOR against need_resched and enter optimized wait state | |
237 | * through MWAIT. Whenever someone changes need_resched, we would be woken | |
238 | * up from MWAIT (without an IPI). | |
239 | */ | |
240 | static void mwait_idle(void) | |
241 | { | |
242 | local_irq_enable(); | |
243 | ||
64c7c8f8 NP |
244 | while (!need_resched()) { |
245 | __monitor((void *)&current_thread_info()->flags, 0, 0); | |
246 | smp_mb(); | |
247 | if (need_resched()) | |
248 | break; | |
249 | __mwait(0, 0); | |
1da177e4 LT |
250 | } |
251 | } | |
252 | ||
e6982c67 | 253 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) |
1da177e4 LT |
254 | { |
255 | static int printed; | |
256 | if (cpu_has(c, X86_FEATURE_MWAIT)) { | |
257 | /* | |
258 | * Skip if setup has overridden idle. | |
259 | * One CPU supports mwait => all CPUs support mwait. | |
260 | */ | |
261 | if (!pm_idle) { | |
262 | if (!printed) { | |
263 | printk("using mwait in idle threads.\n"); | |
264 | printed = 1; | |
265 | } | |
266 | pm_idle = mwait_idle; | |
267 | } | |
268 | } | |
269 | } | |
270 | ||
271 | static int __init idle_setup (char *str) | |
272 | { | |
273 | if (!strncmp(str, "poll", 4)) { | |
274 | printk("using polling idle threads.\n"); | |
275 | pm_idle = poll_idle; | |
276 | } | |
277 | ||
278 | boot_option_idle_override = 1; | |
279 | return 1; | |
280 | } | |
281 | ||
282 | __setup("idle=", idle_setup); | |
283 | ||
284 | /* Prints also some state that isn't saved in the pt_regs */ | |
285 | void __show_regs(struct pt_regs * regs) | |
286 | { | |
287 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | |
288 | unsigned int fsindex,gsindex; | |
289 | unsigned int ds,cs,es; | |
290 | ||
291 | printk("\n"); | |
292 | print_modules(); | |
9acf23c4 AK |
293 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
294 | current->pid, current->comm, print_tainted(), | |
295 | system_utsname.release, | |
296 | (int)strcspn(system_utsname.version, " "), | |
297 | system_utsname.version); | |
1da177e4 LT |
298 | printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); |
299 | printk_address(regs->rip); | |
a88cde13 AK |
300 | printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, |
301 | regs->eflags); | |
1da177e4 LT |
302 | printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", |
303 | regs->rax, regs->rbx, regs->rcx); | |
304 | printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", | |
305 | regs->rdx, regs->rsi, regs->rdi); | |
306 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", | |
307 | regs->rbp, regs->r8, regs->r9); | |
308 | printk("R10: %016lx R11: %016lx R12: %016lx\n", | |
309 | regs->r10, regs->r11, regs->r12); | |
310 | printk("R13: %016lx R14: %016lx R15: %016lx\n", | |
311 | regs->r13, regs->r14, regs->r15); | |
312 | ||
313 | asm("movl %%ds,%0" : "=r" (ds)); | |
314 | asm("movl %%cs,%0" : "=r" (cs)); | |
315 | asm("movl %%es,%0" : "=r" (es)); | |
316 | asm("movl %%fs,%0" : "=r" (fsindex)); | |
317 | asm("movl %%gs,%0" : "=r" (gsindex)); | |
318 | ||
319 | rdmsrl(MSR_FS_BASE, fs); | |
320 | rdmsrl(MSR_GS_BASE, gs); | |
321 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | |
322 | ||
323 | asm("movq %%cr0, %0": "=r" (cr0)); | |
324 | asm("movq %%cr2, %0": "=r" (cr2)); | |
325 | asm("movq %%cr3, %0": "=r" (cr3)); | |
326 | asm("movq %%cr4, %0": "=r" (cr4)); | |
327 | ||
328 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", | |
329 | fs,fsindex,gs,gsindex,shadowgs); | |
330 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); | |
331 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); | |
332 | } | |
333 | ||
334 | void show_regs(struct pt_regs *regs) | |
335 | { | |
c078d326 | 336 | printk("CPU %d:", smp_processor_id()); |
1da177e4 LT |
337 | __show_regs(regs); |
338 | show_trace(&regs->rsp); | |
339 | } | |
340 | ||
341 | /* | |
342 | * Free current thread data structures etc.. | |
343 | */ | |
344 | void exit_thread(void) | |
345 | { | |
346 | struct task_struct *me = current; | |
347 | struct thread_struct *t = &me->thread; | |
73649dab | 348 | |
1da177e4 LT |
349 | if (me->thread.io_bitmap_ptr) { |
350 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); | |
351 | ||
352 | kfree(t->io_bitmap_ptr); | |
353 | t->io_bitmap_ptr = NULL; | |
354 | /* | |
355 | * Careful, clear this in the TSS too: | |
356 | */ | |
357 | memset(tss->io_bitmap, 0xff, t->io_bitmap_max); | |
358 | t->io_bitmap_max = 0; | |
359 | put_cpu(); | |
360 | } | |
361 | } | |
362 | ||
363 | void flush_thread(void) | |
364 | { | |
365 | struct task_struct *tsk = current; | |
366 | struct thread_info *t = current_thread_info(); | |
367 | ||
368 | if (t->flags & _TIF_ABI_PENDING) | |
369 | t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); | |
370 | ||
371 | tsk->thread.debugreg0 = 0; | |
372 | tsk->thread.debugreg1 = 0; | |
373 | tsk->thread.debugreg2 = 0; | |
374 | tsk->thread.debugreg3 = 0; | |
375 | tsk->thread.debugreg6 = 0; | |
376 | tsk->thread.debugreg7 = 0; | |
377 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | |
378 | /* | |
379 | * Forget coprocessor state.. | |
380 | */ | |
381 | clear_fpu(tsk); | |
382 | clear_used_math(); | |
383 | } | |
384 | ||
385 | void release_thread(struct task_struct *dead_task) | |
386 | { | |
387 | if (dead_task->mm) { | |
388 | if (dead_task->mm->context.size) { | |
389 | printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", | |
390 | dead_task->comm, | |
391 | dead_task->mm->context.ldt, | |
392 | dead_task->mm->context.size); | |
393 | BUG(); | |
394 | } | |
395 | } | |
396 | } | |
397 | ||
398 | static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) | |
399 | { | |
400 | struct user_desc ud = { | |
401 | .base_addr = addr, | |
402 | .limit = 0xfffff, | |
403 | .seg_32bit = 1, | |
404 | .limit_in_pages = 1, | |
405 | .useable = 1, | |
406 | }; | |
407 | struct n_desc_struct *desc = (void *)t->thread.tls_array; | |
408 | desc += tls; | |
409 | desc->a = LDT_entry_a(&ud); | |
410 | desc->b = LDT_entry_b(&ud); | |
411 | } | |
412 | ||
413 | static inline u32 read_32bit_tls(struct task_struct *t, int tls) | |
414 | { | |
415 | struct desc_struct *desc = (void *)t->thread.tls_array; | |
416 | desc += tls; | |
417 | return desc->base0 | | |
418 | (((u32)desc->base1) << 16) | | |
419 | (((u32)desc->base2) << 24); | |
420 | } | |
421 | ||
422 | /* | |
423 | * This gets called before we allocate a new thread and copy | |
424 | * the current task into it. | |
425 | */ | |
426 | void prepare_to_copy(struct task_struct *tsk) | |
427 | { | |
428 | unlazy_fpu(tsk); | |
429 | } | |
430 | ||
431 | int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, | |
432 | unsigned long unused, | |
433 | struct task_struct * p, struct pt_regs * regs) | |
434 | { | |
435 | int err; | |
436 | struct pt_regs * childregs; | |
437 | struct task_struct *me = current; | |
438 | ||
a88cde13 | 439 | childregs = ((struct pt_regs *) |
57eafdc2 | 440 | (THREAD_SIZE + task_stack_page(p))) - 1; |
1da177e4 LT |
441 | *childregs = *regs; |
442 | ||
443 | childregs->rax = 0; | |
444 | childregs->rsp = rsp; | |
a88cde13 | 445 | if (rsp == ~0UL) |
1da177e4 | 446 | childregs->rsp = (unsigned long)childregs; |
1da177e4 LT |
447 | |
448 | p->thread.rsp = (unsigned long) childregs; | |
449 | p->thread.rsp0 = (unsigned long) (childregs+1); | |
450 | p->thread.userrsp = me->thread.userrsp; | |
451 | ||
e4f17c43 | 452 | set_tsk_thread_flag(p, TIF_FORK); |
1da177e4 LT |
453 | |
454 | p->thread.fs = me->thread.fs; | |
455 | p->thread.gs = me->thread.gs; | |
456 | ||
fd51f666 L |
457 | asm("mov %%gs,%0" : "=m" (p->thread.gsindex)); |
458 | asm("mov %%fs,%0" : "=m" (p->thread.fsindex)); | |
459 | asm("mov %%es,%0" : "=m" (p->thread.es)); | |
460 | asm("mov %%ds,%0" : "=m" (p->thread.ds)); | |
1da177e4 LT |
461 | |
462 | if (unlikely(me->thread.io_bitmap_ptr != NULL)) { | |
463 | p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); | |
464 | if (!p->thread.io_bitmap_ptr) { | |
465 | p->thread.io_bitmap_max = 0; | |
466 | return -ENOMEM; | |
467 | } | |
a88cde13 AK |
468 | memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, |
469 | IO_BITMAP_BYTES); | |
1da177e4 LT |
470 | } |
471 | ||
472 | /* | |
473 | * Set a new TLS for the child thread? | |
474 | */ | |
475 | if (clone_flags & CLONE_SETTLS) { | |
476 | #ifdef CONFIG_IA32_EMULATION | |
477 | if (test_thread_flag(TIF_IA32)) | |
478 | err = ia32_child_tls(p, childregs); | |
479 | else | |
480 | #endif | |
481 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); | |
482 | if (err) | |
483 | goto out; | |
484 | } | |
485 | err = 0; | |
486 | out: | |
487 | if (err && p->thread.io_bitmap_ptr) { | |
488 | kfree(p->thread.io_bitmap_ptr); | |
489 | p->thread.io_bitmap_max = 0; | |
490 | } | |
491 | return err; | |
492 | } | |
493 | ||
494 | /* | |
495 | * This special macro can be used to load a debugging register | |
496 | */ | |
2b514e74 | 497 | #define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r) |
1da177e4 LT |
498 | |
499 | /* | |
500 | * switch_to(x,y) should switch tasks from x to y. | |
501 | * | |
502 | * This could still be optimized: | |
503 | * - fold all the options into a flag word and test it with a single test. | |
504 | * - could test fs/gs bitsliced | |
099f318b AK |
505 | * |
506 | * Kprobes not supported here. Set the probe on schedule instead. | |
1da177e4 | 507 | */ |
099f318b | 508 | __kprobes struct task_struct * |
a88cde13 | 509 | __switch_to(struct task_struct *prev_p, struct task_struct *next_p) |
1da177e4 LT |
510 | { |
511 | struct thread_struct *prev = &prev_p->thread, | |
512 | *next = &next_p->thread; | |
513 | int cpu = smp_processor_id(); | |
514 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
515 | ||
1da177e4 LT |
516 | /* |
517 | * Reload esp0, LDT and the page table pointer: | |
518 | */ | |
519 | tss->rsp0 = next->rsp0; | |
520 | ||
521 | /* | |
522 | * Switch DS and ES. | |
523 | * This won't pick up thread selector changes, but I guess that is ok. | |
524 | */ | |
fd51f666 | 525 | asm volatile("mov %%es,%0" : "=m" (prev->es)); |
1da177e4 LT |
526 | if (unlikely(next->es | prev->es)) |
527 | loadsegment(es, next->es); | |
528 | ||
fd51f666 | 529 | asm volatile ("mov %%ds,%0" : "=m" (prev->ds)); |
1da177e4 LT |
530 | if (unlikely(next->ds | prev->ds)) |
531 | loadsegment(ds, next->ds); | |
532 | ||
533 | load_TLS(next, cpu); | |
534 | ||
535 | /* | |
536 | * Switch FS and GS. | |
537 | */ | |
538 | { | |
539 | unsigned fsindex; | |
540 | asm volatile("movl %%fs,%0" : "=r" (fsindex)); | |
541 | /* segment register != 0 always requires a reload. | |
542 | also reload when it has changed. | |
543 | when prev process used 64bit base always reload | |
544 | to avoid an information leak. */ | |
545 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | |
546 | loadsegment(fs, next->fsindex); | |
547 | /* check if the user used a selector != 0 | |
548 | * if yes clear 64bit base, since overloaded base | |
549 | * is always mapped to the Null selector | |
550 | */ | |
551 | if (fsindex) | |
552 | prev->fs = 0; | |
553 | } | |
554 | /* when next process has a 64bit base use it */ | |
555 | if (next->fs) | |
556 | wrmsrl(MSR_FS_BASE, next->fs); | |
557 | prev->fsindex = fsindex; | |
558 | } | |
559 | { | |
560 | unsigned gsindex; | |
561 | asm volatile("movl %%gs,%0" : "=r" (gsindex)); | |
562 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | |
563 | load_gs_index(next->gsindex); | |
564 | if (gsindex) | |
565 | prev->gs = 0; | |
566 | } | |
567 | if (next->gs) | |
568 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | |
569 | prev->gsindex = gsindex; | |
570 | } | |
571 | ||
572 | /* | |
45948d77 | 573 | * Switch the PDA and FPU contexts. |
1da177e4 LT |
574 | */ |
575 | prev->userrsp = read_pda(oldrsp); | |
576 | write_pda(oldrsp, next->userrsp); | |
577 | write_pda(pcurrent, next_p); | |
45948d77 JB |
578 | /* This must be here to ensure both math_state_restore() and |
579 | kernel_fpu_begin() work consistently. */ | |
580 | unlazy_fpu(prev_p); | |
a88cde13 | 581 | write_pda(kernelstack, |
57eafdc2 | 582 | task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); |
1da177e4 LT |
583 | |
584 | /* | |
585 | * Now maybe reload the debug registers | |
586 | */ | |
587 | if (unlikely(next->debugreg7)) { | |
588 | loaddebug(next, 0); | |
589 | loaddebug(next, 1); | |
590 | loaddebug(next, 2); | |
591 | loaddebug(next, 3); | |
592 | /* no 4 and 5 */ | |
593 | loaddebug(next, 6); | |
594 | loaddebug(next, 7); | |
595 | } | |
596 | ||
597 | ||
598 | /* | |
599 | * Handle the IO bitmap | |
600 | */ | |
601 | if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) { | |
602 | if (next->io_bitmap_ptr) | |
603 | /* | |
604 | * Copy the relevant range of the IO bitmap. | |
605 | * Normally this is 128 bytes or less: | |
606 | */ | |
607 | memcpy(tss->io_bitmap, next->io_bitmap_ptr, | |
608 | max(prev->io_bitmap_max, next->io_bitmap_max)); | |
609 | else { | |
610 | /* | |
611 | * Clear any possible leftover bits: | |
612 | */ | |
613 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | |
614 | } | |
615 | } | |
616 | ||
617 | return prev_p; | |
618 | } | |
619 | ||
620 | /* | |
621 | * sys_execve() executes a new program. | |
622 | */ | |
623 | asmlinkage | |
624 | long sys_execve(char __user *name, char __user * __user *argv, | |
625 | char __user * __user *envp, struct pt_regs regs) | |
626 | { | |
627 | long error; | |
628 | char * filename; | |
629 | ||
630 | filename = getname(name); | |
631 | error = PTR_ERR(filename); | |
632 | if (IS_ERR(filename)) | |
633 | return error; | |
634 | error = do_execve(filename, argv, envp, &regs); | |
635 | if (error == 0) { | |
636 | task_lock(current); | |
637 | current->ptrace &= ~PT_DTRACE; | |
638 | task_unlock(current); | |
639 | } | |
640 | putname(filename); | |
641 | return error; | |
642 | } | |
643 | ||
644 | void set_personality_64bit(void) | |
645 | { | |
646 | /* inherit personality from parent */ | |
647 | ||
648 | /* Make sure to be in 64bit mode */ | |
649 | clear_thread_flag(TIF_IA32); | |
650 | ||
651 | /* TBD: overwrites user setup. Should have two bits. | |
652 | But 64bit processes have always behaved this way, | |
653 | so it's not too bad. The main problem is just that | |
654 | 32bit children are affected again. */ | |
655 | current->personality &= ~READ_IMPLIES_EXEC; | |
656 | } | |
657 | ||
658 | asmlinkage long sys_fork(struct pt_regs *regs) | |
659 | { | |
660 | return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL); | |
661 | } | |
662 | ||
a88cde13 AK |
663 | asmlinkage long |
664 | sys_clone(unsigned long clone_flags, unsigned long newsp, | |
665 | void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) | |
1da177e4 LT |
666 | { |
667 | if (!newsp) | |
668 | newsp = regs->rsp; | |
669 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); | |
670 | } | |
671 | ||
672 | /* | |
673 | * This is trivial, and on the face of it looks like it | |
674 | * could equally well be done in user mode. | |
675 | * | |
676 | * Not so, for quite unobvious reasons - register pressure. | |
677 | * In user mode vfork() cannot have a stack frame, and if | |
678 | * done by calling the "clone()" system call directly, you | |
679 | * do not have enough call-clobbered registers to hold all | |
680 | * the information you need. | |
681 | */ | |
682 | asmlinkage long sys_vfork(struct pt_regs *regs) | |
683 | { | |
684 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0, | |
685 | NULL, NULL); | |
686 | } | |
687 | ||
688 | unsigned long get_wchan(struct task_struct *p) | |
689 | { | |
690 | unsigned long stack; | |
691 | u64 fp,rip; | |
692 | int count = 0; | |
693 | ||
694 | if (!p || p == current || p->state==TASK_RUNNING) | |
695 | return 0; | |
57eafdc2 | 696 | stack = (unsigned long)task_stack_page(p); |
1da177e4 LT |
697 | if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE) |
698 | return 0; | |
699 | fp = *(u64 *)(p->thread.rsp); | |
700 | do { | |
a88cde13 AK |
701 | if (fp < (unsigned long)stack || |
702 | fp > (unsigned long)stack+THREAD_SIZE) | |
1da177e4 LT |
703 | return 0; |
704 | rip = *(u64 *)(fp+8); | |
705 | if (!in_sched_functions(rip)) | |
706 | return rip; | |
707 | fp = *(u64 *)fp; | |
708 | } while (count++ < 16); | |
709 | return 0; | |
710 | } | |
711 | ||
712 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |
713 | { | |
714 | int ret = 0; | |
715 | int doit = task == current; | |
716 | int cpu; | |
717 | ||
718 | switch (code) { | |
719 | case ARCH_SET_GS: | |
84929801 | 720 | if (addr >= TASK_SIZE_OF(task)) |
1da177e4 LT |
721 | return -EPERM; |
722 | cpu = get_cpu(); | |
723 | /* handle small bases via the GDT because that's faster to | |
724 | switch. */ | |
725 | if (addr <= 0xffffffff) { | |
726 | set_32bit_tls(task, GS_TLS, addr); | |
727 | if (doit) { | |
728 | load_TLS(&task->thread, cpu); | |
729 | load_gs_index(GS_TLS_SEL); | |
730 | } | |
731 | task->thread.gsindex = GS_TLS_SEL; | |
732 | task->thread.gs = 0; | |
733 | } else { | |
734 | task->thread.gsindex = 0; | |
735 | task->thread.gs = addr; | |
736 | if (doit) { | |
a88cde13 AK |
737 | load_gs_index(0); |
738 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); | |
1da177e4 LT |
739 | } |
740 | } | |
741 | put_cpu(); | |
742 | break; | |
743 | case ARCH_SET_FS: | |
744 | /* Not strictly needed for fs, but do it for symmetry | |
745 | with gs */ | |
84929801 | 746 | if (addr >= TASK_SIZE_OF(task)) |
1da177e4 LT |
747 | return -EPERM; |
748 | cpu = get_cpu(); | |
749 | /* handle small bases via the GDT because that's faster to | |
750 | switch. */ | |
751 | if (addr <= 0xffffffff) { | |
752 | set_32bit_tls(task, FS_TLS, addr); | |
753 | if (doit) { | |
754 | load_TLS(&task->thread, cpu); | |
a88cde13 | 755 | asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL)); |
1da177e4 LT |
756 | } |
757 | task->thread.fsindex = FS_TLS_SEL; | |
758 | task->thread.fs = 0; | |
759 | } else { | |
760 | task->thread.fsindex = 0; | |
761 | task->thread.fs = addr; | |
762 | if (doit) { | |
763 | /* set the selector to 0 to not confuse | |
764 | __switch_to */ | |
a88cde13 AK |
765 | asm volatile("movl %0,%%fs" :: "r" (0)); |
766 | ret = checking_wrmsrl(MSR_FS_BASE, addr); | |
1da177e4 LT |
767 | } |
768 | } | |
769 | put_cpu(); | |
770 | break; | |
771 | case ARCH_GET_FS: { | |
772 | unsigned long base; | |
773 | if (task->thread.fsindex == FS_TLS_SEL) | |
774 | base = read_32bit_tls(task, FS_TLS); | |
a88cde13 | 775 | else if (doit) |
1da177e4 | 776 | rdmsrl(MSR_FS_BASE, base); |
a88cde13 | 777 | else |
1da177e4 LT |
778 | base = task->thread.fs; |
779 | ret = put_user(base, (unsigned long __user *)addr); | |
780 | break; | |
781 | } | |
782 | case ARCH_GET_GS: { | |
783 | unsigned long base; | |
784 | if (task->thread.gsindex == GS_TLS_SEL) | |
785 | base = read_32bit_tls(task, GS_TLS); | |
a88cde13 | 786 | else if (doit) |
1da177e4 | 787 | rdmsrl(MSR_KERNEL_GS_BASE, base); |
a88cde13 | 788 | else |
1da177e4 LT |
789 | base = task->thread.gs; |
790 | ret = put_user(base, (unsigned long __user *)addr); | |
791 | break; | |
792 | } | |
793 | ||
794 | default: | |
795 | ret = -EINVAL; | |
796 | break; | |
797 | } | |
798 | ||
799 | return ret; | |
800 | } | |
801 | ||
802 | long sys_arch_prctl(int code, unsigned long addr) | |
803 | { | |
804 | return do_arch_prctl(current, code, addr); | |
805 | } | |
806 | ||
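
From user space, do_arch_prctl() is reached through the arch_prctl(2) system call. The sketch below reads the calling thread's FS base with ARCH_GET_FS; it assumes a libc that exposes SYS_arch_prctl through syscall(2) and that <asm/prctl.h> provides the ARCH_* constants.

```c
/* Userspace sketch: query this thread's FS base via ARCH_GET_FS. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>	/* ARCH_SET_GS, ARCH_SET_FS, ARCH_GET_FS, ARCH_GET_GS */

int main(void)
{
	unsigned long fsbase = 0;

	/* For ARCH_GET_FS the kernel writes the base through the pointer
	 * passed as 'addr' (see the put_user() in do_arch_prctl above). */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) != 0) {
		perror("arch_prctl(ARCH_GET_FS)");
		return 1;
	}
	printf("FS base: %#lx\n", fsbase);
	return 0;
}
```
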
807 | /* | |
808 | * Capture the user space registers if the task is not running (in user space) | |
809 | */ | |
810 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | |
811 | { | |
812 | struct pt_regs *pp, ptregs; | |
813 | ||
bb049232 | 814 | pp = task_pt_regs(tsk); |
1da177e4 LT |
815 | |
816 | ptregs = *pp; | |
817 | ptregs.cs &= 0xffff; | |
818 | ptregs.ss &= 0xffff; | |
819 | ||
820 | elf_core_copy_regs(regs, &ptregs); | |
821 | ||
822 | return 1; | |
823 | } | |
824 | ||
825 | unsigned long arch_align_stack(unsigned long sp) | |
826 | { | |
827 | if (randomize_va_space) | |
828 | sp -= get_random_int() % 8192; | |
829 | return sp & ~0xf; | |
830 | } |