/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

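/*
 * When non-zero, HLT is not used in default_idle()
 * (see disable_hlt()/enable_hlt() below).
 */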
static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
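        /*
         * thread.esp points at the frame that switch_to() saved; the
         * saved return address is taken to sit three words up from
         * there (a switch_to() stack-layout assumption).
         */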
        return ((unsigned long *)tsk->thread.esp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                local_irq_disable();
                if (!need_resched()) {
                        ktime_t t0, t1;
                        u64 t0n, t1n;

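                        /*
                         * Time the hlt sleep so the scheduler clock
                         * can account for the idle period on wakeup.
                         */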
                        t0 = ktime_get();
                        t0n = ktime_to_ns(t0);
                        safe_halt();    /* enables interrupts racelessly */
                        local_irq_disable();
                        t1 = ktime_get();
                        t1n = ktime_to_ns(t1);
                        sched_clock_idle_wakeup_event(t1n - t0n);
                }
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        /* This must be done before dead CPU ack */
        cpu_exit_clear();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        current_thread_info()->status |= TS_POLLING;

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        check_pgt_cache();
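                        /*
                         * pm_idle may be changed after boot (the idle=
                         * option, select_idle_routine()), so reload it
                         * on every pass.
                         */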
                        rmb();
                        idle = pm_idle;

                        if (!idle)
                                idle = default_idle;

                        if (cpu_is_offline(cpu))
                                play_dead();

                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

static void do_nothing(void *unused)
{
}

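/*
 * Handshake with every online CPU's idle loop: mark each CPU's
 * cpu_idle_state, then wait until each CPU has passed through
 * cpu_idle() (which clears the flag) -- i.e. until every CPU is
 * running the current pm_idle handler.
 */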
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map, tmp = current->cpus_allowed;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
                /*
                 * We waited 1 sec, if a CPU still did not call idle
                 * it may be because it is in idle and not waking up
                 * because it has nothing to do.
                 * Give all the remaining CPUs a kick.
                 */
                smp_call_function_mask(map, do_nothing, 0, 0);
        } while (!cpus_empty(map));

        set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
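                /*
                 * Re-check after arming the monitor: a TIF_NEED_RESCHED
                 * write that landed before MONITOR would not wake the
                 * MWAIT below, so it must be caught here.
                 */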
                if (!need_resched())
                        __mwait(eax, ecx);
        }
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        local_irq_enable();
        mwait_idle_with_hints(0, 0);
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                printk("monitor/mwait feature present.\n");
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => all CPUs support mwait
                 */
                if (!pm_idle) {
                        printk("using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup(char *str)
{
        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
                if (smp_num_siblings > 1)
                        printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);

void __show_registers(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long esp;
        unsigned short ss, gs;

        if (user_mode_vm(regs)) {
                esp = regs->esp;
                ss = regs->xss & 0xffff;
                savesegment(gs, gs);
        } else {
                esp = (unsigned long) (&regs->esp);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk("\n");
        printk("Pid: %d, comm: %s %s (%s %.*s)\n",
                task_pid_nr(current), current->comm,
                print_tainted(), init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                0xffff & regs->xcs, regs->eip, regs->eflags,
                smp_processor_id());
        print_symbol("EIP is at %s\n", regs->eip);

        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff,
                regs->xfs & 0xffff, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR6: %08lx DR7: %08lx\n",
                d6, d7);
}

void show_regs(struct pt_regs *regs)
{
        __show_registers(regs, 1);
        show_trace(NULL, regs, &regs->esp);
}

/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.ebx = (unsigned long) fn;
        regs.edx = (unsigned long) arg;

        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
        regs.xfs = __KERNEL_PERCPU;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS | get_kernel_rpl();
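        /*
         * X86_EFLAGS_IF starts the thread with interrupts enabled;
         * 0x2 is EFLAGS bit 1, which is architecturally reserved and
         * always reads as set.
         */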
        regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
                struct task_struct *tsk = current;
                struct thread_struct *t = &tsk->thread;
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        clear_tsk_thread_flag(tsk, TIF_DEBUG);
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct task_struct *tsk;
        int err;

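        /*
         * The child's user-register frame sits at the top of its
         * kernel stack. Start from a copy of the parent's registers,
         * with eax forced to 0 so fork() returns 0 in the child.
         */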
        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;

        p->thread.esp = (unsigned long) childregs;
        p->thread.esp0 = (unsigned long) (childregs+1);

        p->thread.eip = (unsigned long) ret_from_fork;

        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)childregs->esi, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
        int i;

        /* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->thread.debugreg[i];

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs.ebx = regs->ebx;
        dump->regs.ecx = regs->ecx;
        dump->regs.edx = regs->edx;
        dump->regs.esi = regs->esi;
        dump->regs.edi = regs->edi;
        dump->regs.ebp = regs->ebp;
        dump->regs.eax = regs->eax;
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
        dump->regs.fs = regs->xfs;
        savesegment(gs, dump->regs.gs);
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
        dump->regs.cs = regs->xcs;
        dump->regs.eflags = regs->eflags;
        dump->regs.esp = regs->esp;
        dump->regs.ss = regs->xss;

        dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs ptregs = *task_pt_regs(tsk);
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
        ptregs.xss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

#ifdef CONFIG_SECCOMP
void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}
void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

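/*
 * Slow path of a context switch: debug registers, TSC visibility
 * (seccomp) and the I/O permission bitmap. Only reached when one of
 * the _TIF_WORK_CTXSW flags is set on either task (see __switch_to()).
 */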
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (next->debugctlmsr != prev->debugctlmsr)
                wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg[0], 0);
                set_debugreg(next->debugreg[1], 1);
                set_debugreg(next->debugreg[2], 2);
                set_debugreg(next->debugreg[3], 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg[6], 6);
                set_debugreg(next->debugreg[7], 7);
        }

#ifdef CONFIG_SECCOMP
        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }
#endif

        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }

        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * Previous owner of the bitmap (hence the bitmap content)
                 * matches the next task, we don't have to do anything but
                 * to set a valid offset in the TSS:
                 */
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS's I/O bitmap copy. We set an invalid offset here
         * and we let the task get a GPF in case an I/O instruction
         * is performed. The handler of the GPF will verify that the
         * faulting task has a valid I/O bitmap and, if true, does the
         * real copy and restarts the instruction. This will save us
         * redundant copies when the currently switched task does not
         * perform any I/O during its timeslice.
         */
        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);

        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(&next->i387.fxsave);

        /*
         * Reload esp0.
         */
        load_esp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry. No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel. Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs. This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed. In normal use, the flags restore
         * in the switch assembly will handle this. But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_leave_lazy_cpu_mode();

        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
         */
        if (next_p->fpu_counter > 5)
                math_state_restore();

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        x86_write_percpu(current_task, next_p);

        return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
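        /*
         * i386 clone() ABI, register-passed: ebx = clone flags,
         * ecx = new stack pointer, edx = parent TID pointer,
         * edi = child TID pointer (esi carries the TLS descriptor,
         * consumed in copy_thread() via CLONE_SETTLS).
         */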
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.ebx;
        newsp = regs.ecx;
        parent_tidptr = (int __user *)regs.edx;
        child_tidptr = (int __user *)regs.edi;
        if (!newsp)
                newsp = regs.esp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) regs.ebx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                        (char __user * __user *) regs.ecx,
                        (char __user * __user *) regs.edx,
                        &regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}

#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ebp, esp, eip;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        esp = p->thread.esp;
        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes ebp last. */
        ebp = *(unsigned long *) esp;
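        /*
         * Walk the frame-pointer chain: each frame stores the caller's
         * ebp at [ebp] and the return address at [ebp+4]. Stop at the
         * first return address outside the scheduler, bounding the
         * walk to 16 frames in case the chain is corrupt.
         */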
        do {
                if (ebp < stack_page || ebp > top_ebp+stack_page)
                        return 0;
                eip = *(unsigned long *) (ebp+4);
                if (!in_sched_functions(eip))
                        return eip;
                ebp = *(unsigned long *) ebp;
        } while (count++ < 16);
        return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
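        /*
         * Randomize the start of the stack within an 8KB window,
         * then round down to a 16-byte boundary.
         */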
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}