/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}

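/*
 * Note: the [3] index is tied to the frame that the switch_to() assembly
 * pushes before saving %esp into thread.sp (see the system.h reference in
 * get_wchan() near the end of this file); if that layout ever changes,
 * this offset must change with it.
 */
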
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>

static void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	numa_remove_cpu(cpu);
	c1e_remove_cpu(cpu);
}

/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	/* mask all interrupts, flush any and all caches, and halt */
	wbinvd_halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

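/*
 * Note: the CPU_DEAD store above is the dying CPU's half of the hotplug
 * handshake; the CPU driving the offline is expected to poll
 * per_cpu(cpu_state) for CPU_DEAD before treating the CPU as gone.
 */
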
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

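/*
 * Note: TS_POLLING (set at the top of cpu_idle()) tells the scheduler that
 * this CPU polls need_resched() while idle, so resched_task() can usually
 * skip the reschedule IPI and rely on the loop noticing the flag itself.
 */
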
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
		task_pid_nr(current), current->comm,
		print_tainted(), init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
		(u16)regs->cs, regs->ip, regs->flags,
		smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
		d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

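/*
 * Illustrative output of __show_registers() for a kernel-mode frame
 * (all values made up):
 *
 *	EIP: 0060:[<c0123456>] EFLAGS: 00010246 CPU: 0
 *	EAX: 00000000 EBX: c12345a0 ECX: 00000001 EDX: 0000000d
 *	ESI: 00000000 EDI: c1400000 EBP: c1537f8c ESP: c1537f80
 *	 DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
 */
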
/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

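/*
 * Usage sketch (my_worker and data are hypothetical, shown only to
 * illustrate the calling convention):
 *
 *	static int my_worker(void *data)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, data, CLONE_FS | CLONE_FILES);
 *
 * The crafted pt_regs start the child in kernel_thread_helper, which picks
 * fn and arg out of %bx/%dx, calls fn(arg), and passes the return value on
 * to do_exit().
 */
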
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

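/*
 * Note: childregs->ax = 0 is what makes fork()/clone() return 0 in the
 * child, and thread.ip = ret_from_fork routes the child's first schedule
 * through the common fork return path in entry.S. thread.sp0 points just
 * past the register frame, i.e. at the top of the child's kernel stack.
 */
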
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	__asm__("movl %0, %%gs" :: "r"(0));
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

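/*
 * start_thread() is called by the binfmt loaders (the ELF loader among
 * them) once a new executable image is in place: it resets the user
 * segments and points the saved ip/sp at the new program's entry point
 * and initial stack.
 */
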
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

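/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctls.
 * Userspace sketch (illustrative):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	... any rdtsc now faults: CR4.TSD makes it privileged at CPL > 0,
 *	... and the resulting #GP is delivered to the task as SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);
 */
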
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		update_debugctlmsr(0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * to set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here and
	 * we let the task get a GPF in case an I/O instruction is
	 * performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

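/*
 * __switch_to_xtra() is deliberately out of line (noinline): it is only
 * reached when prev or next has one of the _TIF_WORK_CTXSW bits set (see
 * the test in __switch_to() below), keeping the common context-switch
 * path free of the debug-register, TSC and io-bitmap bookkeeping.
 */
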
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

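/*
 * The syscall wrappers below take struct pt_regs by value: the i386
 * system-call entry path pushes the full register frame onto the kernel
 * stack, so that frame itself serves as the argument.
 */
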
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

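/*
 * TIF_IRET forces the return to user space through the iret path: a
 * successful execve replaced the register image (notably ip and sp), and
 * the sysenter/sysexit fast path does not restore the full frame.
 */
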
#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

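/*
 * get_wchan() backs /proc/<pid>/wchan: it walks the sleeping task's saved
 * %ebp frame chain (return address at bp+4, caller's bp at *bp) for at
 * most 16 frames and reports the first ip outside the scheduler.
 */
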
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

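/*
 * 0x02000000 bytes == 32 MiB: the heap base is randomized somewhere in
 * [mm->brk, mm->brk + 32 MiB), falling back to mm->brk itself when
 * randomize_range() fails and returns 0.
 */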