/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>	/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>	/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

extern void die(const char *, struct pt_regs *, long);

static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

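/*
 * Pre-sync the vmalloc mappings into every page table here, so that a
 * notifier living in module (vmalloc) space cannot itself trigger a
 * recursive vmalloc fault once it is called from the fault path.
 */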
int register_page_fault_notifier(struct notifier_block *nb)
{
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(register_page_fault_notifier);

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);

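/*
 * Trap 14 is the page-fault vector.  A chain user (e.g. kprobes) can
 * claim the fault by returning NOTIFY_STOP, in which case the normal
 * handling below is skipped.
 */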
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	struct die_args args = {
		.regs = regs,
		.str = "page fault",
		.err = err,
		.trapnr = 14,
		.signr = SIGSEGV
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain,
					  DIE_PAGE_FAULT, &args);
}

/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long eip = regs->eip;
	unsigned seg = regs->xcs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->eflags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (eip & 0xffff);
	}

	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return eip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
	if ((~seg_ar & 0x9800) || eip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned eip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
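	/* Bit 2 of the selector is the TI bit (1 = LDT, 0 = GDT);
	   masking with ~7 drops the RPL and TI bits, leaving the byte
	   offset of the descriptor within its table. */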
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		down(&current->mm->context.sem);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((unsigned long *)desc);

	if (seg & (1<<2)) {
		up(&current->mm->context.sem);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return eip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

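	/* An x86 instruction is at most 15 bytes long (prefixes
	   included), so at most 15 bytes need to be decoded. */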
	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
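			/* 0F 0D is the 3DNow! PREFETCH/PREFETCHW group,
			   0F 18 the SSE PREFETCHNTA/T0/T1/T2 group. */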
			scan_more = 0;
			if (instr > (unsigned char *)limit)
				break;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
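		/* Bit 4 of error_code flags an instruction fetch; with
		   NX enabled such a fault is genuine, never a spurious
		   prefetch report. */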
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

fastcall void do_invalid_op(struct pt_regs *, unsigned long);

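/*
 * Copy the kernel-space pmd entry covering @address from the reference
 * page table (init_mm.pgd) into @pgd.  Returns the reference pmd, so
 * the caller can go on to check the pte level, or NULL if the
 * reference entry itself is not present.
 */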
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
fastcall void __kprobes do_page_fault(struct pt_regs *regs,
				      unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
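		/* 0x0d = bits 0, 2 and 3: not a protection fault, not
		   from user mode, no reserved bit set - only then can
		   this be a lazily-synced vmalloc-area fault. */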
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->eip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %esp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %esp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
				/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
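	/* VM_FAULT_MINOR means the fault was serviced without I/O;
	   VM_FAULT_MAJOR means the page had to be brought in from disk. */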
	switch (handle_mm_fault(mm, vma, address, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
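	/*
	 * The workaround maps the IDT read-only, so the erratum's locked
	 * CMPXCHG8B turns into a page fault inside the IDT: descriptors
	 * are 8 bytes (hence ">> 3"), and vector 6 is the invalid-opcode
	 * exception the instruction should have raised.
	 */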
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT " printing eip:\n");
		printk("%08lx\n", regs->eip);

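		/* Walk the page tables by hand so the oops log shows the
		   pgd/pmd/pte entries that map the faulting address. */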
		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk(KERN_ALERT "*pdpt = %016Lx\n", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
								 & (PTRS_PER_PMD - 1)];
			printk(KERN_ALERT "*pde = %016Lx\n", page);
			page &= ~_PAGE_NX;
		}
#else
		printk(KERN_ALERT "*pde = %08lx\n", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
								 & (PTRS_PER_PTE - 1)];
			printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
		}
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}