/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
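
/*
 * Give any active kprobe the first chance to service a kernel-mode
 * fault (for instance one taken while single-stepping a probed
 * instruction), so the ordinary fault path only runs for faults that
 * kprobes does not claim.
 */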
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk)
{
        siginfo_t info;

        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code  = si_code;
        info.si_addr  = (void __user *)address;

        force_sig_info(si_signo, &info, tsk);
}
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (mm)
                pgd = mm->pgd;
        else
                pgd = get_TTB();

        printk(KERN_ALERT "pgd = %p\n", pgd);
        pgd += pgd_index(addr);
        printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
               sizeof(*pgd) * 2, (u64)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        printk(", *pud=%0*Lx", sizeof(*pud) * 2,
                               (u64)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
                               (u64)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_kernel(pmd, addr);
                printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
        } while (0);

        printk("\n");
}
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else {
                /*
                 * The page tables are fully synchronised so there must
                 * be another reason for the fault. Return NULL here to
                 * signal that we have not taken care of the fault.
                 */
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
                return NULL;
        }

        return pmd_k;
}
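
/*
 * Syncing the upper levels is sufficient: once this mm's pgd/pud/pmd
 * entries point at the same pmd page as init_mm's reference tables,
 * the leaf ptes are shared, and later vmalloc mappings in the same
 * range become visible here with no further copying.
 */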
/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd_k;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc/module/P3 area: */
        if (!(address >= P3SEG && address < P3_ADDR_MAX))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_k = get_TTB();
        pmd_k = vmalloc_sync_one(pgd_k, address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
        if (!oops_may_print())
                return;

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %08lx\n", address);
        printk(KERN_ALERT "PC:");
        printk_address(regs->pc, 1);

        show_pte(NULL, address);
}
static noinline void
no_context(struct pt_regs *regs, unsigned long writeaccess,
           unsigned long address)
{
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        show_fault_oops(regs, address);

        die("Oops", regs, writeaccess);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}
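
/*
 * no_context() relies on fixup_exception(), which consults the kernel
 * exception table (built from the __ex_table entries emitted around
 * user-access helpers such as get_user()/put_user()); when the
 * faulting PC has a fixup entry, execution simply resumes at the
 * fixup address instead of oopsing.
 */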
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
                       unsigned long address, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                force_sig_info_fault(SIGSEGV, si_code, address, tsk);

                return;
        }

        no_context(regs, writeaccess, address);
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long writeaccess,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, writeaccess, address, si_code);
}
static noinline void
bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
        __bad_area(regs, writeaccess, address, SEGV_MAPERR);
}
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
                      unsigned long address)
{
        __bad_area(regs, writeaccess, address, SEGV_ACCERR);
}
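
/*
 * The distinction above: SEGV_MAPERR (bad_area) means no VMA maps the
 * address at all, while SEGV_ACCERR (bad_area_access_error) means a
 * VMA exists but its protections forbid the attempted access.
 */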
static void out_of_memory(void)
{
        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed):
         */
        up_read(&current->mm->mmap_sem);

        pagefault_out_of_memory();
}
static void
do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs))
                no_context(regs, writeaccess, address);

        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
               unsigned long address, unsigned int fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue pagefault.
         */
        if (fatal_signal_pending(current)) {
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                if (!user_mode(regs))
                        no_context(regs, writeaccess, address);
                return 1;
        }

        if (!(fault & VM_FAULT_ERROR))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        up_read(&current->mm->mmap_sem);
                        no_context(regs, writeaccess, address);
                        return 1;
                }

                out_of_memory();
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, writeaccess, address);
                else
                        BUG();
        }

        return 1;
}
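
/*
 * mm_fault_error() returns 1 once it has fully disposed of the fault
 * (fatal signal, OOM handling, SIGBUS, or kernel fixup/oops) and 0
 * when the caller should continue with normal fault accounting.
 */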
static inline int access_error(int write, struct vm_area_struct *vma)
{
        if (write) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE;
}
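
/*
 * TASK_SIZE is the top of the user address range on SH, so anything
 * at or above it is necessarily a kernel-space access and is handled
 * without consulting current->mm.
 */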
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        unsigned long vec;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int fault;
        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                              (writeaccess ? FAULT_FLAG_WRITE : 0));

        tsk = current;
        mm = tsk->mm;
        vec = lookup_exception_vector();

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (vmalloc_fault(address) >= 0)
                        return;
                if (notify_page_fault(regs, vec))
                        return;

                bad_area_nosemaphore(regs, writeaccess, address);
                return;
        }

        if (unlikely(notify_page_fault(regs, vec)))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();
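
        /*
         * SR.IMASK is the CPU's 4-bit interrupt mask field; only when it
         * is fully raised were interrupts already blocked when the fault
         * was taken, in which case they are deliberately left off.
         */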
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt, have no user context or are running
         * in an atomic region then we must not take the fault:
         */
        if (unlikely(in_atomic() || !mm)) {
                bad_area_nosemaphore(regs, writeaccess, address);
                return;
        }

retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, writeaccess, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, writeaccess, address);
                return;
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, writeaccess, address);
                return;
        }
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (unlikely(access_error(writeaccess, vma))) {
                bad_area_access_error(regs, writeaccess, address);
                return;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
                if (mm_fault_error(regs, writeaccess, address, fault))
                        return;

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
}
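
/*
 * Note the retry protocol above: FAULT_FLAG_ALLOW_RETRY is cleared
 * before jumping back to "retry", so handle_mm_fault() can request at
 * most one retry per fault and the loop is bounded.
 */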
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
               unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        return 1;

                pgd = pgd_offset(current->mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                return 1;
        if (unlikely(writeaccess && !pte_write(entry)))
                return 1;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

        set_pte(pte, entry);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
        /*
         * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
         * the case of an initial page write exception, so we need to
         * flush it in order to avoid potential TLB entry duplication.
         */
        if (writeaccess == 2)
                local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
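
        /*
         * The writeaccess argument is the exception class handed down by
         * the low-level entry code; the value 2 tested above appears to
         * correspond to the initial page write exception mentioned in the
         * preceding comment.
         */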
        update_mmu_cache(NULL, address, pte);

        return 0;
}