/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;

int show_unhandled_signals = 1;
/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
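/* Report an unrecoverable fault: print an oops-style summary of the
 * faulting address and the task's page-table context, then die if we
 * were in kernel mode.
 */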
static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
			  (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
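/* Fault lookup for kernel memory-access helpers.  The return value of
 * search_extables_range(ret_pc) classifies how the fixup handles the
 * fault: 3 means both loads and stores are fixed up, 1 means only
 * stores, 2 means only loads; bit 21 of the faulting instruction at
 * pc tells a store apart from a load.  With no usable fixup we build
 * a pt_regs and report an unhandled fault.
 */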
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
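/* Print a rate-limited one-line diagnostic for an unhandled signal
 * sent to a user task: faulting address, PC, return PC and stack
 * pointer, plus the VMA the PC falls in.
 */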
static void show_signal_msg(struct pt_regs *regs, int sig, int code,
			    unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}
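/* compute_si_addr() recovers the effective data address of the
 * faulting access for siginfo: the PC itself for a text fault,
 * otherwise the target address decoded from the faulting instruction
 * (fetched with __get_user() when the PC is in user space).
 */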
extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
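/* Main entry point for MMU faults, called from the trap table.  A
 * text_fault means the instruction fetch itself faulted; write says
 * whether the access was a store.  Kernel faults above TASK_SIZE are
 * handled by the vmalloc_fault path at the bottom of the function.
 */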
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));
	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}
	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}
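/* Fault in a user page by hand.  Used by the register-window fault
 * handlers below, which must make the user's stack resident before
 * the trap code can spill or fill a window there.
 */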
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
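/* The SPARC ABI requires the stack pointer to stay doubleword
 * (8-byte) aligned; a task with an unaligned %sp gets SIGILL.
 */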
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
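/* A register window spill/fill touches a 64-byte save area from %sp
 * to %sp + 0x3c.  If %sp + 0x38 lands on a different page than %sp,
 * the save area straddles a page boundary and both pages must be
 * faulted in before the window trap can be retried.
 */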
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}