/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

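/* sp_banks[] is the zero-terminated table of physical memory banks
 * (an entry with num_bytes == 0 marks the end, which is what stops the
 * probe loop below); it is filled in early in boot from the PROM's
 * memory lists.
 */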
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; sp_banks[i].num_bytes; i++)
                total += sp_banks[i].num_bytes;

        return total;
}

static void unhandled_fault(unsigned long, struct task_struct *,
                            struct pt_regs *) __attribute__ ((noreturn));

static void __noreturn unhandled_fault(unsigned long address,
                                       struct task_struct *tsk,
                                       struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT
                       "Unable to handle kernel paging request at virtual address %08lx\n",
                       address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
               (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

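/* Classify how a fault taken inside one of the user-copy helpers should
 * be fixed up: search_extables_range() reports what kind of
 * exception-table entry covers ret_pc (3: both loads and stores have a
 * fixup; 1: only stores, for the _to_ helpers; 2: only loads, for the
 * _from_ helpers).  Bit 21 of a SPARC format-3 opcode is the op3 bit
 * that distinguishes a store from a load, and op3 == 0x0f is SWAP,
 * which both reads and writes and is therefore accepted in case 2.
 * If the access cannot be fixed up we die via unhandled_fault() on a
 * synthesized pt_regs.
 */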
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
                if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof(regs));
        regs.pc = pc;
        regs.npc = pc + 4;
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));

        unhandled_fault(address, current, &regs);

        /* Not reached */
        return 0;
}

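/* Ratelimited one-line report for unhandled user segfaults: the pc, the
 * caller's return address (%i7) and the stack pointer, in the same
 * style other architectures use.
 */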
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->pc);

        printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                               unsigned long addr)
{
        siginfo_t info;

        info.si_signo = sig;
        info.si_code = code;
        info.si_errno = 0;
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, info.si_code,
                                addr, current);

        force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
                                                    unsigned int);

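/* Recover the effective address of the faulting access by decoding the
 * load/store instruction at regs->pc.  PSR_PS ("previous supervisor")
 * set means the trap came from kernel mode, so regs->pc may be
 * dereferenced directly; a user pc has to be fetched with __get_user().
 */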
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

        if (regs->psr & PSR_PS)
                insn = *(unsigned int *) regs->pc;
        else
                __get_user(insn, (unsigned int *) regs->pc);

        return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
{
        unsigned long addr = compute_si_addr(regs, text_fault);

        __do_fault_siginfo(code, sig, regs, addr);
}

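/* Main fault entry point, called from the low-level trap handlers.
 * text_fault says the fault was on an instruction fetch, in which case
 * the faulting address is simply regs->pc; write says the access was a
 * store.  PSR_PS tells us whether we faulted from kernel or user mode.
 */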
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int fault, code;
        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                              (write ? FAULT_FLAG_WRITE : 0));

        if (text_fault)
                address = regs->pc;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        code = SEGV_MAPERR;
        if (address >= TASK_SIZE)
                goto vmalloc_fault;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
        down_read(&mm->mmap_sem);

        if (!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

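        /* If handle_mm_fault() wants a retry it has already dropped
         * mmap_sem for us; and if a fatal signal is pending there is no
         * point retrying, the task is being killed anyway.
         */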
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

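        /* Major/minor fault accounting is done only while
         * FAULT_FLAG_ALLOW_RETRY is still set, i.e. on the first
         * attempt, so a retried fault is not counted twice.
         */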
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, address);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (from_user) {
                do_fault_siginfo(code, SIGSEGV, regs, text_fault);
                return;
        }

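        /* Kernel-mode faults get one more chance: if the faulting pc
         * has an exception-table entry, resume at its fixup address
         * rather than oopsing.  g2 is the scratch register the fixup
         * handlers expect their precomputed value in, so it is restored
         * into the saved register set before resuming.
         */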
        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
                fixup = search_extables_range(regs->pc, &g2);
                /* Values below 10 are reserved for other things */
                if (fixup > 10) {
                        extern const unsigned __memset_start[];
                        extern const unsigned __memset_end[];
                        extern const unsigned __csum_partial_copy_start[];
                        extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n",
                               regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                               regs->pc, fixup, g2);
#endif

                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end) ||
                            (regs->pc >= (unsigned long)__csum_partial_copy_start &&
                             regs->pc < (unsigned long)__csum_partial_copy_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault(address, tsk, regs);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                *pmd = *pmd_k;
                return;
        }
}

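/* Fault in a user page by hand on behalf of the register-window
 * handlers below.  Unlike do_sparc_fault() this always runs with a
 * valid user context, so failures are signalled directly against
 * tsk->thread.kregs.
 */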
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int code;

        code = SEGV_MAPERR;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        up_read(&mm->mmap_sem);
        return;
bad_area:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

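/* The SPARC ABI keeps %sp doubleword aligned because register windows
 * are spilled with doubleword stores; a misaligned stack pointer is
 * treated as a fatal program error.
 */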
static void check_stack_aligned(unsigned long sp)
{
        if (sp & 0x7UL)
                force_sig(SIGILL, current);
}

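/* A register window is 16 words (64 bytes) saved at %sp.  The spill is
 * done with doubleword stores, the last of which lands at %sp + 0x38,
 * so when the save area straddles a page boundary both pages have to be
 * faulted in before the trap handler can retry the spill or fill.
 */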
void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);

        check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}