/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
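
/* sp_banks[], used below, describes the physical memory banks and
 * appears to be filled in by the boot-time probing code; an entry
 * with a zero num_bytes terminates the table.
 */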
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
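	/* search_extables_range() appears to return a small class code
	 * (1, 2 or 3, handled by the switch below) for the partial
	 * fixups, and an actual fixup address otherwise -- compare the
	 * "fixup > 10" test in do_sparc_fault().
	 */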
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)	/* insn bit 21 is op3<2>, set for stores */
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static void show_signal_msg(struct pt_regs *regs, int sig, int code,
			    unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);
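
/* For data faults, fetch the faulting instruction and let
 * safe_compute_effective_address() recover the memory address it
 * referenced, so that si_addr points at the data rather than the PC.
 */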
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
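
/* Main page fault entry point.  text_fault says the fault was on an
 * instruction fetch, write says it was caused by a store; both are
 * presumably supplied by the low-level trap entry code.
 */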
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

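	/* The first attempt may drop mmap_sem inside
	 * __lock_page_or_retry(); in that case we come back to this
	 * label with FAULT_FLAG_ALLOW_RETRY cleared and block for the
	 * page exactly once.
	 */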
retry:
	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

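	/* handle_mm_fault() returns a mask of VM_FAULT_* bits.  If a
	 * fatal signal arrived while mmap_sem was dropped for a retry,
	 * just return; the signal is delivered on the way back to user
	 * space and kills the task before the fault matters.
	 */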
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];
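
			/* The memset and csum_partial_copy helpers
			 * expect their fixup code to find the fault
			 * address in %i4 and the faulting PC in %i5;
			 * at least that is what the stores below
			 * suggest.
			 */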
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
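
/* On sun4c we first try to service simple reference/modify faults
 * directly: update the software PTE and, if the segment is loaded,
 * push the new PTE into the MMU without ever taking mmap_sem.
 * Everything else falls through to do_sparc_fault().
 */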
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

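	/* sun4c reports atomic read-modify-write instructions
	 * (ldstub/swap and their alternate-space forms -- which is what
	 * the 0xc1680000 mask below appears to match) as read faults,
	 * so promote a matching user-mode read fault to a write fault.
	 */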
	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
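
/* A register window save area is 16 words (0x40 bytes) on the user
 * stack, moved as eight doublewords with the last one starting at
 * sp + 0x38; probing sp and sp + 0x38 therefore touches every page
 * the area can span (an assumption about the spill/fill code).
 */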
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}