arch/sh/mm/fault.c

/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
                              unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        tsk = current;
        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

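        /*
         * Both the mmap_sem acquisition below and handle_mm_fault() can
         * sleep, which is why atomic context bails out above; kernel
         * threads have no mm of their own, so their faults can only be
         * repaired via the exception fixup tables.
         */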
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
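        /*
         * find_vma() returns the first VMA that ends above the fault
         * address; if the address falls below vm_start, the access is
         * only legal for a downward-growing stack, which expand_stack()
         * extends to cover the faulting page.
         */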
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
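        /*
         * Check that the VMA's protections permit the access: a write
         * needs VM_WRITE, while a read or instruction fetch is allowed
         * on any mapping marked readable, executable or writable.
         */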
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }

        up_read(&mm->mmap_sem);
        return;

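/*
 * VM_FAULT_MINOR faults were satisfied without I/O, VM_FAULT_MAJOR
 * faults needed a page read from disk; the per-task min_flt/maj_flt
 * counters bumped above are what getrusage() and /proc report.
 */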
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                tsk->thread.address = address;
                tsk->thread.error_code = writeaccess;
                force_sig(SIGSEGV, tsk);
                return;
        }

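/*
 * For the user-mode case above, the fault address and access type are
 * stashed in the thread struct before the signal is raised, keeping
 * them available for later inspection (e.g. by a debugger).
 */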
no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

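/*
 * fixup_exception() consults the exception table: if the faulting
 * instruction has a fixup entry (as the user-access helpers do), the
 * PC is redirected to the fixup code and execution resumes; otherwise
 * this is a genuine kernel bug and we oops below.
 */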
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
        asm volatile("mov.l %1, %0"
                     : "=r" (page)
                     : "m" (__m(MMU_TTB)));
        if (page) {
                page = ((unsigned long *)page)[address >> 22];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(KERN_ALERT "*pte = %08lx\n", page);
                }
        }
        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

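/*
 * In the out-of-memory path above, init is deliberately never killed:
 * it just yields, retakes mmap_sem and retries the fault via the
 * survive label, since killing pid 1 would take the system down anyway.
 */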
do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.address = address;
        tsk->thread.error_code = writeaccess;
        tsk->thread.trap_no = 14;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX             (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX             P4SEG
#endif

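/*
 * Fast path for resolving TLB misses against already-instantiated page
 * tables. Faults on P3 (the kernel's mapped segment, which covers
 * vmalloc space and, with the #define above, the SH-4 store queue
 * remapping) are resolved against the kernel page tables; everything
 * else goes through current->mm. A nonzero return tells the low-level
 * exception handler to fall back to the full do_page_fault() path.
 */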
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        struct mm_struct *mm = current->mm;
        spinlock_t *ptl;
        int ret = 1;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
                mm = NULL;
        } else {
                if (unlikely(address >= TASK_SIZE || !mm))
                        return 1;

                pgd = pgd_offset(mm, address);
        }

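        /*
         * Walk the pgd/pud/pmd levels; a missing or bad entry at any
         * level means the mapping was never instantiated, so punt to
         * the slow path.
         */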
        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;

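        /*
         * User page tables can change under us, so map the pte with its
         * page-table lock held; kernel page tables need no such locking
         * on this path.
         */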
        if (mm)
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else
                pte = pte_offset_kernel(pmd, address);

        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                goto unlock;
        if (unlikely(writeaccess && !pte_write(entry)))
                goto unlock;

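        /*
         * TLB refill is done in software on SH, so the accessed (young)
         * and dirty bits are maintained here: mark the page young on
         * any access, and dirty as well on a write, before reloading
         * the entry into the TLB.
         */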
        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        __flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif

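        /*
         * set_pte() writes the updated entry back to the page table and
         * update_mmu_cache() loads it into the TLB; the vma argument is
         * not needed on this path, hence NULL.
         */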
        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
        ret = 0;
unlock:
        if (mm)
                pte_unmap_unlock(pte, ptl);
        return ret;
}