/*
 * This file contains common routines for dealing with freeing of page
 * tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP

/*
 * Handle batching of page table freeing on SMP. Page tables are
 * queued up and sent to be freed later by RCU in order to avoid
 * freeing a page table page that is being walked without locks.
 */

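/* Concretely, the flow implemented below: pgtable_free_tlb() appends
 * each freed table to a per-cpu pte_freelist_batch; when the batch page
 * fills up (or pte_free_finish() runs at the end of the mmu_gather),
 * the batch is handed to call_rcu(), and pte_free_rcu_callback() does
 * the actual freeing after a grace period, once no lockless walker can
 * still be traversing those tables.
 */
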
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
        struct rcu_head rcu;
        unsigned int    index;
        unsigned long   tables[0];
};

#define PTE_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
          / sizeof(unsigned long))

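/* Worked example (illustrative figures, not from this file): assuming
 * 4 KiB pages and a 64-bit build where the header is 24 bytes (a 16-byte
 * rcu_head plus a 4-byte index, padded to 8), one batch page holds
 * (4096 - 24) / 8 = 509 table entries.
 */
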
static void pte_free_smp_sync(void *arg)
{
        /* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(void *table, unsigned shift)
{
        pte_freelist_forced_free++;

        smp_call_function(pte_free_smp_sync, NULL, 1);

        pgtable_free(table, shift);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
        struct pte_freelist_batch *batch =
                container_of(head, struct pte_freelist_batch, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++) {
                void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
                unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;

                pgtable_free(table, shift);
        }

        free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
        call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
        unsigned long pgf;

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
                pgtable_free(table, shift);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
                if (*batchp == NULL) {
                        pgtable_free_now(table, shift);
                        return;
                }
                (*batchp)->index = 0;
        }
        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf = (unsigned long)table | shift;
        (*batchp)->tables[(*batchp)->index++] = pgf;
        if ((*batchp)->index == PTE_FREELIST_SIZE) {
                pte_free_submit(*batchp);
                *batchp = NULL;
        }
}

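/* Note on the "pgf" packing above: page table fragments are aligned to
 * more than MAX_PGTABLE_INDEX_SIZE bytes, so the low bits of the table
 * pointer are free to carry the allocation shift. pte_free_rcu_callback()
 * recovers the two halves by masking with MAX_PGTABLE_INDEX_SIZE.
 */
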
void pte_free_finish(void)
{
        /* This is safe since tlb_gather_mmu has disabled preemption */
        struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

        if (*batchp == NULL)
                return;
        pte_free_submit(*batchp);
        *batchp = NULL;
}

#endif /* CONFIG_SMP */

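/* Trap 0x400 is the PowerPC instruction storage exception, i.e. a fault
 * taken while trying to execute from the page.
 */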
static inline int is_exec_fault(void)
{
        return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that
 * on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
        return (pte_val(pte) &
            (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
            (_PAGE_PRESENT | _PAGE_USER);
}
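
/* For example, a present, cacheable userspace mapping of an ordinary
 * page passes this test, while kernel PTEs, _PAGE_SPECIAL entries and
 * non-cacheable I/O mappings are all skipped.
 */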

struct page *maybe_pte_to_page(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return NULL;
        page = pfn_to_page(pfn);
        if (PageReserved(page))
                return NULL;
        return page;
}

#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, we always flush
 * the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
        pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
        if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
                                       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
                struct page *pg = maybe_pte_to_page(pte);
                if (!pg)
                        return pte;
                if (!test_bit(PG_arch_1, &pg->flags)) {
#ifdef CONFIG_8xx
                        /* On 8xx, cache control instructions (particularly
                         * "dcbst" from flush_dcache_icache) fault as write
                         * operations if there is an unpopulated TLB entry
                         * for the address in question. To work around that,
                         * we invalidate the TLB here, thus avoiding dcbst
                         * misbehaviour.
                         */
                        /* 8xx doesn't care about the PID, size or ind args */
                        _tlbil_va(addr, 0, 0, 0);
#endif /* CONFIG_8xx */
                        flush_dcache_icache_page(pg);
                        set_bit(PG_arch_1, &pg->flags);
                }
        }
        return pte;
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                                     int dirty)
{
        return pte;
}

#else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */

/* Embedded-type MMU with HW exec support. This is a bit more complicated,
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
        struct page *pg;

        /* No exec permission in the first place, move on */
        if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
                return pte;

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        pg = maybe_pte_to_page(pte);
        if (unlikely(!pg))
                return pte;

        /* If the page is clean, we move on */
        if (test_bit(PG_arch_1, &pg->flags))
                return pte;

        /* If it's an exec fault, we flush the cache and make it clean */
        if (is_exec_fault()) {
                flush_dcache_icache_page(pg);
                set_bit(PG_arch_1, &pg->flags);
                return pte;
        }

        /* Else, we filter out _PAGE_EXEC */
        return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

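/* To summarise the lazy-exec scheme: set_pte_filter() above installs
 * not-yet-clean pages with _PAGE_EXEC stripped (unless we are already
 * handling an exec fault). The first attempt to execute from such a page
 * then faults, and set_access_flags_filter() below flushes the caches,
 * marks the page clean with PG_arch_1 and hands _PAGE_EXEC back.
 */
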
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                                     int dirty)
{
        struct page *pg;

        /* So here, we only care about exec faults, as we use them
         * to recover lost _PAGE_EXEC and perform I$/D$ coherency
         * if necessary. Also if _PAGE_EXEC is already set, same deal,
         * we just bail out.
         */
        if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
                return pte;

#ifdef CONFIG_DEBUG_VM
        /* So this is an exec fault, _PAGE_EXEC is not set. If it was
         * an error we would have bailed out earlier in do_page_fault(),
         * but let's make sure of it.
         */
        if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
                return pte;
#endif /* CONFIG_DEBUG_VM */

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        pg = maybe_pte_to_page(pte);
        if (unlikely(!pg))
                goto bail;

        /* If the page is already clean, we move on */
        if (test_bit(PG_arch_1, &pg->flags))
                goto bail;

        /* Clean the page and set PG_arch_1 */
        flush_dcache_icache_page(pg);
        set_bit(PG_arch_1, &pg->flags);

 bail:
        return __pte(pte_val(pte) | _PAGE_EXEC);
}

#endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(*ptep));
#endif
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
         * is called.
         */
        pte = set_pte_filter(pte, addr);

        /* Perform the setting of the PTE */
        __set_pte_at(mm, addr, ptep, pte, 0);
}
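
/* Note (an assumption about the helper's signature in this tree): the
 * trailing 0 passed to __set_pte_at() is its "percpu" argument, so
 * set_pte_at() always takes the normal, non-percpu path.
 */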

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pte_t *ptep, pte_t entry, int dirty)
{
        int changed;
        entry = set_access_flags_filter(entry, vma, dirty);
        changed = !pte_same(*(ptep), entry);
        if (changed) {
                if (!(vma->vm_flags & VM_HUGETLB))
                        assert_pte_locked(vma->vm_mm, address);
                __ptep_set_access_flags(ptep, entry);
                flush_tlb_page_nohash(vma, address);
        }
        return changed;
}

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (mm == &init_mm)
                return;
        pgd = mm->pgd + pgd_index(addr);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, addr);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, addr);
        BUG_ON(!pmd_present(*pmd));
        assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */