#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or while the address space is being torn down.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif
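
/*
 * Example (sketch): a teardown path such as zap_pte_range() in
 * mm/memory.c can pass its mmu_gather's fullmm flag as "full", letting
 * an architecture skip synchronization when the whole address space is
 * being destroyed anyway:
 *
 *	if (!pte_present(*pte))
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 */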

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, a rounded-up __boundary may wrap to 0; comparing
 * __boundary - 1 against (end) - 1 keeps the test correct in that case,
 * since a wrapped boundary of 0 becomes ULONG_MAX and never wins.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
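
/*
 * Example (sketch, modelled on walkers such as those in mm/pagewalk.c):
 * the p?d_addr_end() macros and p?d_none_or_clear_bad() combine into the
 * usual multi-level walk over a range:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	do {
 *		unsigned long next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 *
 * walk_pud_range() is a hypothetical helper that repeats the same
 * pattern with pud_addr_end()/pud_none_or_clear_bad(), and likewise
 * again at the pmd level.
 */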

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
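
/*
 * Example (sketch, in the spirit of change_pte_range() in mm/mprotect.c):
 * a protection change performed under the pte lock using the transaction
 * interface above:
 *
 *	ptent = ptep_modify_prot_start(mm, addr, pte);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, pte, ptent);
 *
 * The pte lock taken by the caller must be held across both calls, and
 * any batched commit must complete before that lock is dropped.
 */
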
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard can arise in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
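
/*
 * Example (sketch of the pattern used by batch updaters such as
 * copy_pte_range() in mm/memory.c): bracket a run of PTE updates with
 * the lazy mode hooks, under the pte lock, and do not read back through
 * a raw PTE pointer once that entry has been modified in the window:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 *	arch_leave_lazy_mmu_mode();
 */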

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exit should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				    unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_copy is called when a vma that covers the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			    unsigned long size);
#endif
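
/*
 * Example (sketch of the expected call pattern; the real call sites live
 * in core mm code such as mm/memory.c): track on creation, propagate on
 * fork, untrack on unmap:
 *
 *	if (track_pfn_vma_new(vma, &prot, pfn, size))
 *		return -EINVAL;			 establishing the mapping
 *	...
 *	track_pfn_vma_copy(vma);		 vma duplicated on fork
 *	...
 *	untrack_pfn_vma(vma, pfn, size);	 mapping torn down
 */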

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */