#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif
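
/*
 * Worked example (added for illustration, not in the original source):
 * with _PAGE_RW == 0x2 as a signed long, ~_PAGE_RW is 0xfffffffd on
 * 32-bit.  When that mask is promoted to a 64-bit pteval_t (PAE), sign
 * extension turns it into 0xfffffffffffffffd, so masking a pte with it
 * preserves the high bits -- notably _PAGE_NX at bit 63.  An unsigned
 * (UL) constant would zero-extend to 0x00000000fffffffd and silently
 * strip them.
 */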

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY /* nonlinear file mapping,
				     * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE /* if the user mapped it with PROT_NONE;
				     pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
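
/*
 * Added note (an assumption about the PAT setup, not in the original
 * source): PWT and PCD index the PAT MSR, so _PAGE_CACHE_WC being just
 * _PAGE_PWT only means write-combining once PAT entry 1 has been
 * reprogrammed from its power-on default (write-through) to WC.
 */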

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif
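
/*
 * Added note (an inference, not stated in the original source): 32-bit
 * cannot apply _PAGE_GLOBAL unconditionally because not every CPU it
 * supports has PGE; that is also why __PAGE_KERNEL{,_EXEC} are runtime
 * variables there instead of compile-time constants.
 */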

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
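
/*
 * Example (added for illustration): a MAP_PRIVATE mmap() with
 * PROT_READ|PROT_WRITE indexes __P011 and therefore gets PAGE_COPY --
 * a present, user, NX mapping without _PAGE_RW, made writable only by
 * a copy-on-write fault.  The MAP_SHARED equivalent indexes __S011 and
 * gets PAGE_SHARED, which carries _PAGE_RW from the start.
 */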

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_val(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_val(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
}

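/*
 * Usage sketch (added for illustration): the pte_mk* and pte_clr*
 * helpers are pure functions on the pte value, so a copy-on-write
 * fault handler would typically compose them, e.g.
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * and only then install the result with set_pte_at().
 */
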
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

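/*
 * Example (added for illustration): pfn_pte(pfn, PAGE_KERNEL) builds a
 * pte mapping page frame 'pfn' with kernel read-write permissions.
 * The final mask with __supported_pte_mask drops flags the CPU cannot
 * accept -- most importantly _PAGE_NX on hardware without NX support.
 */
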
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
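
/*
 * Usage sketch (added for illustration): protection changes keep the
 * pfn and the accessed/dirty state but take everything else from the
 * new pgprot, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK is what preserves the pfn (PTE_MASK) plus
 * _PAGE_ACCESSED and _PAGE_DIRTY across the change.
 */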

#define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
};

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'.  NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

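/*
 * Usage sketch (added for illustration; 'addr' is a hypothetical
 * kernel virtual address):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && pte_present(*pte) && level == PG_LEVEL_2M)
 *		backed_by_large_page = 1;
 */
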
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

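/*
 * Usage sketch (added for illustration): a raw pte modification that
 * bypasses set_pte() notifies the hypervisor hook while the page table
 * lock is still held:
 *
 *	ptep->pte |= _PAGE_ACCESSED;
 *	pte_update(mm, addr, ptep);
 *
 * On native hardware both pte_update variants compile away to nothing,
 * as the stubs above show.
 */
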
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
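
/*
 * Added note (an inference, not in the original source): 'full' is set
 * during teardown of an entire address space, when no other user of
 * the mm can race with us, so the cheaper non-atomic local clear above
 * is safe.
 */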

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
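	/*
	 * Added comment (assumption about intent): clear RW atomically
	 * so a concurrent hardware update of the accessed/dirty bits
	 * in the same word cannot be lost.
	 */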
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
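
/*
 * Usage sketch (added for illustration; 'new_pgd' is a hypothetical
 * freshly allocated pgd page): a new page global directory inherits
 * the kernel mappings by copying every pgd slot from the kernel
 * boundary upward:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */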

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */