#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_FILE		6	/* shares the dirty bit's position; only meaningful when not present */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
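
/*
 * Editorial note on the mask definitions below (an observation, not
 * text from the original): _AC(1, L) yields a *signed* long 1, so a
 * complement such as ~_PAGE_DIRTY sign-extends to all 1's in the
 * upper bits when widened to a 64-bit pteval_t.  That keeps these
 * masks safe on PAE page tables, where an unsigned 32-bit complement
 * would silently clear the NX bit.
 */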

#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */
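
/*
 * Illustrative sketch (not part of the original header): with the
 * present bit clear the MMU ignores the remaining bits, so the kernel
 * reuses them to tell the kinds of not-present user PTE apart:
 *
 *	if (pte_present(pte))		hardware-visible mapping
 *	else if (pte_file(pte))		nonlinear file mapping (_PAGE_FILE set)
 *	else				swap entry (or empty)
 *
 * _PAGE_PROTNONE marks PROT_NONE mappings, which must still look
 * present to pte_present() so they are not mistaken for swap entries.
 */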

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL		(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
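
/*
 * Explanatory sketch (an editorial addition): the __P and __S tables
 * are indexed by the xwr permission bits of an mmap() request and
 * seed the generic protection_map[]: __Pxwr entries serve MAP_PRIVATE
 * mappings, __Sxwr entries MAP_SHARED.  For example,
 * PROT_READ|PROT_WRITE with MAP_PRIVATE selects __P011 == PAGE_COPY,
 * so the page starts read-only and the first write faults into
 * copy-on-write, while the same protection with MAP_SHARED selects
 * __S011 == PAGE_SHARED, which is writable outright.
 */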

#ifndef __ASSEMBLY__

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }
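
/*
 * Usage sketch (illustrative only, not from the original header):
 * the pte_mk*() helpers are pure functions that return a modified
 * copy, so a live PTE is changed by feeding the result back through
 * a setter, e.g.:
 *
 *	pte_t pte = *ptep;
 *	set_pte(ptep, pte_mkdirty(pte_mkyoung(pte)));
 *
 * set_pte() itself comes from pgtable_32.h/pgtable_64.h (or the
 * paravirt layer), not from this file.
 */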

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
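
/*
 * Example (illustrative): building a PTE for a given page frame
 * number, with any bits the CPU does not support (such as NX before
 * the cpuid check) filtered out by __supported_pte_mask:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 */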

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
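
/*
 * Note (editorial): pte_modify() is what mprotect()-style protection
 * changes boil down to - _PAGE_CHG_MASK preserves the page frame
 * number plus the accessed/dirty state, while everything else
 * (including NX) is taken from the new protection.  Illustrative use:
 *
 *	set_pte(ptep, pte_modify(*ptep, PAGE_READONLY));
 */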

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary
 * pages do not fall out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
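
/*
 * Illustrative pattern only (an editorial reading of the rules above,
 * not code from this header): a PTE change made outside the
 * set_pte()/pte_clear() interfaces must notify the hypervisor and
 * flush, all under the same page table lock.  ptep_set_wrprotect()
 * further down is a concrete instance: a locked bit operation on the
 * PTE followed by pte_update().
 */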

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*(ptep) = entry;					\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   &(ptep)->pte);		\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})
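
/*
 * Editorial note: these young-bit helpers back the generic page-aging
 * code - reclaim tests and clears _PAGE_BIT_ACCESSED to see whether a
 * page has been touched since the last scan.  The _FLUSH variant also
 * shoots down the TLB entry so the hardware will set the accessed bit
 * again on the next reference.
 */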

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte);
	pte_update(mm, addr, ptep);
}
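
/*
 * Illustrative note (editorial): fork() write-protects the PTEs of
 * writable private mappings in both parent and child via
 * ptep_set_wrprotect(), so the first write from either side faults
 * and the page is copied (copy-on-write).  clear_bit() is atomic,
 * which is why no xchg-style locking is needed here.
 */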

#ifndef CONFIG_PARAVIRT
#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#endif	/* !CONFIG_PARAVIRT */

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */