x86: unify zero_page definition
include/asm-x86/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

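/*
 * Illustrative only (an editor's note, not part of the original
 * header): a present, writable, user-visible 4K mapping carries the
 * low permission bits, e.g.
 *
 *	pteval_t val = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
 *
 * Note that !PAE 32-bit kernels define _PAGE_NX as 0 above, so code
 * may OR it in unconditionally without an #ifdef.
 */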
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

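/*
 * A sketch of why _PAGE_PROTNONE matters (the real pte_present() lives
 * in the 32/64-bit sub-headers included below): PROT_NONE mappings
 * must still count as present even though _PAGE_PRESENT is clear, so
 * the test is roughly
 *
 *	pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)
 */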
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL		(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

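/*
 * These feed the generic protection_map[] (an illustrative note, not
 * part of the original header): mmap()'s read/write/exec bits form the
 * 3-bit "xwr" index, with __P* used for private (copy-on-write)
 * mappings and __S* for shared ones, roughly:
 *
 *	vm_page_prot = protection_map[vm_flags &
 *			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 */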
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
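/*
 * Usage sketch (illustrative, not from the original header): callers
 * that need a page of zeroes map this one global page instead of
 * allocating a fresh zeroed frame; the vaddr argument is ignored on
 * x86:
 *
 *	struct page *zero = ZERO_PAGE(addr);
 */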


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_PSE; }

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }

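/*
 * These helpers compose (an illustrative example, not from the
 * original header); making a pte writable, young and dirty in one
 * expression:
 *
 *	pte = pte_mkdirty(pte_mkyoung(pte_mkwrite(pte)));
 */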
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

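/*
 * Illustrative use (an editor's sketch): building a kernel mapping for
 * page frame 'pfn' with the PAGE_KERNEL protection defined above:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * Masking with __supported_pte_mask strips _PAGE_NX on CPUs without
 * NX support, so callers need not check for it themselves.
 */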
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
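/*
 * A worked sketch (not part of the original header): mprotect()-style
 * changes keep the page frame and the accessed/dirty state while
 * replacing the protection bits, e.g. downgrading to read-only:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK preserves PTE_MASK (the pfn) plus _PAGE_ACCESSED and
 * _PAGE_DIRTY; everything else comes from the new pgprot.
 */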

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
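/*
 * Under CONFIG_PARAVIRT the two hooks above come from paravirt.h
 * instead and dispatch through the pv_mmu_ops table, roughly (a sketch
 * of the paravirt side, not a definition from this file):
 *
 *	pv_mmu_ops.pte_update(mm, addr, ptep);
 */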

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*ptep = entry;						\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})
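/*
 * A hedged usage note: the generic fault path calls this when a fault
 * only needs the dirty/accessed/permission bits refreshed, along the
 * lines of
 *
 *	ptep_set_access_flags(vma, address, ptep, entry, write_access);
 *
 * where write_access supplies the "dirty" argument.
 */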

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   &(ptep)->pte);		\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
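/*
 * Usage sketch (illustrative): exit-time teardown passes full=1; the
 * whole mm is going away, so no other CPU can be using these page
 * tables and the cheaper non-atomic local clear is safe:
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 */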

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
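/*
 * A sketch of the classic caller (not from this file): fork()
 * write-protects the parent's PTE for copy-on-write mappings so that
 * either side's next write faults and gets a private copy:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 */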

#ifndef CONFIG_PARAVIRT
#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#endif	/* !CONFIG_PARAVIRT */

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */