Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _I386_PGTABLE_H |
2 | #define _I386_PGTABLE_H | |
3 | ||
1da177e4 LT |
4 | |
5 | /* | |
6 | * The Linux memory management assumes a three-level page table setup. On | |
7 | * the i386, we use that, but "fold" the mid level into the top-level page | |
8 | * table, so that we physically have the same two-level page table as the | |
9 | * i386 mmu expects. | |
10 | * | |
11 | * This file contains the functions and defines necessary to modify and use | |
12 | * the i386 page table tree. | |
13 | */ | |
14 | #ifndef __ASSEMBLY__ | |
15 | #include <asm/processor.h> | |
16 | #include <asm/fixmap.h> | |
17 | #include <linux/threads.h> | |
da181a8b | 18 | #include <asm/paravirt.h> |
1da177e4 LT |
19 | |
20 | #ifndef _I386_BITOPS_H | |
21 | #include <asm/bitops.h> | |
22 | #endif | |
23 | ||
24 | #include <linux/slab.h> | |
25 | #include <linux/list.h> | |
26 | #include <linux/spinlock.h> | |
27 | ||
8c65b4a6 TS |
28 | struct mm_struct; |
29 | struct vm_area_struct; | |
30 | ||
1da177e4 LT |
31 | /* |
32 | * ZERO_PAGE is a global shared page that is always zero: used | |
33 | * for zero-mapped memory areas etc.. | |
34 | */ | |
35 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | |
36 | extern unsigned long empty_zero_page[1024]; | |
37 | extern pgd_t swapper_pg_dir[1024]; | |
e18b890b CL |
38 | extern struct kmem_cache *pgd_cache; |
39 | extern struct kmem_cache *pmd_cache; | |
1da177e4 LT |
40 | extern spinlock_t pgd_lock; |
41 | extern struct page *pgd_list; | |
42 | ||
e18b890b CL |
43 | void pmd_ctor(void *, struct kmem_cache *, unsigned long); |
44 | void pgd_ctor(void *, struct kmem_cache *, unsigned long); | |
45 | void pgd_dtor(void *, struct kmem_cache *, unsigned long); | |
1da177e4 LT |
46 | void pgtable_cache_init(void); |
47 | void paging_init(void); | |
48 | ||
49 | /* | |
50 | * The Linux x86 paging architecture is 'compile-time dual-mode': it | |
51 | * implements both the traditional 2-level x86 page tables and the | |
52 | * newer 3-level PAE-mode page tables. | |
53 | */ | |
54 | #ifdef CONFIG_X86_PAE | |
55 | # include <asm/pgtable-3level-defs.h> | |
56 | # define PMD_SIZE (1UL << PMD_SHIFT) | |
57 | # define PMD_MASK (~(PMD_SIZE-1)) | |
58 | #else | |
59 | # include <asm/pgtable-2level-defs.h> | |
60 | #endif | |
61 | ||
62 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
63 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
64 | ||
65 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | |
d455a369 | 66 | #define FIRST_USER_ADDRESS 0 |
1da177e4 LT |
67 | |
68 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | |
69 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | |
70 | ||
71 | #define TWOLEVEL_PGDIR_SHIFT 22 | |
72 | #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) | |
73 | #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) | |
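(Worked numbers, assuming the default 3GB/1GB split where __PAGE_OFFSET is 0xC0000000: BOOT_USER_PGD_PTRS comes out to 0xC0000000 >> 22 = 768 and BOOT_KERNEL_PGD_PTRS to 1024 - 768 = 256.)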
74 | ||
75 | /* Just any arbitrary offset to the start of the vmalloc VM area: the | |
76 | * current 8MB value just means that there will be an 8MB "hole" after the | |
77 | * physical memory until the kernel virtual memory starts. That means that | |
78 | * any out-of-bounds memory accesses will hopefully be caught. | |
79 | * The vmalloc() routines leave a hole of 4kB between each vmalloced | |
80 | * area for the same reason. ;) | |
81 | */ | |
82 | #define VMALLOC_OFFSET (8*1024*1024) | |
83 | #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ | |
84 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) | |
85 | #ifdef CONFIG_HIGHMEM | |
86 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | |
87 | #else | |
88 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | |
89 | #endif | |
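The rounding in VMALLOC_START deserves a worked example: adding 2*VMALLOC_OFFSET-1 before masking guarantees both an 8MB-aligned start and a hole of at least 8MB above lowmem. A minimal user-space sketch of just that arithmetic (the end-of-lowmem value is invented for illustration):

#include <stdio.h>

#define VMALLOC_OFFSET (8UL * 1024 * 1024)

static unsigned long vmalloc_start(unsigned long end_of_lowmem)
{
	/* skip past a hole of at least VMALLOC_OFFSET, then align down to 8MB */
	return (end_of_lowmem + 2 * VMALLOC_OFFSET - 1) & ~(VMALLOC_OFFSET - 1);
}

int main(void)
{
	unsigned long end_of_lowmem = 0xc7300000UL;	/* invented high_memory value */
	unsigned long start = vmalloc_start(end_of_lowmem);

	printf("end of lowmem: %#lx\n", end_of_lowmem);
	printf("VMALLOC_START: %#lx (hole of %lu MB)\n",
	       start, (start - end_of_lowmem) >> 20);
	return 0;
}

For the value above this prints a start of 0xc8000000, i.e. a 13MB hole; an already-aligned end of lowmem would get exactly the 8MB minimum.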
90 | ||
91 | /* | |
9b4ee40e | 92 | * _PAGE_PSE set in the page directory entry just means that |
1da177e4 LT |
93 | * the page directory entry points directly to a 4MB-aligned block of |
94 | * memory. | |
95 | */ | |
96 | #define _PAGE_BIT_PRESENT 0 | |
97 | #define _PAGE_BIT_RW 1 | |
98 | #define _PAGE_BIT_USER 2 | |
99 | #define _PAGE_BIT_PWT 3 | |
100 | #define _PAGE_BIT_PCD 4 | |
101 | #define _PAGE_BIT_ACCESSED 5 | |
102 | #define _PAGE_BIT_DIRTY 6 | |
103 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | |
104 | #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ | |
105 | #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ | |
106 | #define _PAGE_BIT_UNUSED2 10 | |
107 | #define _PAGE_BIT_UNUSED3 11 | |
108 | #define _PAGE_BIT_NX 63 | |
109 | ||
110 | #define _PAGE_PRESENT 0x001 | |
111 | #define _PAGE_RW 0x002 | |
112 | #define _PAGE_USER 0x004 | |
113 | #define _PAGE_PWT 0x008 | |
114 | #define _PAGE_PCD 0x010 | |
115 | #define _PAGE_ACCESSED 0x020 | |
116 | #define _PAGE_DIRTY 0x040 | |
117 | #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | |
118 | #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ | |
119 | #define _PAGE_UNUSED1 0x200 /* available for programmer */ | |
120 | #define _PAGE_UNUSED2 0x400 | |
121 | #define _PAGE_UNUSED3 0x800 | |
122 | ||
9b4ee40e PBG |
123 | /* If _PAGE_PRESENT is clear, we use these: */ |
124 | #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ | |
125 | #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; | |
126 | pte_present gives true */ | |
1da177e4 LT |
127 | #ifdef CONFIG_X86_PAE |
128 | #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) | |
129 | #else | |
130 | #define _PAGE_NX 0 | |
131 | #endif | |
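Since each _PAGE_* value above is just (1 << _PAGE_BIT_*) in the low word, decoding a raw non-PAE pte is plain bit-testing. A small user-space sketch (the pte value is invented for the example; in PAE mode the NX bit lives in the high 32 bits and is not covered here):

#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040

int main(void)
{
	unsigned long pte = 0x00123067UL;	/* frame 0x123, flags 0x067 */

	printf("pfn      : %#lx\n", pte >> 12);
	printf("present  : %d\n", !!(pte & _PAGE_PRESENT));
	printf("writable : %d\n", !!(pte & _PAGE_RW));
	printf("user     : %d\n", !!(pte & _PAGE_USER));
	printf("accessed : %d\n", !!(pte & _PAGE_ACCESSED));
	printf("dirty    : %d\n", !!(pte & _PAGE_DIRTY));
	return 0;
}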
132 | ||
133 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | |
134 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | |
135 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | |
136 | ||
137 | #define PAGE_NONE \ | |
138 | __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | |
139 | #define PAGE_SHARED \ | |
140 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | |
141 | ||
142 | #define PAGE_SHARED_EXEC \ | |
143 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | |
144 | #define PAGE_COPY_NOEXEC \ | |
145 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | |
146 | #define PAGE_COPY_EXEC \ | |
147 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | |
148 | #define PAGE_COPY \ | |
149 | PAGE_COPY_NOEXEC | |
150 | #define PAGE_READONLY \ | |
151 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | |
152 | #define PAGE_READONLY_EXEC \ | |
153 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | |
154 | ||
155 | #define _PAGE_KERNEL \ | |
156 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) | |
157 | #define _PAGE_KERNEL_EXEC \ | |
158 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | |
159 | ||
160 | extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |
161 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | |
d01ad8dd | 162 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) |
1da177e4 LT |
163 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) |
164 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) | |
165 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) | |
166 | ||
167 | #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) | |
168 | #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) | |
169 | #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) | |
d01ad8dd | 170 | #define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) |
1da177e4 LT |
171 | #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) |
172 | #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) | |
173 | #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) | |
174 | ||
175 | /* | |
176 | * The i386 can't do page protection for execute, and treats execute | |
177 | * permission the same as read. Also, write permissions imply read permissions. | |
178 | * This is the closest we can get.. | |
179 | */ | |
180 | #define __P000 PAGE_NONE | |
181 | #define __P001 PAGE_READONLY | |
182 | #define __P010 PAGE_COPY | |
183 | #define __P011 PAGE_COPY | |
184 | #define __P100 PAGE_READONLY_EXEC | |
185 | #define __P101 PAGE_READONLY_EXEC | |
186 | #define __P110 PAGE_COPY_EXEC | |
187 | #define __P111 PAGE_COPY_EXEC | |
188 | ||
189 | #define __S000 PAGE_NONE | |
190 | #define __S001 PAGE_READONLY | |
191 | #define __S010 PAGE_SHARED | |
192 | #define __S011 PAGE_SHARED | |
193 | #define __S100 PAGE_READONLY_EXEC | |
194 | #define __S101 PAGE_READONLY_EXEC | |
195 | #define __S110 PAGE_SHARED_EXEC | |
196 | #define __S111 PAGE_SHARED_EXEC | |
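These sixteen entries are what mm/mmap.c builds its protection_map from; the index is formed from a mapping's read/write/exec/shared bits. A rough user-space sketch of the lookup (the entries are plain strings here rather than real pgprot_t values, and the bit values mirror VM_READ/VM_WRITE/VM_EXEC/VM_SHARED):

#include <stdio.h>

static const char *protection_map[16] = {
	/* private: __P000 .. __P111 */
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_COPY",        "PAGE_COPY",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_COPY_EXEC",   "PAGE_COPY_EXEC",
	/* shared: __S000 .. __S111 */
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_SHARED",      "PAGE_SHARED",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_SHARED_EXEC", "PAGE_SHARED_EXEC",
};

int main(void)
{
	int read = 1, write = 2, shared = 8;

	/* a private read/write mapping becomes copy-on-write ... */
	printf("private rw -> %s\n", protection_map[read | write]);
	/* ... while the shared variant is genuinely writable */
	printf("shared  rw -> %s\n", protection_map[read | write | shared]);
	return 0;
}

Note how __P010/__P011 resolve to PAGE_COPY (not hardware-writable) while __S010/__S011 resolve to PAGE_SHARED: that difference is what makes private mappings copy-on-write.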
197 | ||
198 | /* | |
199 | * Define this if things work differently on an i386 and an i486: | |
200 | * it will (on an i486) warn about kernel memory accesses that are | |
e49332bd | 201 | * done without an 'access_ok(VERIFY_WRITE,..)'
1da177e4 | 202 | */ |
e49332bd | 203 | #undef TEST_ACCESS_OK |
1da177e4 LT |
204 | |
205 | /* The boot page tables (all created as a single array) */ | |
206 | extern unsigned long pg0[]; | |
207 | ||
208 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | |
1da177e4 | 209 | |
705e87c0 HD |
210 | /* To avoid harmful races, pmd_none(x) should check only the low word when PAE is enabled */ |
211 | #define pmd_none(x) (!(unsigned long)pmd_val(x)) | |
1da177e4 | 212 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
1da177e4 LT |
213 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) |
214 | ||
215 | ||
216 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | |
217 | ||
218 | /* | |
219 | * The following only work if pte_present() is true. | |
220 | * Undefined behaviour if not.. | |
221 | */ | |
222 | static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | |
223 | static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | |
224 | static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } | |
225 | static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } | |
226 | static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } | |
8f860591 | 227 | static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } |
1da177e4 LT |
228 | |
229 | /* | |
230 | * The following only works if pte_present() is not true. | |
231 | */ | |
232 | static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } | |
233 | ||
234 | static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } | |
235 | static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } | |
236 | static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } | |
237 | static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } | |
238 | static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } | |
239 | static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } | |
240 | static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } | |
241 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } | |
242 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } | |
243 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } | |
8f860591 | 244 | static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } |
1da177e4 | 245 | |
5311ab62 JF |
246 | extern void vmalloc_sync_all(void); |
247 | ||
1da177e4 LT |
248 | #ifdef CONFIG_X86_PAE |
249 | # include <asm/pgtable-3level.h> | |
250 | #else | |
251 | # include <asm/pgtable-2level.h> | |
252 | #endif | |
253 | ||
da181a8b | 254 | #ifndef CONFIG_PARAVIRT |
789e6ac0 ZA |
255 | /* |
256 | * Rules for using pte_update - it must be called after any PTE update which | |
257 | * has not been done using the set_pte / clear_pte interfaces. It is used by | |
258 | * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE | |
259 | * updates should either be sets, clears, or set_pte_atomic for P->P | |
260 | * transitions, which means this hook should only be called for user PTEs. | |
261 | * This hook implies a P->P protection or access change has taken place, which | |
262 | * requires a subsequent TLB flush. The notification can optionally be delayed | |
263 | * until the TLB flush event by using the pte_update_defer form of the | |
264 | * interface, but care must be taken to assure that the flush happens while | |
265 | * still holding the same page table lock so that the shadow and primary pages | |
266 | * do not become out of sync on SMP. | |
267 | */ | |
268 | #define pte_update(mm, addr, ptep) do { } while (0) | |
269 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | |
da181a8b | 270 | #endif |
789e6ac0 | 271 | |
2965a0e6 RR |
272 | /* |
273 | * We only update the dirty/accessed state if we set | |
274 | * the dirty bit by hand in the kernel, since the hardware | |
275 | * will do the accessed bit for us, and we don't want to | |
276 | * race with other CPUs that might be updating the dirty | |
277 | * bit at the same time. | |
278 | */ | |
279 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
280 | #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ | |
281 | do { \ | |
282 | if (dirty) { \ | |
283 | (ptep)->pte_low = (entry).pte_low; \ | |
dfbea0ad | 284 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
2965a0e6 RR |
285 | flush_tlb_page(vma, address); \ |
286 | } \ | |
287 | } while (0) | |
288 | ||
25e4df5b ZA |
289 | /* |
290 | * We don't actually have these, but we want to advertise them so that | |
291 | * we can encompass the flush here. | |
292 | */ | |
6049742d | 293 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY |
6049742d | 294 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
25e4df5b | 295 | |
d6d861e3 ZA |
296 | /* |
297 | * Rules for using ptep_establish: the pte MUST be a user pte, and | |
298 | * must be a present->present transition. | |
299 | */ | |
300 | #define __HAVE_ARCH_PTEP_ESTABLISH | |
301 | #define ptep_establish(vma, address, ptep, pteval) \ | |
302 | do { \ | |
303 | set_pte_present((vma)->vm_mm, address, ptep, pteval); \ | |
304 | flush_tlb_page(vma, address); \ | |
305 | } while (0) | |
306 | ||
25e4df5b ZA |
307 | #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH |
308 | #define ptep_clear_flush_dirty(vma, address, ptep) \ | |
309 | ({ \ | |
310 | int __dirty; \ | |
311 | __dirty = pte_dirty(*(ptep)); \ | |
312 | if (__dirty) { \ | |
313 | clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ | |
dfbea0ad | 314 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
25e4df5b ZA |
315 | flush_tlb_page(vma, address); \ |
316 | } \ | |
317 | __dirty; \ | |
318 | }) | |
319 | ||
320 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | |
321 | #define ptep_clear_flush_young(vma, address, ptep) \ | |
322 | ({ \ | |
323 | int __young; \ | |
324 | __young = pte_young(*(ptep)); \ | |
325 | if (__young) { \ | |
326 | clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ | |
dfbea0ad | 327 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
25e4df5b ZA |
328 | flush_tlb_page(vma, address); \ |
329 | } \ | |
330 | __young; \ | |
331 | }) | |
1da177e4 | 332 | |
8ecb8950 ZA |
333 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
334 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |
335 | { | |
4cdd9c89 | 336 | pte_t pte = native_ptep_get_and_clear(ptep); |
8ecb8950 ZA |
337 | pte_update(mm, addr, ptep); |
338 | return pte; | |
339 | } | |
340 | ||
6049742d | 341 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
a600388d ZA |
342 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) |
343 | { | |
344 | pte_t pte; | |
345 | if (full) { | |
346 | pte = *ptep; | |
c2c1accd | 347 | native_pte_clear(mm, addr, ptep); |
a600388d ZA |
348 | } else { |
349 | pte = ptep_get_and_clear(mm, addr, ptep); | |
350 | } | |
351 | return pte; | |
352 | } | |
353 | ||
6049742d | 354 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
1da177e4 LT |
355 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
356 | { | |
357 | clear_bit(_PAGE_BIT_RW, &ptep->pte_low); | |
789e6ac0 | 358 | pte_update(mm, addr, ptep); |
1da177e4 LT |
359 | } |
360 | ||
d7271b14 ZA |
361 | /* |
362 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | |
363 | * | |
364 | dst - pointer to pgd range anywhere on a pgd page | |
365 | * src - "" | |
366 | * count - the number of pgds to copy. | |
367 | * | |
368 | dst and src can be on the same page, but the ranges must not overlap, | |
369 | * and must not cross a page boundary. | |
370 | */ | |
371 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |
372 | { | |
373 | memcpy(dst, src, count * sizeof(pgd_t)); | |
374 | } | |
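A self-contained sketch of the contract described above, with pgd_t mocked as an unsigned long purely for illustration (the real type is opaque): copying within one page is fine as long as the source and destination ranges do not overlap.

#include <stdio.h>
#include <string.h>

typedef unsigned long pgd_t;		/* mocked for the example */

static void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	pgd_t page[8] = { 1, 2, 3, 4 };	/* stand-in for one pgd page */

	/* same page, non-overlapping ranges: allowed */
	clone_pgd_range(&page[4], &page[0], 4);

	for (int i = 0; i < 8; i++)
		printf("%lu ", page[i]);
	printf("\n");			/* prints: 1 2 3 4 1 2 3 4 */
	return 0;
}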
375 | ||
1da177e4 LT |
376 | /* |
377 | * Macro to mark a page protection value as "uncacheable". On processors which do not support | |
378 | * it, this is a no-op. | |
379 | */ | |
380 | #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ | |
381 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) | |
382 | ||
383 | /* | |
384 | * Conversion functions: convert a page and protection to a page entry, | |
385 | * and a page entry and page directory to the page they refer to. | |
386 | */ | |
387 | ||
388 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | |
1da177e4 LT |
389 | |
390 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |
391 | { | |
392 | pte.pte_low &= _PAGE_CHG_MASK; | |
393 | pte.pte_low |= pgprot_val(newprot); | |
394 | #ifdef CONFIG_X86_PAE | |
395 | /* | |
396 | * Chop off the NX bit (if present), and add the NX portion of | |
397 | * the newprot (if present): | |
398 | */ | |
399 | pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); | |
400 | pte.pte_high |= (pgprot_val(newprot) >> 32) & \ | |
401 | (__supported_pte_mask >> 32); | |
402 | #endif | |
403 | return pte; | |
404 | } | |
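The low-word effect of pte_modify() can be shown in isolation: everything covered by _PAGE_CHG_MASK (the page frame plus the accessed and dirty bits) survives, and the remaining bits are replaced by the new protection. A user-space sketch with invented values, taking PTE_MASK as PAGE_MASK as in the non-PAE case:

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x040UL
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

int main(void)
{
	unsigned long pte     = 0x00123067UL;	/* rw, user, accessed, dirty */
	unsigned long newprot = 0x025UL;	/* present, user, accessed: read-only */

	unsigned long result = (pte & _PAGE_CHG_MASK) | newprot;

	printf("old pte %#lx + newprot %#lx -> %#lx\n", pte, newprot, result);
	return 0;
}

Here the result keeps pfn 0x123 and the accessed/dirty bits but drops _PAGE_RW, which is what changing a writable mapping to read-only protection requires.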
405 | ||
1da177e4 LT |
406 | #define pmd_large(pmd) \ |
407 | ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) | |
408 | ||
409 | /* | |
410 | * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] | |
411 | * | |
412 | * this macro returns the index of the entry in the pgd page which would | |
413 | * control the given virtual address | |
414 | */ | |
415 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | |
416 | #define pgd_index_k(addr) pgd_index(addr) | |
417 | ||
418 | /* | |
419 | * pgd_offset() returns a (pgd_t *) | |
420 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's; | |
421 | */ | |
422 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | |
423 | ||
424 | /* | |
425 | * a shortcut which implies the use of the kernel's pgd, instead | |
426 | * of a process's | |
427 | */ | |
428 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
429 | ||
430 | /* | |
431 | * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD] | |
432 | * | |
433 | * this macro returns the index of the entry in the pmd page which would | |
434 | * control the given virtual address | |
435 | */ | |
436 | #define pmd_index(address) \ | |
437 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | |
438 | ||
439 | /* | |
440 | * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | |
441 | * | |
442 | * this macro returns the index of the entry in the pte page which would | |
443 | * control the given virtual address | |
444 | */ | |
445 | #define pte_index(address) \ | |
446 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | |
447 | #define pte_offset_kernel(dir, address) \ | |
46a82b2d | 448 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) |
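Putting pgd_index() and pte_index() together: in the two-level (non-PAE) layout a 32-bit virtual address splits into a 10-bit pgd index, a 10-bit pte index and a 12-bit page offset. A stand-alone sketch of that arithmetic (the address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

int main(void)
{
	unsigned long address = 0xc0123456UL;	/* example kernel address */

	unsigned long pgd_idx = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	unsigned long pte_idx = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset  = address & ((1UL << PAGE_SHIFT) - 1);

	printf("%#lx -> pgd index %lu, pte index %lu, page offset %#lx\n",
	       address, pgd_idx, pte_idx, offset);
	return 0;
}

With PAE the shifts come from pgtable-3level-defs.h instead, and the middle (pmd) level is no longer folded away.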
1da177e4 | 449 | |
ca140fda PBG |
450 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) |
451 | ||
46a82b2d | 452 | #define pmd_page_vaddr(pmd) \ |
ca140fda PBG |
453 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
454 | ||
1da177e4 LT |
455 | /* |
456 | * Helper function that returns the kernel pagetable entry controlling | |
457 | * the virtual address 'address'. NULL means no pagetable entry present. | |
458 | * NOTE: the return type is pte_t, but if the pmd is a large (PSE) entry | |
459 | * then we return it as a pte too. | |
460 | */ | |
461 | extern pte_t *lookup_address(unsigned long address); | |
462 | ||
463 | /* | |
464 | * Make a given kernel text page executable/non-executable. | |
465 | * Returns the previous executability setting of that page (which | |
466 | * is used to restore the previous state). Used by the SMP bootup code. | |
467 | * NOTE: this is an __init function for security reasons. | |
468 | */ | |
469 | #ifdef CONFIG_X86_PAE | |
470 | extern int set_kernel_exec(unsigned long vaddr, int enable); | |
471 | #else | |
472 | static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} | |
473 | #endif | |
474 | ||
1da177e4 | 475 | #if defined(CONFIG_HIGHPTE) |
a27fe809 | 476 | #define pte_offset_map(dir, address) \ |
ce6234b5 | 477 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) |
a27fe809 | 478 | #define pte_offset_map_nested(dir, address) \ |
ce6234b5 | 479 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) |
1da177e4 LT |
480 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) |
481 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | |
482 | #else | |
483 | #define pte_offset_map(dir, address) \ | |
484 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | |
485 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | |
486 | #define pte_unmap(pte) do { } while (0) | |
487 | #define pte_unmap_nested(pte) do { } while (0) | |
488 | #endif | |
489 | ||
23002d88 ZA |
490 | /* Clear a kernel PTE and flush it from the TLB */ |
491 | #define kpte_clear_flush(ptep, vaddr) \ | |
492 | do { \ | |
493 | pte_clear(&init_mm, vaddr, ptep); \ | |
494 | __flush_tlb_one(vaddr); \ | |
495 | } while (0) | |
496 | ||
1da177e4 LT |
497 | /* |
498 | * The i386 doesn't have any external MMU info: the kernel page | |
499 | * tables contain all the necessary information. | |
1da177e4 LT |
500 | */ |
501 | #define update_mmu_cache(vma,address,pte) do { } while (0) | |
b239fb25 JF |
502 | |
503 | void native_pagetable_setup_start(pgd_t *base); | |
504 | void native_pagetable_setup_done(pgd_t *base); | |
505 | ||
506 | #ifndef CONFIG_PARAVIRT | |
507 | static inline void paravirt_pagetable_setup_start(pgd_t *base) | |
508 | { | |
509 | native_pagetable_setup_start(base); | |
510 | } | |
511 | ||
512 | static inline void paravirt_pagetable_setup_done(pgd_t *base) | |
513 | { | |
514 | native_pagetable_setup_done(base); | |
515 | } | |
516 | #endif /* !CONFIG_PARAVIRT */ | |
517 | ||
1da177e4 LT |
518 | #endif /* !__ASSEMBLY__ */ |
519 | ||
05b79bdc | 520 | #ifdef CONFIG_FLATMEM |
1da177e4 | 521 | #define kern_addr_valid(addr) (1) |
05b79bdc | 522 | #endif /* CONFIG_FLATMEM */ |
1da177e4 | 523 | |
1da177e4 LT |
524 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
525 | remap_pfn_range(vma, vaddr, pfn, size, prot) | |
526 | ||
527 | #define MK_IOSPACE_PFN(space, pfn) (pfn) | |
528 | #define GET_IOSPACE(pfn) 0 | |
529 | #define GET_PFN(pfn) (pfn) | |
530 | ||
1da177e4 LT |
531 | #include <asm-generic/pgtable.h> |
532 | ||
533 | #endif /* _I386_PGTABLE_H */ |