#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
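/*
 * Illustrative sketch (not part of the original header): with the mid
 * levels folded, a full walk of these tables still goes through the
 * generic four-level accessors. Error checks (pgd_none() etc.) are
 * elided and the variable names are only examples:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */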
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void check_pgt_cache(void);

void pmd_ctor(void *, struct kmem_cache *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
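/*
 * Worked example (illustrative, assuming high_memory == 0xf8000000,
 * i.e. 896MB of lowmem above a PAGE_OFFSET of 0xc0000000):
 *
 *	VMALLOC_START = (0xf8000000 + 2*0x800000 - 1) & ~0x7fffff
 *	              = 0xf8800000
 *
 * which leaves the intended 8MB guard hole between the end of the
 * direct mapping and the first vmalloc address.
 */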

/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1	0x200	/* available for programmer */
#define _PAGE_UNUSED2	0x400
#define _PAGE_UNUSED3	0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX		__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * The i386 can't do page protection for execute, and considers
 * execute permission the same as read permission. Also, write
 * permissions imply read permissions. This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
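/*
 * Usage sketch (not defined in this header): generic mm code copies
 * these into protection_map[], indexed by the low
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags, roughly:
 *
 *	vma->vm_page_prot = protection_map[vm_flags &
 *			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 *
 * so a private PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY,
 * i.e. a read-only entry that is later made writable by COW.
 */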

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the low word when PAE is in use */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)


#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary
 * pages do not get out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
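/*
 * Illustrative sketch only: under CONFIG_PARAVIRT these become real
 * hypervisor notification hooks. A raw PTE modification that bypasses
 * set_pte()/pte_clear() would pair with them roughly like this
 * (hypothetical caller, which must hold the page table lock):
 *
 *	ptep->pte_low = new.pte_low;
 *	pte_update(vma->vm_mm, address, ptep);
 *	flush_tlb_page(vma, address);
 */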

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		(ptep)->pte_low = (entry).pte_low;			\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})
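/*
 * Usage sketch (roughly what the generic fault path does; illustrative,
 * not a definition from this header):
 *
 *	entry = pte_mkyoung(orig_pte);
 *	if (write_access)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, pte, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */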

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
						&(ptep)->pte_low);	\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})
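/*
 * Usage sketch (illustrative): the rmap-based page aging code harvests
 * the accessed bit roughly like this, flushing the TLB so the hardware
 * will set the bit again on the next access:
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */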

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
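/*
 * Usage sketch (illustrative): pgd construction copies the kernel
 * portion of swapper_pg_dir into each new pgd roughly like this:
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);
 */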

/*
 * Macro to mark a page protection value as "uncacheable". On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)		\
	? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
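/*
 * Usage sketch (illustrative, a common driver mmap pattern):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  size, vma->vm_page_prot);
 */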

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}

#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
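/*
 * Worked example (illustrative): with 2-level paging PGDIR_SHIFT is 22
 * and PTRS_PER_PGD is 1024, so for an address equal to a PAGE_OFFSET of
 * 0xc0000000:
 *
 *	pgd_index(0xc0000000) == (0xc0000000 >> 22) & 1023 == 768
 *
 * i.e. the kernel mapping starts three quarters of the way into the pgd.
 */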
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);
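/*
 * Usage sketch (illustrative, with hypothetical error handling):
 *
 *	pte_t *pte = lookup_address(vaddr);
 *	if (pte && pte_present(*pte))
 *		... inspect or modify the kernel mapping ...
 */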
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
 extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
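/*
 * Usage sketch (illustrative): with CONFIG_HIGHPTE the pte page may live
 * in highmem, so every pte_offset_map() must be paired with pte_unmap()
 * (the _nested variants allow two simultaneous mappings, e.g. in fork):
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	... use *pte; no sleeping here, the mapping is an atomic kmap ...
 *	pte_unmap(pte);
 */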

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);

#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* !CONFIG_PARAVIRT */

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */