[PATCH] Hugepage consolidation
include/asm-ia64/pgtable.h (deliverable/linux.git)
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT 0
#define _PAGE_A_BIT 5
#define _PAGE_D_BIT 6

#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
#define _PAGE_MA_MASK (0x7 << 2)
#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
#define _PAGE_PL_MASK (3 << 7)
#define _PAGE_AR_R (0 << 9) /* read only */
#define _PAGE_AR_RX (1 << 9) /* read & execute */
#define _PAGE_AR_RW (2 << 9) /* read & write */
#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
#define _PAGE_AR_MASK (7 << 9)
#define _PAGE_AR_SHIFT 9
#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */

#define _PFN_MASK _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K 12
#define _PAGE_SIZE_8K 13
#define _PAGE_SIZE_16K 14
#define _PAGE_SIZE_64K 16
#define _PAGE_SIZE_256K 18
#define _PAGE_SIZE_1M 20
#define _PAGE_SIZE_4M 22
#define _PAGE_SIZE_16M 24
#define _PAGE_SIZE_64M 26
#define _PAGE_SIZE_256M 28
#define _PAGE_SIZE_1G 30
#define _PAGE_SIZE_4G 32

#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
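
/*
 * Reading the composites above: __ACCESS_BITS marks a PTE present
 * (_PAGE_P, bit 0), accessed (_PAGE_A, bit 5), exception-deferred
 * (_PAGE_ED, bit 52) and write-back cacheable (_PAGE_MA_WB, memory
 * attribute 0); __DIRTY_BITS additionally sets the dirty bit
 * (_PAGE_D, bit 6).  Privilege level and access rights are ORed in
 * separately by the PAGE_* definitions below.
 */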

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS 0
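
/*
 * Worked example, assuming 16KB pages (PAGE_SHIFT = 14): PGDIR_SHIFT
 * = 14 + 2*11 = 36, so each of the PTRS_PER_PGD = 2^11 = 2048 PGD
 * entries maps 2^36 bytes (64GB); USER_PTRS_PER_PGD = 5*2048/8 =
 * 1280, i.e. the first five of the eight per-region chunks of the
 * PGD serve user regions 0-4.
 */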

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)

# ifndef __ASSEMBLY__

#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for private mappings, the _S version for
 * shared mappings (MAP_SHARED).  In a private mapping, we do a
 * copy-on-write if a task attempts to write to the page.
 */
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
#define __P011 PAGE_READONLY /* ditto */
#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
#define __S011 PAGE_SHARED
#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
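
/*
 * Worked example: a MAP_PRIVATE PROT_READ|PROT_WRITE mapping indexes
 * this table as __P011 (xwr = 011), i.e. PAGE_READONLY -- the write
 * permission is deliberately withheld so that the first store faults
 * and the fault handler can perform the copy-on-write.  The same bits
 * with MAP_SHARED select __S011 = PAGE_SHARED, which is directly
 * writable.
 */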

#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr) (1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval) (*(ptep) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#define RGN_SIZE (1UL << 61)
#define RGN_KERNEL 7

#define VMALLOC_START 0xa000000200000000UL
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
#else
# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
#endif
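
/*
 * Example, assuming 16KB pages (PAGE_SHIFT = 14): VMALLOC_END_INIT is
 * 0xa000000000000000 + 2^(4*14 - 9) = region 5 base + 2^47, i.e. the
 * vmalloc arena ends 128TB into the kernel's mapped region.  With
 * CONFIG_VIRTUAL_MEM_MAP, part of that arena may later be carved off
 * for the virtual mem_map, which is why VMALLOC_END is a variable in
 * that configuration.
 */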

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
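
/*
 * Usage sketch (illustrative only, not a kernel API): downgrading a
 * mapped page to read-only while keeping its frame and A/D state, as
 * mprotect()-style code would:
 *
 *	pte_t pte = *ptep;
 *	set_pte_at(mm, addr, ptep, pte_modify(pte, PAGE_READONLY));
 *
 * pte_modify() takes only the _PAGE_CHG_MASK bits (present, protnone,
 * privilege level, access rights, ED) from the new protection and
 * preserves everything else, notably the PFN and the A/D bits.
 */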

#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud) (pud_val(pud) != 0UL)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)

#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte) (__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P))
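
/*
 * Worked example for pte_wrprotect(): the access-rights field holds
 * AR_RWX = 3 (binary 011) for a writable+executable page; clearing
 * bit 1 of the field (_PAGE_AR_RW = 2 << 9) leaves 001 = AR_RX.
 * Likewise AR_RW = 2 (010) becomes 000 = AR_R.  This is why a single
 * AND-NOT suffices to write-protect any of the normal user mappings.
 */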

/*
 * Macro to mark a page protection value as "uncacheable".  Note that "protection" is really a
 * misnomer here as the protection value contains the memory attribute bits, dirty bits,
 * and various other bits as well.
 */
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but allow consecutive writes to be combined into
 * single (but larger) write transactions.
 */
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
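
/*
 * Usage sketch (hypothetical driver code; the mydrv_* names are made
 * up): a driver mapping a frame buffer write-combined into user space
 * would rewrite the vma's protection before remapping:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  mydrv_fb_paddr >> PAGE_SHIFT,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 *
 * pgprot_noncached() is used the same way for device registers whose
 * accesses must not be combined or cached at all.
 */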

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
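
/*
 * In other words, the single PGD page is divided into eight equal
 * chunks of PTRS_PER_PGD/8 entries, one per 2^61-byte region; the top
 * three address bits pick the chunk and the PGDIR bits index within
 * it.  E.g. with 16KB pages that is 8 x 256 entries, so an address in
 * region 5 lands in entries 1280-1535.
 */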

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.  This looks more complicated than it
 * should be because some platforms place page tables in high memory.
 */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
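
/*
 * The SMP variant above is the standard lock-free read-modify-write
 * pattern: reread the PTE and retry whenever cmpxchg() observes that
 * it changed between the load and the update (e.g. another CPU's
 * fault handler setting the A or D bit), so no concurrent update is
 * lost.
 */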

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, pte) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry) (((entry).val << 1) >> 10)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS 61
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
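
/*
 * Worked example: __swp_entry(5, 1000) yields val = (5 << 2) |
 * (1000 << 9).  Decoding: (val >> 2) & 0x7f recovers type 5, and
 * (val << 1) >> 10 first discards bit 63 (_PAGE_PROTNONE) and then
 * recovers offset 1000.  The file-pte macros work the same way:
 * pte_to_pgoff() shifts bit 63 out the top and bits 0-1 out the
 * bottom, leaving the 61-bit page offset (hence PTE_FILE_MAX_BITS).
 */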

/* XXX is this right? */
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
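
/*
 * Example, assuming 16KB base pages and 256MB huge pages (PAGE_SHIFT
 * = 14, HPAGE_SHIFT = 28): HUGETLB_PGDIR_SHIFT = 28 + 22 = 50, so in
 * the hugetlb region each first-level entry covers 2^50 bytes of
 * huge-page mappings, mirroring how PGDIR_SHIFT is derived from
 * PAGE_SHIFT for normal pages.
 */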

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this routine to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
extern void lazy_mmu_prot_update (pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
do { \
	if (__safely_writable) { \
		set_pte(__ptep, __entry); \
		flush_tlb_page(__vma, __addr); \
	} \
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			 unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE
#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE

#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */