x86, traps: introduce dotraplinkage
[deliverable/linux.git] include/asm-x86/pgtable_32.h
#ifndef ASM_X86__PGTABLE_32_H
#define ASM_X86__PGTABLE_32_H

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
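
/*
 * Illustrative sketch (assumes the generic walk helpers declared in
 * <asm/pgtable.h>, not this file): because the mid levels are folded,
 * the full four-step walk still compiles, but pud_offset() is a no-op
 * on 32-bit and pmd_offset() is a no-op without PAE, so the hardware
 * only ever sees two (or, with PAE, three) levels:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */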
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
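
/*
 * For reference, assuming the usual values supplied by the 2-/3-level
 * defs headers included above:
 *
 *	!PAE: PGDIR_SHIFT = 22, PTRS_PER_PGD = 1024, PTRS_PER_PTE = 1024
 *	 PAE: PGDIR_SHIFT = 30, PTRS_PER_PGD = 4,
 *	      PMD_SHIFT = 21, PTRS_PER_PMD = 512, PTRS_PER_PTE = 512
 */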

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
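
/*
 * For example, assuming PAGE_OFFSET = 0xC0000000 and 896 MB of lowmem:
 * high_memory = 0xF8000000, so VMALLOC_START = 0xF8000000 + 8 MB
 * = 0xF8800000.
 */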
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
		    & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
#endif

#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE, ...)'.
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower word
 * when PAE is enabled */
#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))	\
	 : (prot))
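
/*
 * Typical usage sketch (hypothetical driver code, not part of this
 * header): a driver mapping device memory into user space would
 * combine this with io_remap_pfn_range() from the bottom of this file:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn,
 *			   vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */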

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline int pud_large(pud_t pud) { return 0; }

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address)				\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
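
/*
 * For example, with the PAE values PMD_SHIFT = 21 and PTRS_PER_PMD = 512:
 *
 *	pmd_index(0xC0400000) = (0xC0400000 >> 21) & 511 = 1538 & 511 = 2
 */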

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address)					\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)				\
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
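
/*
 * For example, with the !PAE values PAGE_SHIFT = 12 and
 * PTRS_PER_PTE = 1024:
 *
 *	pte_index(0xC0001000) = (0xC0001000 >> 12) & 1023
 *			      = 0xC0001 & 0x3FF = 1
 */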

#define pmd_page(pmd)	(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd)				\
	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
	 pte_index((address)))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
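
/*
 * Typical usage pattern (sketch): under CONFIG_HIGHPTE the returned
 * pointer may live in a kmap_atomic() slot, so map and unmap must
 * bracket a short, non-sleeping critical section:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	entry = *pte;
 *	pte_unmap(pte);
 */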

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

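/*
 * For example, the kmap_atomic() teardown path in
 * arch/x86/mm/highmem_32.c clears a fixmap slot with
 * kpte_clear_flush(kmap_pte - idx, vaddr).
 */
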
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* ASM_X86__PGTABLE_32_H */