#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

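/*
 * The pud level is folded in the 3-level PAE layout and the four
 * top-level entries are always populated, so these checks can be
 * compile-time constants.
 */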
#define pud_none(pud)				0
#define pud_bad(pud)				0
#define pud_present(pud)			1

/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
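/*
 * The high word is written first so the entry only becomes present
 * (the present bit lives in the low word) once the full 64-bit value
 * is in place.
 */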
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
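/*
 * The mm and addr arguments are unused by the native implementation;
 * they exist for the benefit of paravirtualized back ends.
 */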
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

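/*
 * set_64bit() stores the whole 64-bit entry in one atomic operation
 * (cmpxchg8b on i386), so it is safe even when the entry may be live
 * and concurrently walked by the hardware.
 */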
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

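/*
 * Without CONFIG_PARAVIRT the generic page table setters resolve
 * directly to the native implementations above; a paravirtualized
 * kernel supplies its own definitions instead.
 */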
#ifndef CONFIG_PARAVIRT
#define set_pte(ptep, pte)			native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)		native_set_pte_at(mm, addr, ptep, pte)
#define set_pte_present(mm, addr, ptep, pte)	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)		native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd)			native_set_pmd(pmdp, pmd)
#define set_pud(pudp, pud)			native_set_pud(pudp, pud)
#define pte_clear(mm, addr, ptep)		native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)				native_pmd_clear(pmd)
#endif

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pud_clear(pud_t *pud) { }

#define pud_page(pud) \
	((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
	((unsigned long) __va(pud_val(pud) & PAGE_MASK))


/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
			pmd_index(address))

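/*
 * On SMP another CPU (or the hardware walker) may update the accessed
 * and dirty bits concurrently, so the present bit in the low word must
 * be cleared with an atomic xchg before the high word is read.  On UP
 * the cheaper non-atomic local variant is sufficient.
 */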
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp)	native_local_ptep_get_and_clear(xp)
#endif

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PAGE_SHIFT;
}

extern unsigned long long __supported_pte_mask;

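/*
 * Masking with __supported_pte_mask drops protection bits the running
 * CPU cannot handle, most notably NX when it is unavailable.
 */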
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS	32

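/*
 * A swap pte keeps its low word zero (so the entry is not present) and
 * packs the swap type into bits 0-4 of the high word, with the swap
 * offset in the bits above.
 */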
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })

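/*
 * In PAE mode the pmd pages live as long as the pgd they hang off
 * (they are released by pgd_free()), so there is nothing for the
 * generic tlb code to free here.
 */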
#define __pmd_free_tlb(tlb, x)		do { } while (0)

#endif /* _I386_PGTABLE_3LEVEL_H */