#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

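/*
 * Illustrative sketch (not part of the original header): assuming the
 * hash-MMU values of this era from asm/book3s/64/hash.h, i.e.
 * KERN_VIRT_START = 0xD000000000000000 and KERN_VIRT_SIZE = 16TB,
 * the defines above work out to:
 *
 *	KERN_IO_START = 0xD000080000000000  (second half of kernel VA)
 *	ISA_IO_BASE .. ISA_IO_END	64K of reserved ISA space
 *	PHB_IO_BASE .. PHB_IO_END	2G - 64K of PHB IO space
 *	IOREMAP_BASE .. IOREMAP_END	the rest, used by ioremap()
 */
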
#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */

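/*
 * Illustrative sketch (not part of the original header): callers use the
 * iterator pair above roughly like this, where flush_hash_one() stands in
 * for a hypothetical per-subpage operation:
 *
 *	unsigned long index, shift;
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		flush_hash_one(vpn + (index << shift));
 *	} pte_iterate_hashed_end();
 *
 * In this default (no sub-page) implementation the "do { ... } while (0)"
 * body executes exactly once with index == 0; the Book3S 64K variant
 * replaces it with a real loop over hashed sub-pages.
 */
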
static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
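
/*
 * Illustrative sketch (not part of the original header): a full software
 * walk down to a kernel pte chains the helpers above; pud_offset() is
 * assumed available from the real or folded pud layer:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Real callers check pgd_none()/pud_none()/pmd_none() before descending
 * to the next level.
 */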

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	} while (0)
/*
 * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap, and convert that to a pte to
 * find a matching pte in the Linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
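
/*
 * Illustrative sketch (not part of the original header): for the bit
 * layout used here, encode and decode are exact inverses. For swap type
 * 3 at offset 0x1000:
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1000);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *
 *	__swp_type(__pte_to_swp_entry(pte))   == 3
 *	__swp_offset(__pte_to_swp_entry(pte)) == 0x1000
 *
 * The type occupies the SWP_TYPE_BITS bits above _PAGE_BIT_SWAP_TYPE and
 * the offset sits in the RPN field, so only _PAGE_PTE has to be masked
 * off when converting a pte back to a swp_entry_t.
 */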

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
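
/*
 * Illustrative sketch (not part of the original header): the swap
 * soft-dirty bit sits just above the swap type field and round-trips
 * through the helpers above:
 *
 *	pte_t pte = pte_swp_mksoft_dirty(__swp_entry_to_pte(ent));
 *
 *	pte_swp_soft_dirty(pte)                           == true
 *	pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)) == false
 *
 * With CONFIG_MEM_SOFT_DIRTY disabled, _PAGE_SWP_SOFT_DIRTY is 0UL, so
 * the helpers degenerate to no-ops and the test always reads back false.
 */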

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
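
/*
 * Illustrative note (not part of the original header): a THP pmd carries
 * pte-format bits, so every pmd accessor above is just the pte helper
 * applied through the pmd_pte()/pte_pmd() converters, e.g.
 *
 *	pmd_mkdirty(pmd) -> pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 *
 * which keeps the flag manipulation logic in one place for both page
 * sizes.
 */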

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information. So when we switch the pmd, we should also
	 * withdraw and deposit the pgtable.
	 */
	return true;
}
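
/*
 * Illustrative sketch (not part of the original header): the generic THP
 * move path honours the hook above roughly as follows, simplified from
 * move_huge_pmd() in mm/huge_memory.c:
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable_t pgtable;
 *		pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 *
 * Returning true keeps the deposited page table, which ppc64 also uses
 * for per-pmd bookkeeping, attached to the pmd that now maps the range.
 */
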
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */