/*
 * linux/arch/unicore32/include/asm/pgtable.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__

#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts. That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught. The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		SZ_8M
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) \
				 & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		(0xff000000UL)
#endif
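/*
 * Worked example (illustrative value, not from this file): if
 * high_memory were 0xc8000000, the expression above would give
 * VMALLOC_START == 0xc8800000, i.e. the vmalloc area begins at the
 * first 8MB boundary that leaves the intended hole above physical
 * memory.
 */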

#define PTRS_PER_PTE		1024
#define PTRS_PER_PGD		1024

/*
 * PGDIR_SHIFT determines the size of the area that a first-level (pgd)
 * page table entry can map.
 */
#define PGDIR_SHIFT		22

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
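/*
 * Sanity check on the constants above: PGDIR_SIZE is 1 << 22 == 4MB,
 * so the 1024 (PTRS_PER_PGD) first-level entries together span the
 * full 4GB address space, each pointing at a 1024-entry
 * (PTRS_PER_PTE) pte table.
 */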

/*
 * This is the lowest virtual address at which we permit any user-space
 * mapping to be placed. This is particularly important for CPUs whose
 * exception vectors are not located in high memory.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		22
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))
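/*
 * A section is thus 4MB here, the same span as one first-level (pgd)
 * entry: SECTION_MASK strips the offset within a section just as
 * PGDIR_MASK does for a pgd entry.
 */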

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable bits based on memory policy, as well as any
 * architecture-dependent bits.
 */
#define _PTE_DEFAULT		(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_WRITE))
#define PAGE_SHARED_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_WRITE \
							| PTE_EXEC))
#define PAGE_COPY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_COPY_EXEC		__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_EXEC))
#define PAGE_READONLY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_READONLY_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_EXEC))
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	__pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))

#define __PAGE_NONE		__pgprot(_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_WRITE \
						| PTE_EXEC)
#define __PAGE_COPY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_COPY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_READONLY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version. These get translated into the best that the
 * architecture can perform. Note that on UniCore hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC
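/*
 * Reading the tables: the three index digits are (exec, write, read);
 * __Pxxx covers private (MAP_PRIVATE) mappings and __Sxxx shared ones.
 * For example, a PROT_READ|PROT_WRITE private mapping selects
 * __P011 == __PAGE_COPY, which leaves PTE_WRITE clear so the first
 * write faults and can be handled as copy-on-write.
 */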

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) \
						| pgprot_val(prot)))
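/*
 * Example (assuming 4kB pages, i.e. PAGE_SHIFT == 12):
 * pfn_pte(0x12345, prot) builds a pte whose high bits address physical
 * page 0x12345000 and whose low bits carry the protection flags;
 * pte_pfn() is the inverse, shifting the flag bits back out.
 */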

#define pte_none(pte)			(!pte_val(pte))
#define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)			(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))

#define pte_offset_map(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep, pte)

#define set_pte_at(mm, addr, ptep, pteval)	\
	do {					\
		set_pte(ptep, pteval);		\
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present(pte)	(pte_val(pte) & PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & PTE_EXEC)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= PTE_YOUNG);
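/*
 * Each PTE_BIT_FUNC() use above expands to an inline helper; the first
 * one, for instance, becomes:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{ pte_val(pte) &= ~PTE_WRITE; return pte; }
 */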

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Mark the prot value as uncacheable.
 */
#define pgprot_noncached(prot)		\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_writecombine(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_dmacoherent(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PMD_PRESENT)
#define pmd_bad(pmd)		(((pmd_val(pmd) &		\
				(PMD_PRESENT | PMD_TYPE_MASK))	\
				!= (PMD_PRESENT | PMD_TYPE_TABLE)))

#define set_pmd(pmdpd, pmdval)		\
	do {				\
		*(pmdpd) = pmdval;	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		set_pmd(pmdp, __pmd(0));\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define pmd_page_vaddr(pmd)	((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* to find an entry in the third-level (pte) page table */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
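/*
 * pte_modify() deliberately touches only the access bits (EXEC, WRITE,
 * READ): the pfn and state bits such as PTE_YOUNG and PTE_DIRTY are
 * preserved, which is what callers like mprotect() expect.
 */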

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry. Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------> <--- type --> 0 0 0 0 0
 *
 * This gives us up to 127 swap files and, with the 20-bit page offset
 * and 4kB pages, 4GB per swap file. Note that the offset field is
 * always non-zero.
 */
#define __SWP_TYPE_SHIFT	5
#define __SWP_TYPE_BITS		7
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) \
				& __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
				((type) << __SWP_TYPE_SHIFT) | \
				((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
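/*
 * Worked example: __swp_entry(3, 0x100) yields
 * (3 << 5) | (0x100 << 12) == 0x100060, i.e. the type sits in bits
 * 5..11 and the offset in bits 12..31, while the low five bits stay
 * zero so a swap entry can never look like a present pte.
 */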

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry. File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ----------------------> 1 0 0 0
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 4)
#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)

#define PTE_FILE_MAX_BITS	28
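/*
 * The file offset occupies bits 4..31, hence PTE_FILE_MAX_BITS == 28.
 * For example, pgoff_to_pte(0x123) == __pte(0x1230 | PTE_FILE), and
 * pte_to_pgoff() recovers 0x123 by shifting the flag bits back out.
 */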

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#define pgtable_cache_init()	do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __UNICORE_PGTABLE_H__ */