Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * include/asm-s390/pgtable.h | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | |
7 | * Ulrich Weigand (weigand@de.ibm.com) | |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | |
9 | * | |
10 | * Derived from "include/asm-i386/pgtable.h" | |
11 | */ | |
12 | ||
13 | #ifndef _ASM_S390_PGTABLE_H | |
14 | #define _ASM_S390_PGTABLE_H | |
15 | ||
1da177e4 LT |
16 | /* |
17 | * The Linux memory management assumes a three-level page table setup. For | |
18 | * s390 31 bit we "fold" the mid level into the top-level page table, so | |
19 | * that we physically have the same two-level page table as the s390 mmu | |
20 | * expects in 31 bit mode. For s390 64 bit we use three of the five levels | |
21 | * the hardware provides (region first and region second tables are not | |
22 | * used). | |
23 | * | |
24 | * The "pgd_xxx()" functions are trivial for a folded two-level | |
25 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | |
26 | * into the pgd entry) | |
27 | * | |
28 | * This file contains the functions and defines necessary to modify and use | |
29 | * the S390 page table tree. | |
30 | */ | |
31 | #ifndef __ASSEMBLY__ | |
2dcea57a | 32 | #include <linux/mm_types.h> |
1da177e4 LT |
33 | #include <asm/bug.h> |
34 | #include <asm/processor.h> | |
1da177e4 | 35 | |
1da177e4 LT |
36 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); |
37 | extern void paging_init(void); | |
2b67fc46 | 38 | extern void vmem_map_init(void); |
1da177e4 LT |
39 | |
40 | /* | |
41 | * The S390 doesn't have any external MMU info: the kernel page | |
42 | * tables contain all the necessary information. | |
43 | */ | |
44 | #define update_mmu_cache(vma, address, pte) do { } while (0) | |
45 | ||
46 | /* | |
47 | * ZERO_PAGE is a global shared page that is always zero: used | |
48 | * for zero-mapped memory areas etc.. | |
49 | */ | |
50 | extern char empty_zero_page[PAGE_SIZE]; | |
51 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | |
52 | #endif /* !__ASSEMBLY__ */ | |
53 | ||
54 | /* | |
55 | * PMD_SHIFT determines the size of the area a second-level page | |
56 | * table can map | |
57 | * PGDIR_SHIFT determines what a third-level page table entry can map | |
58 | */ | |
59 | #ifndef __s390x__ | |
60 | # define PMD_SHIFT 22 | |
190a1d72 | 61 | # define PUD_SHIFT 22 |
1da177e4 LT |
62 | # define PGDIR_SHIFT 22 |
63 | #else /* __s390x__ */ | |
64 | # define PMD_SHIFT 21 | |
190a1d72 | 65 | # define PUD_SHIFT 31 |
1da177e4 LT |
66 | # define PGDIR_SHIFT 31 |
67 | #endif /* __s390x__ */ | |
68 | ||
69 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
70 | #define PMD_MASK (~(PMD_SIZE-1)) | |
190a1d72 MS |
71 | #define PUD_SIZE (1UL << PUD_SHIFT) |
72 | #define PUD_MASK (~(PUD_SIZE-1)) | |
1da177e4 LT |
73 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
74 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
75 | ||
76 | /* | |
77 | * entries per page directory level: the S390 is two-level, so | |
78 | * we don't really have any PMD directory physically. | |
79 | * For S390, segment-table entries are combined into one PGD, |
80 | * which leads to 1024 ptes per pgd. |
81 | */ | |
82 | #ifndef __s390x__ | |
83 | # define PTRS_PER_PTE 1024 | |
84 | # define PTRS_PER_PMD 1 | |
190a1d72 | 85 | # define PTRS_PER_PUD 1 |
1da177e4 LT |
86 | # define PTRS_PER_PGD 512 |
87 | #else /* __s390x__ */ | |
88 | # define PTRS_PER_PTE 512 | |
89 | # define PTRS_PER_PMD 1024 | |
190a1d72 | 90 | # define PTRS_PER_PUD 1 |
1da177e4 LT |
91 | # define PTRS_PER_PGD 2048 |
92 | #endif /* __s390x__ */ | |
93 | ||
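
To make the arithmetic behind the shift and entry-count values above concrete, here is a small self-contained sketch (not part of the header; the `EX_`-prefixed macros are local copies of the 64 bit values, kept only for illustration):

```c
#include <stdio.h>

/* Local copies of the 64 bit values defined above, for illustration only. */
#define EX_PAGE_SHIFT	12
#define EX_PMD_SHIFT	21
#define EX_PUD_SHIFT	31
#define EX_PTRS_PER_PTE	512
#define EX_PTRS_PER_PMD	1024

int main(void)
{
	/* One page table: 512 entries of 4KB pages = 2MB, i.e. 1UL << PMD_SHIFT. */
	unsigned long pte_table_maps = (unsigned long) EX_PTRS_PER_PTE << EX_PAGE_SHIFT;
	/* One segment table: 1024 entries of 2MB each = 2GB, i.e. 1UL << PUD_SHIFT. */
	unsigned long pmd_table_maps = (unsigned long) EX_PTRS_PER_PMD << EX_PMD_SHIFT;

	printf("a page table maps    %lu MB (1UL << %d)\n",
	       pte_table_maps >> 20, EX_PMD_SHIFT);
	printf("a segment table maps %lu MB (1UL << %d)\n",
	       pmd_table_maps >> 20, EX_PUD_SHIFT);
	return 0;
}
```

The 31 bit case works the same way: 1024 ptes of 4KB give the 4MB PMD_SIZE, and 512 pgd entries of 4MB cover the 2GB address space.
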
d455a369 HD |
94 | #define FIRST_USER_ADDRESS 0 |
95 | ||
1da177e4 LT |
96 | #define pte_ERROR(e) \ |
97 | printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) | |
98 | #define pmd_ERROR(e) \ | |
99 | printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) | |
190a1d72 MS |
100 | #define pud_ERROR(e) \ |
101 | printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e)) | |
1da177e4 LT |
102 | #define pgd_ERROR(e) \ |
103 | printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) | |
104 | ||
105 | #ifndef __ASSEMBLY__ | |
106 | /* | |
107 | * Just any arbitrary offset to the start of the vmalloc VM area: the | |
108 | * current 8MB value just means that there will be a 8MB "hole" after the | |
109 | * physical memory until the kernel virtual memory starts. That means that | |
110 | * any out-of-bounds memory accesses will hopefully be caught. | |
111 | * The vmalloc() routines leave a hole of 4kB between each vmalloced |
112 | * area for the same reason. ;) | |
e39394b8 HC |
113 | * The vmalloc area starts at 4GB to prevent modules from exchanging |
114 | * syscall table entries. |
1da177e4 | 115 | */ |
f4eb07c1 | 116 | extern unsigned long vmalloc_end; |
e39394b8 HC |
117 | |
118 | #ifdef CONFIG_64BIT | |
119 | #define VMALLOC_ADDR (max(0x100000000UL, (unsigned long) high_memory)) | |
120 | #else | |
121 | #define VMALLOC_ADDR ((unsigned long) high_memory) | |
122 | #endif | |
123 | #define VMALLOC_OFFSET (8*1024*1024) | |
124 | #define VMALLOC_START ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) | |
f4eb07c1 | 125 | #define VMALLOC_END vmalloc_end |
8b62bc96 HC |
126 | |
127 | /* | |
128 | * We need some free virtual space to be able to do vmalloc. | |
129 | * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc | |
130 | * area. On a machine with 2GB memory we make sure that we | |
131 | * have at least 128MB free space for vmalloc. On a machine | |
f4eb07c1 | 132 | * with 4TB we make sure we have at least 128GB. |
8b62bc96 | 133 | */ |
1da177e4 | 134 | #ifndef __s390x__ |
8b62bc96 | 135 | #define VMALLOC_MIN_SIZE 0x8000000UL |
f4eb07c1 | 136 | #define VMALLOC_END_INIT 0x80000000UL |
1da177e4 | 137 | #else /* __s390x__ */ |
f4eb07c1 HC |
138 | #define VMALLOC_MIN_SIZE 0x2000000000UL |
139 | #define VMALLOC_END_INIT 0x40000000000UL | |
1da177e4 LT |
140 | #endif /* __s390x__ */ |
141 | ||
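
As a rough illustration of the VMALLOC_START rounding above, the following standalone sketch rounds a hypothetical `high_memory` value up to the next 8MB boundary (the 512MB value is invented for the example; on a real system `high_memory` is set by the memory detection code):

```c
#include <stdio.h>

#define EX_VMALLOC_OFFSET (8 * 1024 * 1024)

int main(void)
{
	/* Hypothetical end of the directly mapped physical memory: 512MB. */
	unsigned long high_memory = 0x20000000UL;
	unsigned long vmalloc_start =
		(high_memory + EX_VMALLOC_OFFSET) & ~(EX_VMALLOC_OFFSET - 1UL);

	/* The rounding leaves an unmapped guard hole below the vmalloc area. */
	printf("high_memory   = 0x%lx\n", high_memory);
	printf("VMALLOC_START = 0x%lx (hole of %lu MB)\n",
	       vmalloc_start, (vmalloc_start - high_memory) >> 20);
	return 0;
}
```
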
1da177e4 LT |
142 | /* |
143 | * A 31 bit page table entry of S390 has the following format: |
144 | * | PFRA | | OS | | |
145 | * 0 0IP0 | |
146 | * 00000000001111111111222222222233 | |
147 | * 01234567890123456789012345678901 | |
148 | * | |
149 | * I Page-Invalid Bit: Page is not available for address-translation | |
150 | * P Page-Protection Bit: Store access not possible for page | |
151 | * | |
152 | * A 31 bit segment table entry of S390 has the following format: |
153 | * | P-table origin | |PTL | |
154 | * 0 IC | |
155 | * 00000000001111111111222222222233 | |
156 | * 01234567890123456789012345678901 | |
157 | * | |
158 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
159 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
160 | * PTL Page-Table-Length: Page-table length ((PTL+1)*16 entries -> up to 256) |
161 | * | |
162 | * The 31 bit segment table origin of S390 has the following format: |
163 | * | |
164 | * |S-table origin | | STL | | |
165 | * X **GPS | |
166 | * 00000000001111111111222222222233 | |
167 | * 01234567890123456789012345678901 | |
168 | * | |
169 | * X Space-Switch event: | |
170 | * G Segment-Invalid Bit: * | |
171 | * P Private-Space Bit: Segment is not private (PoP 3-30) | |
172 | * S Storage-Alteration: | |
173 | * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048) |
174 | * | |
175 | * A 64 bit page table entry of S390 has the following format: |
176 | * | PFRA |0IP0| OS | | |
177 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
178 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
179 | * | |
180 | * I Page-Invalid Bit: Page is not available for address-translation | |
181 | * P Page-Protection Bit: Store access not possible for page | |
182 | * | |
183 | * A 64 bit segment table entry of S390 has the following format: |
184 | * | P-table origin | TT | |
185 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
186 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
187 | * | |
188 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
189 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
190 | * P Page-Protection Bit: Store access not possible for page | |
191 | * TT Type 00 | |
192 | * | |
193 | * A 64 bit region table entry of S390 has the following format: |
194 | * | S-table origin | TF TTTL | |
195 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
196 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
197 | * | |
198 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
199 | * TT Type 01 | |
200 | * TF | |
190a1d72 | 201 | * TL Table length |
1da177e4 LT |
202 | * |
203 | * The 64 bit region table origin of S390 has the following format: |
204 | * | region table origin | DTTL |
205 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
206 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
207 | * | |
208 | * X Space-Switch event: | |
209 | * G Segment-Invalid Bit: | |
210 | * P Private-Space Bit: | |
211 | * S Storage-Alteration: | |
212 | * R Real space | |
213 | * TL Table-Length: | |
214 | * | |
215 | * A storage key has the following format: | |
216 | * | ACC |F|R|C|0| | |
217 | * 0 3 4 5 6 7 | |
218 | * ACC: access key | |
219 | * F : fetch protection bit | |
220 | * R : referenced bit | |
221 | * C : changed bit | |
222 | */ | |
223 | ||
224 | /* Hardware bits in the page table entry */ | |
83377484 MS |
225 | #define _PAGE_RO 0x200 /* HW read-only bit */ |
226 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ | |
3610cce8 MS |
227 | |
228 | /* Software bits in the page table entry */ | |
83377484 MS |
229 | #define _PAGE_SWT 0x001 /* SW pte type bit t */ |
230 | #define _PAGE_SWX 0x002 /* SW pte type bit x */ | |
1da177e4 | 231 | |
83377484 | 232 | /* Eight different types of pages. */
9282ed92 GS |
233 | #define _PAGE_TYPE_EMPTY 0x400 |
234 | #define _PAGE_TYPE_NONE 0x401 | |
83377484 MS |
235 | #define _PAGE_TYPE_SWAP 0x403 |
236 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ | |
9282ed92 GS |
237 | #define _PAGE_TYPE_RO 0x200 |
238 | #define _PAGE_TYPE_RW 0x000 | |
c1821c2e GS |
239 | #define _PAGE_TYPE_EX_RO 0x202 |
240 | #define _PAGE_TYPE_EX_RW 0x002 | |
1da177e4 | 241 | |
83377484 MS |
242 | /* |
243 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, | |
244 | * pte_none and pte_file to find out the pte type WITHOUT holding the page | |
245 | * table lock. ptep_clear_flush on the other hand uses the ipte instruction to |
246 | * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs | |
247 | * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. | |
248 | * This change is done while holding the lock, but the intermediate step | |
249 | * of a previously valid pte with the hw invalid bit set can be observed by | |
250 | * handle_pte_fault. That makes it necessary that all valid pte types with | |
251 | * the hw invalid bit set must be distinguishable from the four pte types | |
252 | * empty, none, swap and file. | |
253 | * | |
254 | * irxt ipte irxt | |
255 | * _PAGE_TYPE_EMPTY 1000 -> 1000 | |
256 | * _PAGE_TYPE_NONE 1001 -> 1001 | |
257 | * _PAGE_TYPE_SWAP 1011 -> 1011 | |
258 | * _PAGE_TYPE_FILE 11?1 -> 11?1 | |
259 | * _PAGE_TYPE_RO 0100 -> 1100 | |
260 | * _PAGE_TYPE_RW 0000 -> 1000 | |
c1821c2e GS |
261 | * _PAGE_TYPE_EX_RO 0110 -> 1110 |
262 | * _PAGE_TYPE_EX_RW 0010 -> 1010 | |
83377484 | 263 | * |
c1821c2e | 264 | * pte_none is true for bit combinations 1000, 1010, 1100, 1110 |
83377484 MS |
265 | * pte_present is true for bit combinations 0000, 0010, 0100, 0110, 1001 |
266 | * pte_file is true for bit combinations 1101, 1111 |
c1821c2e | 267 | * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. |
83377484 MS |
268 | */ |
269 | ||
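
The table above is easier to check with actual numbers. The sketch below is a userspace model of the pte_none/pte_present tests defined later in this file, using local copies of the bit values (`EX_` prefix); it is illustrative only and not part of the header:

```c
#include <assert.h>
#include <stdio.h>

/* Local copies of the bit values defined above, for illustration only. */
#define EX_PAGE_RO	0x200
#define EX_PAGE_INVALID	0x400
#define EX_PAGE_SWT	0x001
#define EX_PAGE_SWX	0x002
#define EX_TYPE_NONE	0x401
#define EX_TYPE_SWAP	0x403
#define EX_TYPE_RO	0x200
#define EX_TYPE_RW	0x000

static int ex_pte_none(unsigned long v)
{
	return (v & EX_PAGE_INVALID) && !(v & EX_PAGE_SWT);
}

static int ex_pte_present(unsigned long v)
{
	unsigned long mask = EX_PAGE_RO | EX_PAGE_INVALID | EX_PAGE_SWT | EX_PAGE_SWX;

	return (v & mask) == EX_TYPE_NONE ||
	       (!(v & EX_PAGE_INVALID) && !(v & EX_PAGE_SWT));
}

int main(void)
{
	/* Valid RO/RW ptes are present; once ipte has set the invalid bit the
	 * lockless checks see them as none, never as swap or file. */
	assert(ex_pte_present(EX_TYPE_RW) && ex_pte_present(EX_TYPE_RO));
	assert(ex_pte_none(EX_TYPE_RW | EX_PAGE_INVALID));
	assert(ex_pte_none(EX_TYPE_RO | EX_PAGE_INVALID));
	/* A swap pte is neither none nor present. */
	assert(!ex_pte_none(EX_TYPE_SWAP) && !ex_pte_present(EX_TYPE_SWAP));
	printf("pte type encodings behave as described\n");
	return 0;
}
```
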
1da177e4 LT |
270 | #ifndef __s390x__ |
271 | ||
3610cce8 MS |
272 | /* Bits in the segment table address-space-control-element */ |
273 | #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ | |
274 | #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ | |
275 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
276 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
277 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ | |
1da177e4 | 278 | |
3610cce8 MS |
279 | /* Bits in the segment table entry */ |
280 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | |
281 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
282 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | |
283 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | |
1da177e4 | 284 | |
3610cce8 MS |
285 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) |
286 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
1da177e4 LT |
287 | |
288 | #else /* __s390x__ */ | |
289 | ||
3610cce8 MS |
290 | /* Bits in the segment/region table address-space-control-element */ |
291 | #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ | |
292 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
293 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
294 | #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ | |
295 | #define _ASCE_REAL_SPACE 0x20 /* real space control */ | |
296 | #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ | |
297 | #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ | |
298 | #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ | |
299 | #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ | |
300 | #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ | |
301 | #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ | |
302 | ||
303 | /* Bits in the region table entry */ | |
304 | #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ | |
305 | #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ | |
306 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ | |
307 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ | |
308 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ | |
309 | #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ | |
310 | #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ | |
311 | ||
312 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) | |
313 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV) | |
314 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) | |
315 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV) | |
316 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) | |
317 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) | |
318 | ||
1da177e4 | 319 | /* Bits in the segment table entry */ |
3610cce8 MS |
320 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
321 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | |
322 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
1da177e4 | 323 | |
3610cce8 MS |
324 | #define _SEGMENT_ENTRY (0) |
325 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
326 | ||
327 | #endif /* __s390x__ */ | |
1da177e4 LT |
328 | |
329 | /* | |
3610cce8 MS |
330 | * A user page table pointer has the space-switch-event bit, the |
331 | * private-space-control bit and the storage-alteration-event-control | |
332 | * bit set. A kernel page table pointer doesn't need them. | |
1da177e4 | 333 | */ |
3610cce8 MS |
334 | #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ |
335 | _ASCE_ALT_EVENT) | |
1da177e4 | 336 | |
3610cce8 | 337 | /* Bits in the storage key */
1da177e4 LT |
338 | #define _PAGE_CHANGED 0x02 /* HW changed bit */ |
339 | #define _PAGE_REFERENCED 0x04 /* HW referenced bit */ | |
340 | ||
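
For reference, a storage key byte decomposes as shown in the format comment further up (| ACC |F|R|C|0|). The sketch below decodes a hypothetical key value in userspace, with the referenced and changed masks matching _PAGE_REFERENCED and _PAGE_CHANGED above (on real hardware the key would be read with the iske instruction):

```c
#include <stdio.h>

/* Storage key layout, mirroring the bit values defined above. */
#define EX_KEY_ACC_SHIFT	4	/* access-control bits in the high nibble */
#define EX_KEY_FETCH_PROT	0x08
#define EX_KEY_REFERENCED	0x04	/* same value as _PAGE_REFERENCED */
#define EX_KEY_CHANGED		0x02	/* same value as _PAGE_CHANGED */

int main(void)
{
	unsigned char key = 0x16;	/* hypothetical key value */

	printf("access key : %u\n", key >> EX_KEY_ACC_SHIFT);
	printf("fetch prot : %d\n", !!(key & EX_KEY_FETCH_PROT));
	printf("referenced : %d\n", !!(key & EX_KEY_REFERENCED));
	printf("changed    : %d\n", !!(key & EX_KEY_CHANGED));
	return 0;
}
```
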
1da177e4 | 341 | /* |
9282ed92 | 342 | * Page protection definitions. |
1da177e4 | 343 | */ |
9282ed92 GS |
344 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) |
345 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) | |
346 | #define PAGE_RW __pgprot(_PAGE_TYPE_RW) | |
c1821c2e GS |
347 | #define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO) |
348 | #define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW) | |
9282ed92 GS |
349 | |
350 | #define PAGE_KERNEL PAGE_RW | |
351 | #define PAGE_COPY PAGE_RO | |
1da177e4 LT |
352 | |
353 | /* | |
c1821c2e GS |
354 | * Depending on the EXEC_PROTECT option s390 can do execute protection. |
355 | * Write permission always implies read permission. In theory with a | |
356 | * primary/secondary page table execute only can be implemented but | |
357 | * it would cost an additional bit in the pte to distinguish all the | |
358 | * different pte types. To avoid that execute permission currently | |
359 | * implies read permission as well. | |
1da177e4 LT |
360 | */ |
361 | /*xwr*/ | |
9282ed92 GS |
362 | #define __P000 PAGE_NONE |
363 | #define __P001 PAGE_RO | |
364 | #define __P010 PAGE_RO | |
365 | #define __P011 PAGE_RO | |
c1821c2e GS |
366 | #define __P100 PAGE_EX_RO |
367 | #define __P101 PAGE_EX_RO | |
368 | #define __P110 PAGE_EX_RO | |
369 | #define __P111 PAGE_EX_RO | |
9282ed92 GS |
370 | |
371 | #define __S000 PAGE_NONE | |
372 | #define __S001 PAGE_RO | |
373 | #define __S010 PAGE_RW | |
374 | #define __S011 PAGE_RW | |
c1821c2e GS |
375 | #define __S100 PAGE_EX_RO |
376 | #define __S101 PAGE_EX_RO | |
377 | #define __S110 PAGE_EX_RW | |
378 | #define __S111 PAGE_EX_RW | |
379 | ||
380 | #ifndef __s390x__ | |
3610cce8 | 381 | # define PxD_SHADOW_SHIFT 1 |
c1821c2e | 382 | #else /* __s390x__ */ |
3610cce8 | 383 | # define PxD_SHADOW_SHIFT 2 |
c1821c2e GS |
384 | #endif /* __s390x__ */ |
385 | ||
386 | static inline struct page *get_shadow_page(struct page *page) | |
387 | { | |
3610cce8 MS |
388 | if (s390_noexec && page->index) |
389 | return virt_to_page((void *)(addr_t) page->index); | |
c1821c2e GS |
390 | return NULL; |
391 | } | |
392 | ||
3610cce8 | 393 | static inline void *get_shadow_pte(void *table) |
c1821c2e | 394 | { |
3610cce8 MS |
395 | unsigned long addr, offset; |
396 | struct page *page; | |
c1821c2e | 397 | |
3610cce8 MS |
398 | addr = (unsigned long) table; |
399 | offset = addr & (PAGE_SIZE - 1); | |
400 | page = virt_to_page((void *)(addr ^ offset)); | |
401 | return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); | |
c1821c2e GS |
402 | } |
403 | ||
3610cce8 | 404 | static inline void *get_shadow_table(void *table) |
c1821c2e | 405 | { |
3610cce8 MS |
406 | unsigned long addr, offset; |
407 | struct page *page; | |
408 | ||
409 | addr = (unsigned long) table; | |
410 | offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1); | |
411 | page = virt_to_page((void *)(addr ^ offset)); | |
412 | return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); | |
c1821c2e | 413 | } |
1da177e4 LT |
414 | |
415 | /* | |
416 | * Certain architectures need to do special things when PTEs | |
417 | * within a page table are directly modified. Thus, the following | |
418 | * hook is made available. | |
419 | */ | |
ba8a9229 MS |
420 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
421 | pte_t *pteptr, pte_t pteval) | |
1da177e4 | 422 | { |
c1821c2e GS |
423 | pte_t *shadow_pte = get_shadow_pte(pteptr); |
424 | ||
1da177e4 | 425 | *pteptr = pteval; |
c1821c2e GS |
426 | if (shadow_pte) { |
427 | if (!(pte_val(pteval) & _PAGE_INVALID) && | |
428 | (pte_val(pteval) & _PAGE_SWX)) | |
429 | pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO; | |
430 | else | |
431 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | |
432 | } | |
1da177e4 | 433 | } |
1da177e4 LT |
434 | |
435 | /* | |
436 | * pgd/pmd/pte query functions | |
437 | */ | |
438 | #ifndef __s390x__ | |
439 | ||
4448aaf0 AB |
440 | static inline int pgd_present(pgd_t pgd) { return 1; } |
441 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
442 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
1da177e4 | 443 | |
190a1d72 MS |
444 | static inline int pud_present(pud_t pud) { return 1; } |
445 | static inline int pud_none(pud_t pud) { return 0; } | |
446 | static inline int pud_bad(pud_t pud) { return 0; } | |
447 | ||
1da177e4 LT |
448 | #else /* __s390x__ */ |
449 | ||
190a1d72 MS |
450 | static inline int pgd_present(pgd_t pgd) { return 1; } |
451 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
452 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
453 | ||
454 | static inline int pud_present(pud_t pud) | |
1da177e4 | 455 | { |
0d017923 | 456 | return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; |
1da177e4 LT |
457 | } |
458 | ||
190a1d72 | 459 | static inline int pud_none(pud_t pud) |
1da177e4 | 460 | { |
0d017923 | 461 | return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; |
1da177e4 LT |
462 | } |
463 | ||
190a1d72 | 464 | static inline int pud_bad(pud_t pud) |
1da177e4 | 465 | { |
3610cce8 | 466 | unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV; |
190a1d72 | 467 | return (pud_val(pud) & mask) != _REGION3_ENTRY; |
1da177e4 LT |
468 | } |
469 | ||
3610cce8 MS |
470 | #endif /* __s390x__ */ |
471 | ||
4448aaf0 | 472 | static inline int pmd_present(pmd_t pmd) |
1da177e4 | 473 | { |
0d017923 | 474 | return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL; |
1da177e4 LT |
475 | } |
476 | ||
4448aaf0 | 477 | static inline int pmd_none(pmd_t pmd) |
1da177e4 | 478 | { |
0d017923 | 479 | return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL; |
1da177e4 LT |
480 | } |
481 | ||
4448aaf0 | 482 | static inline int pmd_bad(pmd_t pmd) |
1da177e4 | 483 | { |
3610cce8 MS |
484 | unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV; |
485 | return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; | |
1da177e4 LT |
486 | } |
487 | ||
4448aaf0 | 488 | static inline int pte_none(pte_t pte) |
1da177e4 | 489 | { |
83377484 | 490 | return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); |
1da177e4 LT |
491 | } |
492 | ||
4448aaf0 | 493 | static inline int pte_present(pte_t pte) |
1da177e4 | 494 | { |
83377484 MS |
495 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; |
496 | return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || | |
497 | (!(pte_val(pte) & _PAGE_INVALID) && | |
498 | !(pte_val(pte) & _PAGE_SWT)); | |
1da177e4 LT |
499 | } |
500 | ||
4448aaf0 | 501 | static inline int pte_file(pte_t pte) |
1da177e4 | 502 | { |
83377484 MS |
503 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; |
504 | return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; | |
1da177e4 LT |
505 | } |
506 | ||
ba8a9229 MS |
507 | #define __HAVE_ARCH_PTE_SAME |
508 | #define pte_same(a,b) (pte_val(a) == pte_val(b)) | |
1da177e4 LT |
509 | |
510 | /* | |
511 | * query functions pte_write/pte_dirty/pte_young only work if | |
512 | * pte_present() is true. Undefined behaviour if not.. | |
513 | */ | |
4448aaf0 | 514 | static inline int pte_write(pte_t pte) |
1da177e4 LT |
515 | { |
516 | return (pte_val(pte) & _PAGE_RO) == 0; | |
517 | } | |
518 | ||
4448aaf0 | 519 | static inline int pte_dirty(pte_t pte) |
1da177e4 LT |
520 | { |
521 | /* A pte is neither clean nor dirty on s/390. The dirty bit | |
522 | * is in the storage key. See page_test_dirty and |
523 | * page_clear_dirty for details. |
524 | */ | |
525 | return 0; | |
526 | } | |
527 | ||
4448aaf0 | 528 | static inline int pte_young(pte_t pte) |
1da177e4 LT |
529 | { |
530 | /* A pte is neither young nor old on s/390. The young bit | |
531 | * is in the storage key. See page_test_and_clear_young for | |
532 | * details. | |
533 | */ | |
534 | return 0; | |
535 | } | |
536 | ||
1da177e4 LT |
537 | /* |
538 | * pgd/pmd/pte modification functions | |
539 | */ | |
540 | ||
541 | #ifndef __s390x__ | |
542 | ||
190a1d72 MS |
543 | #define pgd_clear(pgd) do { } while (0) |
544 | #define pud_clear(pud) do { } while (0) | |
1da177e4 | 545 | |
c1821c2e | 546 | static inline void pmd_clear_kernel(pmd_t * pmdp) |
1da177e4 | 547 | { |
3610cce8 MS |
548 | pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY; |
549 | pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY; | |
550 | pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY; | |
551 | pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY; | |
c1821c2e GS |
552 | } |
553 | ||
1da177e4 LT |
554 | #else /* __s390x__ */ |
555 | ||
190a1d72 MS |
556 | #define pgd_clear(pgd) do { } while (0) |
557 | ||
558 | static inline void pud_clear_kernel(pud_t *pud) | |
1da177e4 | 559 | { |
190a1d72 | 560 | pud_val(*pud) = _REGION3_ENTRY_EMPTY; |
1da177e4 LT |
561 | } |
562 | ||
190a1d72 | 563 | static inline void pud_clear(pud_t * pud) |
c1821c2e | 564 | { |
190a1d72 | 565 | pud_t *shadow = get_shadow_table(pud); |
c1821c2e | 566 | |
190a1d72 MS |
567 | pud_clear_kernel(pud); |
568 | if (shadow) | |
569 | pud_clear_kernel(shadow); | |
c1821c2e GS |
570 | } |
571 | ||
572 | static inline void pmd_clear_kernel(pmd_t * pmdp) | |
1da177e4 | 573 | { |
3610cce8 MS |
574 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; |
575 | pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY; | |
1da177e4 LT |
576 | } |
577 | ||
3610cce8 MS |
578 | #endif /* __s390x__ */ |
579 | ||
c1821c2e GS |
580 | static inline void pmd_clear(pmd_t * pmdp) |
581 | { | |
3610cce8 | 582 | pmd_t *shadow_pmd = get_shadow_table(pmdp); |
c1821c2e GS |
583 | |
584 | pmd_clear_kernel(pmdp); | |
585 | if (shadow_pmd) | |
586 | pmd_clear_kernel(shadow_pmd); | |
587 | } | |
588 | ||
4448aaf0 | 589 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
1da177e4 | 590 | { |
c1821c2e GS |
591 | pte_t *shadow_pte = get_shadow_pte(ptep); |
592 | ||
9282ed92 | 593 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
c1821c2e GS |
594 | if (shadow_pte) |
595 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | |
1da177e4 LT |
596 | } |
597 | ||
598 | /* | |
599 | * The following pte modification functions only work if | |
600 | * pte_present() is true. Undefined behaviour if not.. | |
601 | */ | |
4448aaf0 | 602 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1da177e4 LT |
603 | { |
604 | pte_val(pte) &= PAGE_MASK; | |
605 | pte_val(pte) |= pgprot_val(newprot); | |
606 | return pte; | |
607 | } | |
608 | ||
4448aaf0 | 609 | static inline pte_t pte_wrprotect(pte_t pte) |
1da177e4 | 610 | { |
9282ed92 | 611 | /* Do not clobber _PAGE_TYPE_NONE pages! */ |
1da177e4 LT |
612 | if (!(pte_val(pte) & _PAGE_INVALID)) |
613 | pte_val(pte) |= _PAGE_RO; | |
614 | return pte; | |
615 | } | |
616 | ||
4448aaf0 | 617 | static inline pte_t pte_mkwrite(pte_t pte) |
1da177e4 LT |
618 | { |
619 | pte_val(pte) &= ~_PAGE_RO; | |
620 | return pte; | |
621 | } | |
622 | ||
4448aaf0 | 623 | static inline pte_t pte_mkclean(pte_t pte) |
1da177e4 LT |
624 | { |
625 | /* The only user of pte_mkclean is the fork() code. | |
626 | We must *not* clear the *physical* page dirty bit | |
627 | just because fork() wants to clear the dirty bit in | |
628 | *one* of the page's mappings. So we just do nothing. */ | |
629 | return pte; | |
630 | } | |
631 | ||
4448aaf0 | 632 | static inline pte_t pte_mkdirty(pte_t pte) |
1da177e4 LT |
633 | { |
634 | /* We do not explicitly set the dirty bit because the | |
635 | * sske instruction is slow. It is faster to let the | |
636 | * next instruction set the dirty bit. | |
637 | */ | |
638 | return pte; | |
639 | } | |
640 | ||
4448aaf0 | 641 | static inline pte_t pte_mkold(pte_t pte) |
1da177e4 LT |
642 | { |
643 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
644 | * There is no point in clearing the real referenced bit. | |
645 | */ | |
646 | return pte; | |
647 | } | |
648 | ||
4448aaf0 | 649 | static inline pte_t pte_mkyoung(pte_t pte) |
1da177e4 LT |
650 | { |
651 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
652 | * There is no point in setting the real referenced bit. | |
653 | */ | |
654 | return pte; | |
655 | } | |
656 | ||
ba8a9229 MS |
657 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
658 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
659 | unsigned long addr, pte_t *ptep) | |
1da177e4 LT |
660 | { |
661 | return 0; | |
662 | } | |
663 | ||
ba8a9229 MS |
664 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
665 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |
666 | unsigned long address, pte_t *ptep) | |
1da177e4 LT |
667 | { |
668 | /* No need to flush TLB; bits are in storage key */ | |
ba8a9229 | 669 | return 0; |
1da177e4 LT |
670 | } |
671 | ||
9282ed92 | 672 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
1da177e4 | 673 | { |
9282ed92 | 674 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { |
1da177e4 | 675 | #ifndef __s390x__ |
1da177e4 LT |
676 | /* S390 has 1mb segments, we are emulating 4MB segments */ |
677 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); | |
9282ed92 GS |
678 | #else |
679 | /* ipte in zarch mode can do the math */ | |
680 | pte_t *pto = ptep; | |
681 | #endif | |
94c12cc7 MS |
682 | asm volatile( |
683 | " ipte %2,%3" | |
684 | : "=m" (*ptep) : "m" (*ptep), | |
685 | "a" (pto), "a" (address)); | |
1da177e4 | 686 | } |
9282ed92 GS |
687 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
688 | } | |
689 | ||
f0e47c22 | 690 | static inline void ptep_invalidate(unsigned long address, pte_t *ptep) |
9282ed92 | 691 | { |
9282ed92 | 692 | __ptep_ipte(address, ptep); |
f0e47c22 MS |
693 | ptep = get_shadow_pte(ptep); |
694 | if (ptep) | |
695 | __ptep_ipte(address, ptep); | |
696 | } | |
697 | ||
ba8a9229 MS |
698 | /* |
699 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush | |
700 | * both clear the TLB for the unmapped pte. The reason is that | |
701 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) | |
702 | * to modify an active pte. The sequence is | |
703 | * 1) ptep_get_and_clear | |
704 | * 2) set_pte_at | |
705 | * 3) flush_tlb_range | |
706 | * On s390 the tlb needs to get flushed with the modification of the pte | |
707 | * if the pte is active. The only way this can be implemented is to |
708 | * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range | |
709 | * is a nop. | |
710 | */ | |
711 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
712 | #define ptep_get_and_clear(__mm, __address, __ptep) \ | |
713 | ({ \ | |
714 | pte_t __pte = *(__ptep); \ | |
715 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
716 | (__mm) != current->active_mm) \ | |
717 | ptep_invalidate(__address, __ptep); \ | |
718 | else \ | |
719 | pte_clear((__mm), (__address), (__ptep)); \ | |
720 | __pte; \ | |
721 | }) | |
722 | ||
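
For context, this is roughly the generic sequence the comment above refers to, written out as kernel-style pseudocode (the helper name example_change_pte is invented for illustration; it is not a real kernel function and is not meant to compile standalone):

```c
/* Sketch only: mirrors the change_pte_range-style sequence from generic mm
 * code to show why the tlb flush has to live inside ptep_get_and_clear. */
static void example_change_pte(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep, pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);  /* 1) flushes on s390 */
	pte = pte_modify(pte, newprot);
	set_pte_at(vma->vm_mm, addr, ptep, pte);            /* 2) */
	flush_tlb_range(vma, addr, addr + PAGE_SIZE);       /* 3) a nop on s390 */
}
```
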
723 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | |
f0e47c22 MS |
724 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
725 | unsigned long address, pte_t *ptep) | |
726 | { | |
727 | pte_t pte = *ptep; | |
728 | ptep_invalidate(address, ptep); | |
1da177e4 LT |
729 | return pte; |
730 | } | |
731 | ||
ba8a9229 MS |
732 | /* |
733 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the | |
734 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all | |
735 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct | |
736 | * cannot be accessed while the batched unmap is running. In this case | |
737 | * full==1 and a simple pte_clear is enough. See tlb.h. | |
738 | */ | |
739 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | |
740 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |
741 | unsigned long addr, | |
742 | pte_t *ptep, int full) | |
1da177e4 | 743 | { |
ba8a9229 MS |
744 | pte_t pte = *ptep; |
745 | ||
746 | if (full) | |
747 | pte_clear(mm, addr, ptep); | |
748 | else | |
749 | ptep_invalidate(addr, ptep); | |
750 | return pte; | |
1da177e4 LT |
751 | } |
752 | ||
ba8a9229 MS |
753 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
754 | #define ptep_set_wrprotect(__mm, __addr, __ptep) \ | |
755 | ({ \ | |
756 | pte_t __pte = *(__ptep); \ | |
757 | if (pte_write(__pte)) { \ | |
758 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
759 | (__mm) != current->active_mm) \ | |
760 | ptep_invalidate(__addr, __ptep); \ | |
761 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ | |
762 | } \ | |
763 | }) | |
764 | ||
765 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
f0e47c22 MS |
766 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ |
767 | ({ \ | |
768 | int __changed = !pte_same(*(__ptep), __entry); \ | |
769 | if (__changed) { \ | |
770 | ptep_invalidate(__addr, __ptep); \ | |
771 | set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ | |
772 | } \ | |
773 | __changed; \ | |
8dab5241 | 774 | }) |
1da177e4 LT |
775 | |
776 | /* | |
777 | * Test and clear dirty bit in storage key. | |
778 | * We can't clear the changed bit atomically. This is a potential | |
779 | * race against modification of the referenced bit. This function | |
780 | * should therefore only be called if it is not mapped in any | |
781 | * address space. | |
782 | */ | |
ba8a9229 | 783 | #define __HAVE_ARCH_PAGE_TEST_DIRTY |
6c210482 | 784 | static inline int page_test_dirty(struct page *page) |
2dcea57a | 785 | { |
6c210482 MS |
786 | return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; |
787 | } | |
2dcea57a | 788 | |
ba8a9229 | 789 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY |
6c210482 MS |
790 | static inline void page_clear_dirty(struct page *page) |
791 | { | |
792 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); | |
2dcea57a | 793 | } |
1da177e4 LT |
794 | |
795 | /* | |
796 | * Test and clear referenced bit in storage key. | |
797 | */ | |
ba8a9229 | 798 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG |
2dcea57a HC |
799 | static inline int page_test_and_clear_young(struct page *page) |
800 | { | |
0b2b6e1d | 801 | unsigned long physpage = page_to_phys(page); |
2dcea57a HC |
802 | int ccode; |
803 | ||
0b2b6e1d HC |
804 | asm volatile( |
805 | " rrbe 0,%1\n" | |
806 | " ipm %0\n" | |
807 | " srl %0,28\n" | |
2dcea57a HC |
808 | : "=d" (ccode) : "a" (physpage) : "cc" ); |
809 | return ccode & 2; | |
810 | } | |
1da177e4 LT |
811 | |
812 | /* | |
813 | * Conversion functions: convert a page and protection to a page entry, | |
814 | * and a page entry and page directory to the page they refer to. | |
815 | */ | |
816 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |
817 | { | |
818 | pte_t __pte; | |
819 | pte_val(__pte) = physpage + pgprot_val(pgprot); | |
820 | return __pte; | |
821 | } | |
822 | ||
2dcea57a HC |
823 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
824 | { | |
0b2b6e1d | 825 | unsigned long physpage = page_to_phys(page); |
1da177e4 | 826 | |
2dcea57a HC |
827 | return mk_pte_phys(physpage, pgprot); |
828 | } | |
829 | ||
190a1d72 MS |
830 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
831 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | |
832 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | |
833 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) | |
1da177e4 | 834 | |
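
To see what these index macros actually select, here is a small standalone sketch that applies the 64 bit shift and count values to an arbitrary example address (the address and the `EX_` copies of the constants are for illustration only):

```c
#include <stdio.h>

/* Local copies of the 64 bit values defined earlier, for illustration only. */
#define EX_PAGE_SHIFT	12
#define EX_PMD_SHIFT	21
#define EX_PUD_SHIFT	31
#define EX_PGDIR_SHIFT	31
#define EX_PTRS_PER_PTE	512
#define EX_PTRS_PER_PMD	1024
#define EX_PTRS_PER_PUD	1
#define EX_PTRS_PER_PGD	2048

int main(void)
{
	unsigned long addr = 0x3ffff201000UL;	/* arbitrary example address */

	printf("pgd_index = %lu\n", (addr >> EX_PGDIR_SHIFT) & (EX_PTRS_PER_PGD - 1));
	printf("pud_index = %lu\n", (addr >> EX_PUD_SHIFT) & (EX_PTRS_PER_PUD - 1));
	printf("pmd_index = %lu\n", (addr >> EX_PMD_SHIFT) & (EX_PTRS_PER_PMD - 1));
	printf("pte_index = %lu\n", (addr >> EX_PAGE_SHIFT) & (EX_PTRS_PER_PTE - 1));
	return 0;
}
```

The pud index is always 0 because PTRS_PER_PUD is 1: the pud level is folded and pud_offset() simply reuses the pgd entry.
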
190a1d72 MS |
835 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
836 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
1da177e4 | 837 | |
190a1d72 | 838 | #ifndef __s390x__ |
1da177e4 | 839 | |
190a1d72 MS |
840 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
841 | #define pud_deref(pmd) ({ BUG(); 0UL; }) | |
842 | #define pgd_deref(pmd) ({ BUG(); 0UL; }) | |
46a82b2d | 843 | |
190a1d72 MS |
844 | #define pud_offset(pgd, address) ((pud_t *) pgd) |
845 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) | |
1da177e4 | 846 | |
190a1d72 | 847 | #else /* __s390x__ */ |
1da177e4 | 848 | |
190a1d72 MS |
849 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
850 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) | |
851 | #define pgd_deref(pgd) ({ BUG(); 0UL; }) | |
1da177e4 | 852 | |
190a1d72 | 853 | #define pud_offset(pgd, address) ((pud_t *) pgd) |
1da177e4 | 854 | |
190a1d72 | 855 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
1da177e4 | 856 | { |
190a1d72 MS |
857 | pmd_t *pmd = (pmd_t *) pud_deref(*pud); |
858 | return pmd + pmd_index(address); | |
1da177e4 LT |
859 | } |
860 | ||
190a1d72 | 861 | #endif /* __s390x__ */ |
1da177e4 | 862 | |
190a1d72 MS |
863 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
864 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | |
865 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | |
1da177e4 | 866 | |
190a1d72 | 867 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
1da177e4 | 868 | |
190a1d72 MS |
869 | /* Find an entry in the lowest level page table.. */ |
870 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) | |
871 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) | |
1da177e4 LT |
872 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
873 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) | |
874 | #define pte_unmap(pte) do { } while (0) | |
875 | #define pte_unmap_nested(pte) do { } while (0) | |
876 | ||
877 | /* | |
878 | * 31 bit swap entry format: | |
879 | * A page-table entry has some bits we have to treat in a special way. | |
880 | * Bits 0, 20 and bit 23 have to be zero, otherwise a specification |
881 | * exception will occur instead of a page translation exception. The |
882 | * specification exception has the bad habit of not storing the necessary |
883 | * information in the lowcore. | |
884 | * Bit 21 and bit 22 are the page invalid bit and the page protection | |
885 | * bit. We set both to indicate a swapped page. | |
886 | * Bit 30 and 31 are used to distinguish the different page types. For | |
887 | * a swapped page these bits need to be zero. | |
888 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. | |
889 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | |
890 | * plus 24 for the offset. | |
891 | * 0| offset |0110|o|type |00| | |
892 | * 0 0000000001111111111 2222 2 22222 33 | |
893 | * 0 1234567890123456789 0123 4 56789 01 | |
894 | * | |
895 | * 64 bit swap entry format: | |
896 | * A page-table entry has some bits we have to treat in a special way. | |
897 | * Bits 52 and bit 55 have to be zero, otherwise a specification |
898 | * exception will occur instead of a page translation exception. The |
899 | * specification exception has the bad habit of not storing the necessary |
900 | * information in the lowcore. | |
901 | * Bit 53 and bit 54 are the page invalid bit and the page protection | |
902 | * bit. We set both to indicate a swapped page. | |
903 | * Bit 62 and 63 are used to distinguish the different page types. For | |
904 | * a swapped page these bits need to be zero. | |
905 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. | |
906 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | |
907 | * plus 56 for the offset. | |
908 | * | offset |0110|o|type |00| | |
909 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | |
910 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | |
911 | */ | |
912 | #ifndef __s390x__ | |
913 | #define __SWP_OFFSET_MASK (~0UL >> 12) | |
914 | #else | |
915 | #define __SWP_OFFSET_MASK (~0UL >> 11) | |
916 | #endif | |
4448aaf0 | 917 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1da177e4 LT |
918 | { |
919 | pte_t pte; | |
920 | offset &= __SWP_OFFSET_MASK; | |
9282ed92 | 921 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | |
1da177e4 LT |
922 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
923 | return pte; | |
924 | } | |
925 | ||
926 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) | |
927 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) | |
928 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | |
929 | ||
930 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
931 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
932 | ||
933 | #ifndef __s390x__ | |
934 | # define PTE_FILE_MAX_BITS 26 | |
935 | #else /* __s390x__ */ | |
936 | # define PTE_FILE_MAX_BITS 59 | |
937 | #endif /* __s390x__ */ | |
938 | ||
939 | #define pte_to_pgoff(__pte) \ | |
940 | ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) | |
941 | ||
942 | #define pgoff_to_pte(__off) \ | |
943 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ | |
9282ed92 | 944 | | _PAGE_TYPE_FILE }) |
1da177e4 LT |
945 | |
946 | #endif /* !__ASSEMBLY__ */ | |
947 | ||
948 | #define kern_addr_valid(addr) (1) | |
949 | ||
f4eb07c1 HC |
950 | extern int add_shared_memory(unsigned long start, unsigned long size); |
951 | extern int remove_shared_memory(unsigned long start, unsigned long size); | |
952 | ||
1da177e4 LT |
953 | /* |
954 | * No page table caches to initialise | |
955 | */ | |
956 | #define pgtable_cache_init() do { } while (0) | |
957 | ||
f4eb07c1 HC |
958 | #define __HAVE_ARCH_MEMMAP_INIT |
959 | extern void memmap_init(unsigned long, int, unsigned long, unsigned long); | |
960 | ||
1da177e4 LT |
961 | #include <asm-generic/pgtable.h> |
962 | ||
963 | #endif /* _ASM_S390_PGTABLE_H */