Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * include/asm-s390/pgtable.h | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | |
7 | * Ulrich Weigand (weigand@de.ibm.com) | |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | |
9 | * | |
10 | * Derived from "include/asm-i386/pgtable.h" | |
11 | */ | |
12 | ||
13 | #ifndef _ASM_S390_PGTABLE_H | |
14 | #define _ASM_S390_PGTABLE_H | |
15 | ||
1da177e4 LT |
16 | /* |
17 | * The Linux memory management assumes a three-level page table setup. For | |
18 | * s390 31 bit we "fold" the mid level into the top-level page table, so | |
19 | * that we physically have the same two-level page table as the s390 mmu | |
20 | * expects in 31 bit mode. For s390 64 bit we use three of the five levels | |
21 | * the hardware provides (region first and region second tables are not | |
22 | * used). | |
23 | * | |
24 | * The "pgd_xxx()" functions are trivial for a folded two-level | |
25 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | |
26 | * into the pgd entry) | |
27 | * | |
28 | * This file contains the functions and defines necessary to modify and use | |
29 | * the S390 page table tree. | |
30 | */ | |
31 | #ifndef __ASSEMBLY__ | |
9789db08 | 32 | #include <linux/sched.h> |
2dcea57a | 33 | #include <linux/mm_types.h> |
5b7baf05 | 34 | #include <asm/bitops.h> |
1da177e4 LT |
35 | #include <asm/bug.h> |
36 | #include <asm/processor.h> | |
1da177e4 | 37 | |
1da177e4 LT |
38 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); |
39 | extern void paging_init(void); | |
2b67fc46 | 40 | extern void vmem_map_init(void); |
1da177e4 LT |
41 | |
42 | /* | |
43 | * The S390 doesn't have any external MMU info: the kernel page | |
44 | * tables contain all the necessary information. | |
45 | */ | |
46 | #define update_mmu_cache(vma, address, pte) do { } while (0) | |
47 | ||
48 | /* | |
49 | * ZERO_PAGE is a global shared page that is always zero: used | |
50 | * for zero-mapped memory areas etc.. | |
51 | */ | |
52 | extern char empty_zero_page[PAGE_SIZE]; | |
53 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | |
54 | #endif /* !__ASSEMBLY__ */ | |
55 | ||
56 | /* | |
57 | * PMD_SHIFT determines the size of the area a second-level page | |
58 | * table can map | |
59 | * PGDIR_SHIFT determines what a third-level page table entry can map | |
60 | */ | |
61 | #ifndef __s390x__ | |
146e4b3c MS |
62 | # define PMD_SHIFT 20 |
63 | # define PUD_SHIFT 20 | |
64 | # define PGDIR_SHIFT 20 | |
1da177e4 | 65 | #else /* __s390x__ */ |
146e4b3c | 66 | # define PMD_SHIFT 20 |
190a1d72 | 67 | # define PUD_SHIFT 31 |
5a216a20 | 68 | # define PGDIR_SHIFT 42 |
1da177e4 LT |
69 | #endif /* __s390x__ */ |
70 | ||
71 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
72 | #define PMD_MASK (~(PMD_SIZE-1)) | |
190a1d72 MS |
73 | #define PUD_SIZE (1UL << PUD_SHIFT) |
74 | #define PUD_MASK (~(PUD_SIZE-1)) | |
5a216a20 MS |
75 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
76 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
1da177e4 LT |
77 | |
78 | /* | |
79 | * entries per page directory level: the S390 is two-level, so | |
80 | * we don't really have any PMD directory physically. | |
81 | * for S390 segment-table entries are combined to one PGD | |
82 | * that leads to 1024 pte per pgd | |
83 | */ | |
146e4b3c | 84 | #define PTRS_PER_PTE 256 |
1da177e4 | 85 | #ifndef __s390x__ |
146e4b3c | 86 | #define PTRS_PER_PMD 1 |
5a216a20 | 87 | #define PTRS_PER_PUD 1 |
1da177e4 | 88 | #else /* __s390x__ */ |
146e4b3c | 89 | #define PTRS_PER_PMD 2048 |
5a216a20 | 90 | #define PTRS_PER_PUD 2048 |
1da177e4 | 91 | #endif /* __s390x__ */ |
146e4b3c | 92 | #define PTRS_PER_PGD 2048 |
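/*
 * [Editor's illustration, not part of the original header] A quick worked
 * example of the resulting 64 bit table geometry:
 *   PMD_SIZE   = 1UL << 20 = 1 MB  (one page table: 256 ptes * 4 KB)
 *   PUD_SIZE   = 1UL << 31 = 2 GB  (one segment table: 2048 entries * 1 MB)
 *   PGDIR_SIZE = 1UL << 42 = 4 TB  (one region-third table: 2048 entries * 2 GB)
 * so a pgd (region-second table) with 2048 entries spans 2^53 bytes of
 * address space. A virtual address is decomposed accordingly, e.g.
 *   pte index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)
 *   pmd index = (addr >> PMD_SHIFT)  & (PTRS_PER_PMD - 1)
 * exactly as the pgd/pud/pmd/pte_index macros further down implement it.
 */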
1da177e4 | 93 | |
d455a369 HD |
94 | #define FIRST_USER_ADDRESS 0 |
95 | ||
1da177e4 LT |
96 | #define pte_ERROR(e) \ |
97 | printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) | |
98 | #define pmd_ERROR(e) \ | |
99 | printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) | |
190a1d72 MS |
100 | #define pud_ERROR(e) \ |
101 | printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e)) | |
1da177e4 LT |
102 | #define pgd_ERROR(e) \ |
103 | printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) | |
104 | ||
105 | #ifndef __ASSEMBLY__ | |
106 | /* | |
5fd9c6e2 CB |
107 | * The vmalloc area will always be on the topmost area of the kernel |
108 | * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, | |
109 | * which should be enough for any sane case. | |
110 | * By putting vmalloc at the top, we maximise the gap between physical | |
111 | * memory and vmalloc to catch misplaced memory accesses. As a side | |
112 | * effect, this also makes sure that 64 bit module code cannot be used | |
113 | * as a system call address. | |
8b62bc96 | 114 | */ |
239a6425 HC |
115 | |
116 | extern unsigned long VMALLOC_START; | |
117 | ||
1da177e4 | 118 | #ifndef __s390x__ |
239a6425 | 119 | #define VMALLOC_SIZE (96UL << 20) |
5fd9c6e2 | 120 | #define VMALLOC_END 0x7e000000UL |
0189103c | 121 | #define VMEM_MAP_END 0x80000000UL |
1da177e4 | 122 | #else /* __s390x__ */ |
239a6425 | 123 | #define VMALLOC_SIZE (1UL << 30) |
5fd9c6e2 | 124 | #define VMALLOC_END 0x3e040000000UL |
0189103c | 125 | #define VMEM_MAP_END 0x40000000000UL |
1da177e4 LT |
126 | #endif /* __s390x__ */ |
127 | ||
0189103c HC |
128 | /* |
129 | * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1 | |
130 | * mapping. This needs to be calculated at compile time since the size of the | |
131 | * VMEM_MAP is static but the size of struct page can change. | |
132 | */ | |
522d8dc0 MS |
133 | #define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page)) |
134 | #define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES) | |
135 | #define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1)) | |
17f34580 | 136 | #define vmemmap ((struct page *) VMALLOC_END) |
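/*
 * [Editor's illustration, not part of the original header] Worked example for
 * the 64 bit values above, assuming sizeof(struct page) == 64 purely for the
 * arithmetic (the real size depends on the configuration):
 *   vmemmap space  = VMEM_MAP_END - VMALLOC_END
 *                  = 0x40000000000 - 0x3e040000000 = 0x1fc0000000 (~127 GB)
 *   VMEM_MAX_PAGES = 0x1fc0000000 / 64 = 0x7f000000 struct pages
 * which would be enough to describe roughly 0x7f000000 * 4 KB ~= 8 TB of
 * memory. VMEM_MAX_PFN additionally caps this at VMALLOC_START >> PAGE_SHIFT,
 * and the final mask in VMEM_MAX_PHYS rounds the result down to a 16 MB
 * boundary.
 */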
5fd9c6e2 | 137 | |
1da177e4 LT |
138 | /* |
139 | * A 31 bit pagetable entry of S390 has following format: | |
140 | * | PFRA | | OS | | |
141 | * 0 0IP0 | |
142 | * 00000000001111111111222222222233 | |
143 | * 01234567890123456789012345678901 | |
144 | * | |
145 | * I Page-Invalid Bit: Page is not available for address-translation | |
146 | * P Page-Protection Bit: Store access not possible for page | |
147 | * | |
148 | * A 31 bit segmenttable entry of S390 has following format: | |
149 | * | P-table origin | |PTL | |
150 | * 0 IC | |
151 | * 00000000001111111111222222222233 | |
152 | * 01234567890123456789012345678901 | |
153 | * | |
154 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
155 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
156 | * PTL Page-Table-Length: Page-table length ((PTL+1)*16 entries -> up to 256) | |
157 | * | |
158 | * The 31 bit segmenttable origin of S390 has following format: | |
159 | * | |
160 | * |S-table origin | | STL | | |
161 | * X **GPS | |
162 | * 00000000001111111111222222222233 | |
163 | * 01234567890123456789012345678901 | |
164 | * | |
165 | * X Space-Switch event: | |
166 | * G Segment-Invalid Bit: * | |
167 | * P Private-Space Bit: Segment is not private (PoP 3-30) | |
168 | * S Storage-Alteration: | |
169 | * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048) | |
170 | * | |
171 | * A 64 bit pagetable entry of S390 has following format: | |
172 | * | PFRA |0IP0| OS | | |
173 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
174 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
175 | * | |
176 | * I Page-Invalid Bit: Page is not available for address-translation | |
177 | * P Page-Protection Bit: Store access not possible for page | |
178 | * | |
179 | * A 64 bit segmenttable entry of S390 has following format: | |
180 | * | P-table origin | TT | |
181 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
182 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
183 | * | |
184 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
185 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
186 | * P Page-Protection Bit: Store access not possible for page | |
187 | * TT Type 00 | |
188 | * | |
189 | * A 64 bit region table entry of S390 has following format: | |
190 | * | S-table origin | TF TTTL | |
191 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
192 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
193 | * | |
194 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
195 | * TT Type 01 | |
196 | * TF | |
190a1d72 | 197 | * TL Table length |
1da177e4 LT |
198 | * |
199 | * The 64 bit regiontable origin of S390 has following format: | |
200 | * | region table origin | DTTL | |
201 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
202 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
203 | * | |
204 | * X Space-Switch event: | |
205 | * G Segment-Invalid Bit: | |
206 | * P Private-Space Bit: | |
207 | * S Storage-Alteration: | |
208 | * R Real space | |
209 | * TL Table-Length: | |
210 | * | |
211 | * A storage key has the following format: | |
212 | * | ACC |F|R|C|0| | |
213 | * 0 3 4 5 6 7 | |
214 | * ACC: access key | |
215 | * F : fetch protection bit | |
216 | * R : referenced bit | |
217 | * C : changed bit | |
218 | */ | |
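/*
 * [Editor's note, not part of the original header] Reading the storage key
 * diagram above as a byte value: ACC occupies the four high bits (mask 0xf0),
 * F is 0x08, R is 0x04 and C is 0x02. This matches the _PAGE_REFERENCED
 * (0x04) and _PAGE_CHANGED (0x02) definitions further down, which are tested
 * against the value returned by page_get_storage_key().
 */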
219 | ||
220 | /* Hardware bits in the page table entry */ | |
83377484 MS |
221 | #define _PAGE_RO 0x200 /* HW read-only bit */ |
222 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ | |
3610cce8 MS |
223 | |
224 | /* Software bits in the page table entry */ | |
83377484 MS |
225 | #define _PAGE_SWT 0x001 /* SW pte type bit t */ |
226 | #define _PAGE_SWX 0x002 /* SW pte type bit x */ | |
a08cb629 NP |
227 | #define _PAGE_SPECIAL 0x004 /* SW associated with special page */ |
228 | #define __HAVE_ARCH_PTE_SPECIAL | |
1da177e4 | 229 | |
138c9021 NP |
230 | /* Set of bits not changed in pte_modify */ |
231 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL) | |
232 | ||
83377484 | 233 | /* Six different types of pages. */ |
9282ed92 GS |
234 | #define _PAGE_TYPE_EMPTY 0x400 |
235 | #define _PAGE_TYPE_NONE 0x401 | |
83377484 MS |
236 | #define _PAGE_TYPE_SWAP 0x403 |
237 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ | |
9282ed92 GS |
238 | #define _PAGE_TYPE_RO 0x200 |
239 | #define _PAGE_TYPE_RW 0x000 | |
c1821c2e GS |
240 | #define _PAGE_TYPE_EX_RO 0x202 |
241 | #define _PAGE_TYPE_EX_RW 0x002 | |
1da177e4 | 242 | |
53492b1d GS |
243 | /* |
244 | * Only four types for huge pages, using the invalid bit and protection bit | |
245 | * of a segment table entry. | |
246 | */ | |
247 | #define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */ | |
248 | #define _HPAGE_TYPE_NONE 0x220 | |
249 | #define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */ | |
250 | #define _HPAGE_TYPE_RW 0x000 | |
251 | ||
83377484 MS |
252 | /* |
253 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, | |
254 | * pte_none and pte_file to find out the pte type WITHOUT holding the page | |
255 | * table lock. ptep_clear_flush on the other hand uses ptep_invalidate to | |
256 | * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs | |
257 | * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. | |
258 | * This change is done while holding the lock, but the intermediate step | |
259 | * of a previously valid pte with the hw invalid bit set can be observed by | |
260 | * handle_pte_fault. That makes it necessary that all valid pte types with | |
261 | * the hw invalid bit set must be distinguishable from the four pte types | |
262 | * empty, none, swap and file. | |
263 | * | |
264 | * irxt ipte irxt | |
265 | * _PAGE_TYPE_EMPTY 1000 -> 1000 | |
266 | * _PAGE_TYPE_NONE 1001 -> 1001 | |
267 | * _PAGE_TYPE_SWAP 1011 -> 1011 | |
268 | * _PAGE_TYPE_FILE 11?1 -> 11?1 | |
269 | * _PAGE_TYPE_RO 0100 -> 1100 | |
270 | * _PAGE_TYPE_RW 0000 -> 1000 | |
c1821c2e GS |
271 | * _PAGE_TYPE_EX_RO 0110 -> 1110 |
272 | * _PAGE_TYPE_EX_RW 0010 -> 1010 | |
83377484 | 273 | * |
c1821c2e | 274 | * pte_none is true for bits combinations 1000, 1010, 1100, 1110 |
83377484 MS |
275 | * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 |
276 | * pte_file is true for bits combinations 1101, 1111 | |
c1821c2e | 277 | * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. |
83377484 MS |
278 | */ |
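/*
 * [Editor's illustration, not part of the original header] To make the table
 * above concrete: a writable mapping uses _PAGE_TYPE_RW, i.e. irxt = 0000,
 * for which pte_present() below is true and pte_none() is false. Once ipte
 * has set the hw invalid bit the value reads as irxt = 1000, which equals
 * _PAGE_TYPE_EMPTY: pte_none() becomes true while pte_present(), pte_file()
 * and the swap test all stay false - exactly the property the text above
 * requires for a lockless reader.
 */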
279 | ||
5b7baf05 CB |
280 | /* Page status table bits for virtualization */ |
281 | #define RCP_PCL_BIT 55 | |
282 | #define RCP_HR_BIT 54 | |
283 | #define RCP_HC_BIT 53 | |
284 | #define RCP_GR_BIT 50 | |
285 | #define RCP_GC_BIT 49 | |
286 | ||
15e86b0c FF |
287 | /* User dirty bit for KVM's migration feature */ |
288 | #define KVM_UD_BIT 47 | |
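/*
 * [Editor's note, not part of the original header] These bits live in the
 * page status table entry located PTRS_PER_PTE entries behind the pte itself
 * (see rcp_lock() and ptep_rcp_copy() below). RCP_PCL_BIT serves as a simple
 * lock bit; the HR/HC bits appear to cache the host view and the GR/GC bits
 * the guest view of the referenced/changed state, with KVM_UD_BIT recording a
 * "user dirty" page for KVM's migration support.
 */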
289 | ||
1da177e4 LT |
290 | #ifndef __s390x__ |
291 | ||
3610cce8 MS |
292 | /* Bits in the segment table address-space-control-element */ |
293 | #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ | |
294 | #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ | |
295 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
296 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
297 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ | |
1da177e4 | 298 | |
3610cce8 MS |
299 | /* Bits in the segment table entry */ |
300 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | |
301 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
302 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | |
303 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | |
1da177e4 | 304 | |
3610cce8 MS |
305 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) |
306 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
1da177e4 LT |
307 | |
308 | #else /* __s390x__ */ | |
309 | ||
3610cce8 MS |
310 | /* Bits in the segment/region table address-space-control-element */ |
311 | #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ | |
312 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
313 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
314 | #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ | |
315 | #define _ASCE_REAL_SPACE 0x20 /* real space control */ | |
316 | #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ | |
317 | #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ | |
318 | #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ | |
319 | #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ | |
320 | #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ | |
321 | #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ | |
322 | ||
323 | /* Bits in the region table entry */ | |
324 | #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ | |
325 | #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ | |
326 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ | |
327 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ | |
328 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ | |
329 | #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ | |
330 | #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ | |
331 | ||
332 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) | |
333 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV) | |
334 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) | |
335 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV) | |
336 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) | |
337 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) | |
338 | ||
1da177e4 | 339 | /* Bits in the segment table entry */ |
3610cce8 MS |
340 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
341 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | |
342 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
1da177e4 | 343 | |
3610cce8 MS |
344 | #define _SEGMENT_ENTRY (0) |
345 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
346 | ||
53492b1d GS |
347 | #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ |
348 | #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ | |
349 | ||
3610cce8 | 350 | #endif /* __s390x__ */ |
1da177e4 LT |
351 | |
352 | /* | |
3610cce8 MS |
353 | * A user page table pointer has the space-switch-event bit, the |
354 | * private-space-control bit and the storage-alteration-event-control | |
355 | * bit set. A kernel page table pointer doesn't need them. | |
1da177e4 | 356 | */ |
3610cce8 MS |
357 | #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ |
358 | _ASCE_ALT_EVENT) | |
1da177e4 | 359 | |
3610cce8 | 360 | /* Bits in the storage key */
1da177e4 LT |
361 | #define _PAGE_CHANGED 0x02 /* HW changed bit */ |
362 | #define _PAGE_REFERENCED 0x04 /* HW referenced bit */ | |
363 | ||
1da177e4 | 364 | /* |
9282ed92 | 365 | * Page protection definitions. |
1da177e4 | 366 | */ |
9282ed92 GS |
367 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) |
368 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) | |
369 | #define PAGE_RW __pgprot(_PAGE_TYPE_RW) | |
c1821c2e GS |
370 | #define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO) |
371 | #define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW) | |
9282ed92 GS |
372 | |
373 | #define PAGE_KERNEL PAGE_RW | |
374 | #define PAGE_COPY PAGE_RO | |
1da177e4 LT |
375 | |
376 | /* | |
c1821c2e GS |
377 | * Dependent on the EXEC_PROTECT option s390 can do execute protection. |
378 | * Write permission always implies read permission. In theory with a | |
379 | * primary/secondary page table execute only can be implemented but | |
380 | * it would cost an additional bit in the pte to distinguish all the | |
381 | * different pte types. To avoid that execute permission currently | |
382 | * implies read permission as well. | |
1da177e4 LT |
383 | */ |
384 | /*xwr*/ | |
9282ed92 GS |
385 | #define __P000 PAGE_NONE |
386 | #define __P001 PAGE_RO | |
387 | #define __P010 PAGE_RO | |
388 | #define __P011 PAGE_RO | |
c1821c2e GS |
389 | #define __P100 PAGE_EX_RO |
390 | #define __P101 PAGE_EX_RO | |
391 | #define __P110 PAGE_EX_RO | |
392 | #define __P111 PAGE_EX_RO | |
9282ed92 GS |
393 | |
394 | #define __S000 PAGE_NONE | |
395 | #define __S001 PAGE_RO | |
396 | #define __S010 PAGE_RW | |
397 | #define __S011 PAGE_RW | |
c1821c2e GS |
398 | #define __S100 PAGE_EX_RO |
399 | #define __S101 PAGE_EX_RO | |
400 | #define __S110 PAGE_EX_RW | |
401 | #define __S111 PAGE_EX_RW | |
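/*
 * [Editor's note, not part of the original header] The __Pxxx/__Sxxx entries
 * are indexed by the xwr permission bits of a vma; __P is used for private
 * (copy-on-write) mappings, __S for shared ones. A private read/write mapping
 * for instance maps to __P011 == PAGE_RO, so the first store faults and the
 * generic mm code can perform copy-on-write, while a shared read/write
 * mapping maps to __S011 == PAGE_RW.
 */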
402 | ||
403 | #ifndef __s390x__ | |
3610cce8 | 404 | # define PxD_SHADOW_SHIFT 1 |
c1821c2e | 405 | #else /* __s390x__ */ |
3610cce8 | 406 | # define PxD_SHADOW_SHIFT 2 |
c1821c2e GS |
407 | #endif /* __s390x__ */ |
408 | ||
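/*
 * [Editor's note, not part of the original header] get_shadow_table() below
 * supports the execute-protection code: page table pages can have a second,
 * "shadow" table whose address is (as far as this header is concerned) kept
 * in page->index of the primary table's first page. PxD_SHADOW_SHIFT encodes
 * the table size in pages (8 KB on 31 bit, 16 KB on 64 bit), so the function
 * masks off the offset within the table, looks up the primary table's struct
 * page and returns the matching slot in the shadow table, or NULL if no
 * shadow table was set up.
 */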
3610cce8 | 409 | static inline void *get_shadow_table(void *table) |
c1821c2e | 410 | { |
3610cce8 MS |
411 | unsigned long addr, offset; |
412 | struct page *page; | |
413 | ||
414 | addr = (unsigned long) table; | |
415 | offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1); | |
416 | page = virt_to_page((void *)(addr ^ offset)); | |
417 | return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); | |
c1821c2e | 418 | } |
1da177e4 LT |
419 | |
420 | /* | |
421 | * Certain architectures need to do special things when PTEs | |
422 | * within a page table are directly modified. Thus, the following | |
423 | * hook is made available. | |
424 | */ | |
ba8a9229 | 425 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
146e4b3c | 426 | pte_t *ptep, pte_t entry) |
1da177e4 | 427 | { |
146e4b3c MS |
428 | *ptep = entry; |
429 | if (mm->context.noexec) { | |
430 | if (!(pte_val(entry) & _PAGE_INVALID) && | |
431 | (pte_val(entry) & _PAGE_SWX)) | |
432 | pte_val(entry) |= _PAGE_RO; | |
c1821c2e | 433 | else |
146e4b3c MS |
434 | pte_val(entry) = _PAGE_TYPE_EMPTY; |
435 | ptep[PTRS_PER_PTE] = entry; | |
c1821c2e | 436 | } |
1da177e4 | 437 | } |
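/*
 * [Editor's note, not part of the original header] The noexec branch above
 * mirrors the pte into the shadow page table that starts PTRS_PER_PTE entries
 * behind the primary one; pte_clear() and ptep_invalidate() below keep that
 * copy in sync. Only valid ptes carrying the _PAGE_SWX software bit are
 * mirrored, and they are mirrored read-only; everything else is written as
 * _PAGE_TYPE_EMPTY, which is how the execute-protection support keeps
 * non-executable pages out of the shadowed address space.
 */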
1da177e4 LT |
438 | |
439 | /* | |
440 | * pgd/pmd/pte query functions | |
441 | */ | |
442 | #ifndef __s390x__ | |
443 | ||
4448aaf0 AB |
444 | static inline int pgd_present(pgd_t pgd) { return 1; } |
445 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
446 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
1da177e4 | 447 | |
190a1d72 MS |
448 | static inline int pud_present(pud_t pud) { return 1; } |
449 | static inline int pud_none(pud_t pud) { return 0; } | |
450 | static inline int pud_bad(pud_t pud) { return 0; } | |
451 | ||
1da177e4 LT |
452 | #else /* __s390x__ */ |
453 | ||
5a216a20 MS |
454 | static inline int pgd_present(pgd_t pgd) |
455 | { | |
6252d702 MS |
456 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) |
457 | return 1; | |
5a216a20 MS |
458 | return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; |
459 | } | |
460 | ||
461 | static inline int pgd_none(pgd_t pgd) | |
462 | { | |
6252d702 MS |
463 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) |
464 | return 0; | |
5a216a20 MS |
465 | return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL; |
466 | } | |
467 | ||
468 | static inline int pgd_bad(pgd_t pgd) | |
469 | { | |
6252d702 MS |
470 | /* |
471 | * With dynamic page table levels the pgd can be a region table | |
472 | * entry or a segment table entry. Check for the bits that are | |
473 | * invalid for either table entry. | |
474 | */ | |
5a216a20 | 475 | unsigned long mask = |
6252d702 | 476 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & |
5a216a20 MS |
477 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; |
478 | return (pgd_val(pgd) & mask) != 0; | |
479 | } | |
190a1d72 MS |
480 | |
481 | static inline int pud_present(pud_t pud) | |
1da177e4 | 482 | { |
6252d702 MS |
483 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) |
484 | return 1; | |
0d017923 | 485 | return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; |
1da177e4 LT |
486 | } |
487 | ||
190a1d72 | 488 | static inline int pud_none(pud_t pud) |
1da177e4 | 489 | { |
6252d702 MS |
490 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) |
491 | return 0; | |
0d017923 | 492 | return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; |
1da177e4 LT |
493 | } |
494 | ||
190a1d72 | 495 | static inline int pud_bad(pud_t pud) |
1da177e4 | 496 | { |
6252d702 MS |
497 | /* |
498 | * With dynamic page table levels the pud can be a region table | |
499 | * entry or a segment table entry. Check for the bits that are | |
500 | * invalid for either table entry. | |
501 | */ | |
5a216a20 | 502 | unsigned long mask = |
6252d702 | 503 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & |
5a216a20 MS |
504 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; |
505 | return (pud_val(pud) & mask) != 0; | |
1da177e4 LT |
506 | } |
507 | ||
3610cce8 MS |
508 | #endif /* __s390x__ */ |
509 | ||
4448aaf0 | 510 | static inline int pmd_present(pmd_t pmd) |
1da177e4 | 511 | { |
0d017923 | 512 | return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL; |
1da177e4 LT |
513 | } |
514 | ||
4448aaf0 | 515 | static inline int pmd_none(pmd_t pmd) |
1da177e4 | 516 | { |
0d017923 | 517 | return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL; |
1da177e4 LT |
518 | } |
519 | ||
4448aaf0 | 520 | static inline int pmd_bad(pmd_t pmd) |
1da177e4 | 521 | { |
3610cce8 MS |
522 | unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV; |
523 | return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; | |
1da177e4 LT |
524 | } |
525 | ||
4448aaf0 | 526 | static inline int pte_none(pte_t pte) |
1da177e4 | 527 | { |
83377484 | 528 | return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); |
1da177e4 LT |
529 | } |
530 | ||
4448aaf0 | 531 | static inline int pte_present(pte_t pte) |
1da177e4 | 532 | { |
83377484 MS |
533 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; |
534 | return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || | |
535 | (!(pte_val(pte) & _PAGE_INVALID) && | |
536 | !(pte_val(pte) & _PAGE_SWT)); | |
1da177e4 LT |
537 | } |
538 | ||
4448aaf0 | 539 | static inline int pte_file(pte_t pte) |
1da177e4 | 540 | { |
83377484 MS |
541 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; |
542 | return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; | |
1da177e4 LT |
543 | } |
544 | ||
7e675137 NP |
545 | static inline int pte_special(pte_t pte) |
546 | { | |
a08cb629 | 547 | return (pte_val(pte) & _PAGE_SPECIAL); |
7e675137 NP |
548 | } |
549 | ||
ba8a9229 MS |
550 | #define __HAVE_ARCH_PTE_SAME |
551 | #define pte_same(a,b) (pte_val(a) == pte_val(b)) | |
1da177e4 | 552 | |
5b7baf05 CB |
553 | static inline void rcp_lock(pte_t *ptep) |
554 | { | |
555 | #ifdef CONFIG_PGSTE | |
556 | unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); | |
557 | preempt_disable(); | |
558 | while (test_and_set_bit(RCP_PCL_BIT, pgste)) | |
559 | ; | |
560 | #endif | |
561 | } | |
562 | ||
563 | static inline void rcp_unlock(pte_t *ptep) | |
564 | { | |
565 | #ifdef CONFIG_PGSTE | |
566 | unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); | |
567 | clear_bit(RCP_PCL_BIT, pgste); | |
568 | preempt_enable(); | |
569 | #endif | |
570 | } | |
571 | ||
572 | /* forward declaration for SetPageUptodate in page-flags.h */ | |
573 | static inline void page_clear_dirty(struct page *page); | |
574 | #include <linux/page-flags.h> | |
575 | ||
576 | static inline void ptep_rcp_copy(pte_t *ptep) | |
577 | { | |
578 | #ifdef CONFIG_PGSTE | |
579 | struct page *page = virt_to_page(pte_val(*ptep)); | |
580 | unsigned int skey; | |
581 | unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); | |
582 | ||
583 | skey = page_get_storage_key(page_to_phys(page)); | |
15e86b0c | 584 | if (skey & _PAGE_CHANGED) { |
c71799c1 | 585 | set_bit_simple(RCP_GC_BIT, pgste); |
15e86b0c FF |
586 | set_bit_simple(KVM_UD_BIT, pgste); |
587 | } | |
5b7baf05 | 588 | if (skey & _PAGE_REFERENCED) |
c71799c1 | 589 | set_bit_simple(RCP_GR_BIT, pgste); |
15e86b0c | 590 | if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) { |
5b7baf05 | 591 | SetPageDirty(page); |
15e86b0c FF |
592 | set_bit_simple(KVM_UD_BIT, pgste); |
593 | } | |
c71799c1 | 594 | if (test_and_clear_bit_simple(RCP_HR_BIT, pgste)) |
5b7baf05 CB |
595 | SetPageReferenced(page); |
596 | #endif | |
597 | } | |
598 | ||
1da177e4 LT |
599 | /* |
600 | * query functions pte_write/pte_dirty/pte_young only work if | |
601 | * pte_present() is true. Undefined behaviour if not.. | |
602 | */ | |
4448aaf0 | 603 | static inline int pte_write(pte_t pte) |
1da177e4 LT |
604 | { |
605 | return (pte_val(pte) & _PAGE_RO) == 0; | |
606 | } | |
607 | ||
4448aaf0 | 608 | static inline int pte_dirty(pte_t pte) |
1da177e4 LT |
609 | { |
610 | /* A pte is neither clean nor dirty on s/390. The dirty bit | |
611 | * is in the storage key. See page_test_dirty and | |
612 | * page_clear_dirty for details. | |
613 | */ | |
614 | return 0; | |
615 | } | |
616 | ||
4448aaf0 | 617 | static inline int pte_young(pte_t pte) |
1da177e4 LT |
618 | { |
619 | /* A pte is neither young nor old on s/390. The young bit | |
620 | * is in the storage key. See page_test_and_clear_young for | |
621 | * details. | |
622 | */ | |
623 | return 0; | |
624 | } | |
625 | ||
1da177e4 LT |
626 | /* |
627 | * pgd/pmd/pte modification functions | |
628 | */ | |
629 | ||
630 | #ifndef __s390x__ | |
631 | ||
190a1d72 MS |
632 | #define pgd_clear(pgd) do { } while (0) |
633 | #define pud_clear(pud) do { } while (0) | |
1da177e4 | 634 | |
1da177e4 LT |
635 | #else /* __s390x__ */ |
636 | ||
5a216a20 MS |
637 | static inline void pgd_clear_kernel(pgd_t * pgd) |
638 | { | |
6252d702 MS |
639 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
640 | pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; | |
5a216a20 MS |
641 | } |
642 | ||
643 | static inline void pgd_clear(pgd_t * pgd) | |
644 | { | |
645 | pgd_t *shadow = get_shadow_table(pgd); | |
646 | ||
647 | pgd_clear_kernel(pgd); | |
648 | if (shadow) | |
649 | pgd_clear_kernel(shadow); | |
650 | } | |
190a1d72 MS |
651 | |
652 | static inline void pud_clear_kernel(pud_t *pud) | |
1da177e4 | 653 | { |
6252d702 MS |
654 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
655 | pud_val(*pud) = _REGION3_ENTRY_EMPTY; | |
1da177e4 LT |
656 | } |
657 | ||
6252d702 | 658 | static inline void pud_clear(pud_t *pud) |
c1821c2e | 659 | { |
190a1d72 | 660 | pud_t *shadow = get_shadow_table(pud); |
c1821c2e | 661 | |
190a1d72 MS |
662 | pud_clear_kernel(pud); |
663 | if (shadow) | |
664 | pud_clear_kernel(shadow); | |
c1821c2e GS |
665 | } |
666 | ||
146e4b3c MS |
667 | #endif /* __s390x__ */ |
668 | ||
c1821c2e | 669 | static inline void pmd_clear_kernel(pmd_t * pmdp) |
1da177e4 | 670 | { |
3610cce8 | 671 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; |
1da177e4 LT |
672 | } |
673 | ||
146e4b3c | 674 | static inline void pmd_clear(pmd_t *pmd) |
c1821c2e | 675 | { |
146e4b3c | 676 | pmd_t *shadow = get_shadow_table(pmd); |
c1821c2e | 677 | |
146e4b3c MS |
678 | pmd_clear_kernel(pmd); |
679 | if (shadow) | |
680 | pmd_clear_kernel(shadow); | |
c1821c2e GS |
681 | } |
682 | ||
4448aaf0 | 683 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
1da177e4 | 684 | { |
9282ed92 | 685 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
146e4b3c MS |
686 | if (mm->context.noexec) |
687 | pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY; | |
1da177e4 LT |
688 | } |
689 | ||
690 | /* | |
691 | * The following pte modification functions only work if | |
692 | * pte_present() is true. Undefined behaviour if not.. | |
693 | */ | |
4448aaf0 | 694 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1da177e4 | 695 | { |
138c9021 | 696 | pte_val(pte) &= _PAGE_CHG_MASK; |
1da177e4 LT |
697 | pte_val(pte) |= pgprot_val(newprot); |
698 | return pte; | |
699 | } | |
700 | ||
4448aaf0 | 701 | static inline pte_t pte_wrprotect(pte_t pte) |
1da177e4 | 702 | { |
9282ed92 | 703 | /* Do not clobber _PAGE_TYPE_NONE pages! */ |
1da177e4 LT |
704 | if (!(pte_val(pte) & _PAGE_INVALID)) |
705 | pte_val(pte) |= _PAGE_RO; | |
706 | return pte; | |
707 | } | |
708 | ||
4448aaf0 | 709 | static inline pte_t pte_mkwrite(pte_t pte) |
1da177e4 LT |
710 | { |
711 | pte_val(pte) &= ~_PAGE_RO; | |
712 | return pte; | |
713 | } | |
714 | ||
4448aaf0 | 715 | static inline pte_t pte_mkclean(pte_t pte) |
1da177e4 LT |
716 | { |
717 | /* The only user of pte_mkclean is the fork() code. | |
718 | We must *not* clear the *physical* page dirty bit | |
719 | just because fork() wants to clear the dirty bit in | |
720 | *one* of the page's mappings. So we just do nothing. */ | |
721 | return pte; | |
722 | } | |
723 | ||
4448aaf0 | 724 | static inline pte_t pte_mkdirty(pte_t pte) |
1da177e4 LT |
725 | { |
726 | /* We do not explicitly set the dirty bit because the | |
727 | * sske instruction is slow. It is faster to let the | |
728 | * next instruction set the dirty bit. | |
729 | */ | |
730 | return pte; | |
731 | } | |
732 | ||
4448aaf0 | 733 | static inline pte_t pte_mkold(pte_t pte) |
1da177e4 LT |
734 | { |
735 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
736 | * There is no point in clearing the real referenced bit. | |
737 | */ | |
738 | return pte; | |
739 | } | |
740 | ||
4448aaf0 | 741 | static inline pte_t pte_mkyoung(pte_t pte) |
1da177e4 LT |
742 | { |
743 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
744 | * There is no point in setting the real referenced bit. | |
745 | */ | |
746 | return pte; | |
747 | } | |
748 | ||
7e675137 NP |
749 | static inline pte_t pte_mkspecial(pte_t pte) |
750 | { | |
a08cb629 | 751 | pte_val(pte) |= _PAGE_SPECIAL; |
7e675137 NP |
752 | return pte; |
753 | } | |
754 | ||
15e86b0c FF |
755 | #ifdef CONFIG_PGSTE |
756 | /* | |
757 | * Get (and clear) the user dirty bit for a PTE. | |
758 | */ | |
759 | static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, | |
760 | pte_t *ptep) | |
761 | { | |
762 | int dirty; | |
763 | unsigned long *pgste; | |
764 | struct page *page; | |
765 | unsigned int skey; | |
766 | ||
250cf776 | 767 | if (!mm->context.has_pgste) |
15e86b0c FF |
768 | return -EINVAL; |
769 | rcp_lock(ptep); | |
770 | pgste = (unsigned long *) (ptep + PTRS_PER_PTE); | |
771 | page = virt_to_page(pte_val(*ptep)); | |
772 | skey = page_get_storage_key(page_to_phys(page)); | |
773 | if (skey & _PAGE_CHANGED) { | |
774 | set_bit_simple(RCP_GC_BIT, pgste); | |
775 | set_bit_simple(KVM_UD_BIT, pgste); | |
776 | } | |
777 | if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) { | |
778 | SetPageDirty(page); | |
779 | set_bit_simple(KVM_UD_BIT, pgste); | |
780 | } | |
781 | dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste); | |
782 | if (skey & _PAGE_CHANGED) | |
783 | page_clear_dirty(page); | |
784 | rcp_unlock(ptep); | |
785 | return dirty; | |
786 | } | |
787 | #endif | |
788 | ||
ba8a9229 MS |
789 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
790 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
791 | unsigned long addr, pte_t *ptep) | |
1da177e4 | 792 | { |
5b7baf05 CB |
793 | #ifdef CONFIG_PGSTE |
794 | unsigned long physpage; | |
795 | int young; | |
796 | unsigned long *pgste; | |
797 | ||
250cf776 | 798 | if (!vma->vm_mm->context.has_pgste) |
5b7baf05 CB |
799 | return 0; |
800 | physpage = pte_val(*ptep) & PAGE_MASK; | |
801 | pgste = (unsigned long *) (ptep + PTRS_PER_PTE); | |
802 | ||
803 | young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); | |
804 | rcp_lock(ptep); | |
805 | if (young) | |
c71799c1 HC |
806 | set_bit_simple(RCP_GR_BIT, pgste); |
807 | young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste); | |
5b7baf05 CB |
808 | rcp_unlock(ptep); |
809 | return young; | |
810 | #endif | |
1da177e4 LT |
811 | return 0; |
812 | } | |
813 | ||
ba8a9229 MS |
814 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
815 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |
816 | unsigned long address, pte_t *ptep) | |
1da177e4 | 817 | { |
5b7baf05 CB |
818 | /* No need to flush TLB |
819 | * On s390 reference bits are in storage key and never in TLB | |
820 | * With virtualization we handle the reference bit, without it | |
821 | * we can simply return */ | |
822 | #ifdef CONFIG_PGSTE | |
823 | return ptep_test_and_clear_young(vma, address, ptep); | |
824 | #endif | |
ba8a9229 | 825 | return 0; |
1da177e4 LT |
826 | } |
827 | ||
9282ed92 | 828 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
1da177e4 | 829 | { |
9282ed92 | 830 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { |
1da177e4 | 831 | #ifndef __s390x__ |
146e4b3c | 832 | /* pto must point to the start of the segment table */ |
1da177e4 | 833 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); |
9282ed92 GS |
834 | #else |
835 | /* ipte in zarch mode can do the math */ | |
836 | pte_t *pto = ptep; | |
837 | #endif | |
94c12cc7 MS |
838 | asm volatile( |
839 | " ipte %2,%3" | |
840 | : "=m" (*ptep) : "m" (*ptep), | |
841 | "a" (pto), "a" (address)); | |
1da177e4 | 842 | } |
9282ed92 GS |
843 | } |
844 | ||
146e4b3c MS |
845 | static inline void ptep_invalidate(struct mm_struct *mm, |
846 | unsigned long address, pte_t *ptep) | |
9282ed92 | 847 | { |
250cf776 | 848 | if (mm->context.has_pgste) { |
5b7baf05 CB |
849 | rcp_lock(ptep); |
850 | __ptep_ipte(address, ptep); | |
851 | ptep_rcp_copy(ptep); | |
852 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | |
853 | rcp_unlock(ptep); | |
854 | return; | |
855 | } | |
9282ed92 | 856 | __ptep_ipte(address, ptep); |
5b7baf05 CB |
857 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
858 | if (mm->context.noexec) { | |
146e4b3c | 859 | __ptep_ipte(address, ptep + PTRS_PER_PTE); |
5b7baf05 CB |
860 | pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY; |
861 | } | |
f0e47c22 MS |
862 | } |
863 | ||
ba8a9229 MS |
864 | /* |
865 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush | |
866 | * both clear the TLB for the unmapped pte. The reason is that | |
867 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) | |
868 | * to modify an active pte. The sequence is | |
869 | * 1) ptep_get_and_clear | |
870 | * 2) set_pte_at | |
871 | * 3) flush_tlb_range | |
872 | * On s390 the tlb needs to get flushed with the modification of the pte | |
873 | * if the pte is active. The only way this can be implemented is to | |
874 | * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range | |
875 | * is a nop. | |
876 | */ | |
877 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
878 | #define ptep_get_and_clear(__mm, __address, __ptep) \ | |
879 | ({ \ | |
880 | pte_t __pte = *(__ptep); \ | |
881 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
882 | (__mm) != current->active_mm) \ | |
146e4b3c | 883 | ptep_invalidate(__mm, __address, __ptep); \ |
ba8a9229 MS |
884 | else \ |
885 | pte_clear((__mm), (__address), (__ptep)); \ | |
886 | __pte; \ | |
887 | }) | |
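/*
 * [Editor's sketch, not part of the original header] A minimal sketch of the
 * sequence described above, roughly as generic code such as mm/mprotect.c
 * drives it; the function name and parameters are illustrative only:
 */
#if 0	/* illustrative sketch only; names are hypothetical */
static inline void sketch_change_protection(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    pgprot_t newprot)
{
	pte_t old = ptep_get_and_clear(mm, addr, ptep);	/* flushes the TLB */

	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
	/* the flush_tlb_range() done later by generic code is a nop here */
}
#endif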
888 | ||
889 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | |
f0e47c22 MS |
890 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
891 | unsigned long address, pte_t *ptep) | |
892 | { | |
893 | pte_t pte = *ptep; | |
146e4b3c | 894 | ptep_invalidate(vma->vm_mm, address, ptep); |
1da177e4 LT |
895 | return pte; |
896 | } | |
897 | ||
ba8a9229 MS |
898 | /* |
899 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the | |
900 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all | |
901 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct | |
902 | * cannot be accessed while the batched unmap is running. In this case | |
903 | * full==1 and a simple pte_clear is enough. See tlb.h. | |
904 | */ | |
905 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | |
906 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |
907 | unsigned long addr, | |
908 | pte_t *ptep, int full) | |
1da177e4 | 909 | { |
ba8a9229 MS |
910 | pte_t pte = *ptep; |
911 | ||
912 | if (full) | |
913 | pte_clear(mm, addr, ptep); | |
914 | else | |
146e4b3c | 915 | ptep_invalidate(mm, addr, ptep); |
ba8a9229 | 916 | return pte; |
1da177e4 LT |
917 | } |
918 | ||
ba8a9229 MS |
919 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
920 | #define ptep_set_wrprotect(__mm, __addr, __ptep) \ | |
921 | ({ \ | |
922 | pte_t __pte = *(__ptep); \ | |
923 | if (pte_write(__pte)) { \ | |
924 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
925 | (__mm) != current->active_mm) \ | |
146e4b3c | 926 | ptep_invalidate(__mm, __addr, __ptep); \ |
ba8a9229 MS |
927 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ |
928 | } \ | |
929 | }) | |
930 | ||
931 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
f0e47c22 MS |
932 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ |
933 | ({ \ | |
934 | int __changed = !pte_same(*(__ptep), __entry); \ | |
935 | if (__changed) { \ | |
146e4b3c | 936 | ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \ |
f0e47c22 MS |
937 | set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ |
938 | } \ | |
939 | __changed; \ | |
8dab5241 | 940 | }) |
1da177e4 LT |
941 | |
942 | /* | |
943 | * Test and clear dirty bit in storage key. | |
944 | * We can't clear the changed bit atomically. This is a potential | |
945 | * race against modification of the referenced bit. This function | |
946 | * should therefore only be called if it is not mapped in any | |
947 | * address space. | |
948 | */ | |
ba8a9229 | 949 | #define __HAVE_ARCH_PAGE_TEST_DIRTY |
6c210482 | 950 | static inline int page_test_dirty(struct page *page) |
2dcea57a | 951 | { |
6c210482 MS |
952 | return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; |
953 | } | |
2dcea57a | 954 | |
ba8a9229 | 955 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY |
6c210482 MS |
956 | static inline void page_clear_dirty(struct page *page) |
957 | { | |
958 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); | |
2dcea57a | 959 | } |
1da177e4 LT |
960 | |
961 | /* | |
962 | * Test and clear referenced bit in storage key. | |
963 | */ | |
ba8a9229 | 964 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG |
2dcea57a HC |
965 | static inline int page_test_and_clear_young(struct page *page) |
966 | { | |
0b2b6e1d | 967 | unsigned long physpage = page_to_phys(page); |
2dcea57a HC |
968 | int ccode; |
969 | ||
0b2b6e1d HC |
970 | asm volatile( |
971 | " rrbe 0,%1\n" | |
972 | " ipm %0\n" | |
973 | " srl %0,28\n" | |
2dcea57a HC |
974 | : "=d" (ccode) : "a" (physpage) : "cc" ); |
975 | return ccode & 2; | |
976 | } | |
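/*
 * [Editor's note, not part of the original header] In the inline assembly
 * above "rrbe" resets the reference bit in the storage key of the frame and
 * sets the condition code according to the previous reference/change state,
 * "ipm" copies the condition code into the high bits of the result register
 * and "srl ,28" shifts it down, so "ccode & 2" is non-zero exactly when the
 * page had been referenced.
 */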
1da177e4 LT |
977 | |
978 | /* | |
979 | * Conversion functions: convert a page and protection to a page entry, | |
980 | * and a page entry and page directory to the page they refer to. | |
981 | */ | |
982 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |
983 | { | |
984 | pte_t __pte; | |
985 | pte_val(__pte) = physpage + pgprot_val(pgprot); | |
986 | return __pte; | |
987 | } | |
988 | ||
2dcea57a HC |
989 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
990 | { | |
0b2b6e1d | 991 | unsigned long physpage = page_to_phys(page); |
1da177e4 | 992 | |
2dcea57a HC |
993 | return mk_pte_phys(physpage, pgprot); |
994 | } | |
995 | ||
190a1d72 MS |
996 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
997 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | |
998 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | |
999 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) | |
1da177e4 | 1000 | |
190a1d72 MS |
1001 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
1002 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
1da177e4 | 1003 | |
190a1d72 | 1004 | #ifndef __s390x__ |
1da177e4 | 1005 | |
190a1d72 MS |
1006 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
1007 | #define pud_deref(pmd) ({ BUG(); 0UL; }) | |
1008 | #define pgd_deref(pmd) ({ BUG(); 0UL; }) | |
46a82b2d | 1009 | |
190a1d72 MS |
1010 | #define pud_offset(pgd, address) ((pud_t *) pgd) |
1011 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) | |
1da177e4 | 1012 | |
190a1d72 | 1013 | #else /* __s390x__ */ |
1da177e4 | 1014 | |
190a1d72 MS |
1015 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
1016 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) | |
5a216a20 | 1017 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
1da177e4 | 1018 | |
5a216a20 MS |
1019 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
1020 | { | |
6252d702 MS |
1021 | pud_t *pud = (pud_t *) pgd; |
1022 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | |
1023 | pud = (pud_t *) pgd_deref(*pgd); | |
5a216a20 MS |
1024 | return pud + pud_index(address); |
1025 | } | |
1da177e4 | 1026 | |
190a1d72 | 1027 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
1da177e4 | 1028 | { |
6252d702 MS |
1029 | pmd_t *pmd = (pmd_t *) pud; |
1030 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) | |
1031 | pmd = (pmd_t *) pud_deref(*pud); | |
190a1d72 | 1032 | return pmd + pmd_index(address); |
1da177e4 LT |
1033 | } |
1034 | ||
190a1d72 | 1035 | #endif /* __s390x__ */ |
1da177e4 | 1036 | |
190a1d72 MS |
1037 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
1038 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | |
1039 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | |
1da177e4 | 1040 | |
190a1d72 | 1041 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
1da177e4 | 1042 | |
190a1d72 MS |
1043 | /* Find an entry in the lowest level page table.. */ |
1044 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) | |
1045 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) | |
1da177e4 LT |
1046 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
1047 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) | |
1048 | #define pte_unmap(pte) do { } while (0) | |
1049 | #define pte_unmap_nested(pte) do { } while (0) | |
1050 | ||
1051 | /* | |
1052 | * 31 bit swap entry format: | |
1053 | * A page-table entry has some bits we have to treat in a special way. | |
1054 | * Bits 0, 20 and bit 23 have to be zero, otherwise a specification | |
1055 | * exception will occur instead of a page translation exception. The | |
1056 | * specification exception has the bad habit not to store necessary | |
1057 | * information in the lowcore. | |
1058 | * Bit 21 and bit 22 are the page invalid bit and the page protection | |
1059 | * bit. We set both to indicate a swapped page. | |
1060 | * Bit 30 and 31 are used to distinguish the different page types. For | |
1061 | * a swapped page these bits need to be zero. | |
1062 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. | |
1063 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | |
1064 | * plus 24 for the offset. | |
1065 | * 0| offset |0110|o|type |00| | |
1066 | * 0 0000000001111111111 2222 2 22222 33 | |
1067 | * 0 1234567890123456789 0123 4 56789 01 | |
1068 | * | |
1069 | * 64 bit swap entry format: | |
1070 | * A page-table entry has some bits we have to treat in a special way. | |
1071 | * Bits 52 and bit 55 have to be zero, otherwise a specification | |
1072 | * exception will occur instead of a page translation exception. The | |
1073 | * specification exception has the bad habit not to store necessary | |
1074 | * information in the lowcore. | |
1075 | * Bit 53 and bit 54 are the page invalid bit and the page protection | |
1076 | * bit. We set both to indicate a swapped page. | |
1077 | * Bit 62 and 63 are used to distinguish the different page types. For | |
1078 | * a swapped page these bits need to be zero. | |
1079 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. | |
1080 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | |
1081 | * plus 56 for the offset. | |
1082 | * | offset |0110|o|type |00| | |
1083 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | |
1084 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | |
1085 | */ | |
1086 | #ifndef __s390x__ | |
1087 | #define __SWP_OFFSET_MASK (~0UL >> 12) | |
1088 | #else | |
1089 | #define __SWP_OFFSET_MASK (~0UL >> 11) | |
1090 | #endif | |
4448aaf0 | 1091 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1da177e4 LT |
1092 | { |
1093 | pte_t pte; | |
1094 | offset &= __SWP_OFFSET_MASK; | |
9282ed92 | 1095 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | |
1da177e4 LT |
1096 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
1097 | return pte; | |
1098 | } | |
1099 | ||
1100 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) | |
1101 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) | |
1102 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | |
1103 | ||
1104 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
1105 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
1106 | ||
1107 | #ifndef __s390x__ | |
1108 | # define PTE_FILE_MAX_BITS 26 | |
1109 | #else /* __s390x__ */ | |
1110 | # define PTE_FILE_MAX_BITS 59 | |
1111 | #endif /* __s390x__ */ | |
1112 | ||
1113 | #define pte_to_pgoff(__pte) \ | |
1114 | ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) | |
1115 | ||
1116 | #define pgoff_to_pte(__off) \ | |
1117 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ | |
9282ed92 | 1118 | | _PAGE_TYPE_FILE }) |
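/*
 * [Editor's illustration, not part of the original header] Worked example of
 * the nonlinear file pte encoding above, for pgoff 0x123:
 *   pgoff_to_pte: (((0x123 & 0x7f) << 1) + ((0x123 >> 7) << 12)) | _PAGE_TYPE_FILE
 *               = (0x46 + 0x2000) | 0x601 = 0x2647
 *   pte_to_pgoff: ((0x2647 >> 12) << 7) + ((0x2647 >> 1) & 0x7f)
 *               = 0x100 + 0x23 = 0x123
 * i.e. the low 7 offset bits are kept in pte bits 1-7 and the remaining bits
 * start at bit 12, leaving the hw and type bits untouched.
 */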
1da177e4 LT |
1119 | |
1120 | #endif /* !__ASSEMBLY__ */ | |
1121 | ||
1122 | #define kern_addr_valid(addr) (1) | |
1123 | ||
17f34580 HC |
1124 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
1125 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); | |
402b0862 | 1126 | extern int s390_enable_sie(void); |
f4eb07c1 | 1127 | |
1da177e4 LT |
1128 | /* |
1129 | * No page table caches to initialise | |
1130 | */ | |
1131 | #define pgtable_cache_init() do { } while (0) | |
1132 | ||
1da177e4 LT |
1133 | #include <asm-generic/pgtable.h> |
1134 | ||
1135 | #endif /* _ASM_S390_PGTABLE_H */