Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifdef __KERNEL__ |
2 | #ifndef _PPC_PGTABLE_H | |
3 | #define _PPC_PGTABLE_H | |
4 | ||
5 | #include <asm-generic/4level-fixup.h> | |
6 | ||
7 | #include <linux/config.h> | |
8 | ||
9 | #ifndef __ASSEMBLY__ | |
10 | #include <linux/sched.h> | |
11 | #include <linux/threads.h> | |
12 | #include <asm/processor.h> /* For TASK_SIZE */ | |
13 | #include <asm/mmu.h> | |
14 | #include <asm/page.h> | |
0ec57e53 | 15 | #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ |
8c65b4a6 | 16 | struct mm_struct; |
1da177e4 LT |
17 | |
18 | extern unsigned long va_to_phys(unsigned long address); | |
19 | extern pte_t *va_to_pte(unsigned long address); | |
20 | extern unsigned long ioremap_bot, ioremap_base; | |
21 | #endif /* __ASSEMBLY__ */ | |
22 | ||
23 | /* | |
24 | * The PowerPC MMU uses a hash table containing PTEs, together with | |
25 | * a set of 16 segment registers (on 32-bit implementations), to define | |
26 | * the virtual to physical address mapping. | |
27 | * | |
28 | * We use the hash table as an extended TLB, i.e. a cache of currently | |
29 | * active mappings. We maintain a two-level page table tree, much | |
30 | * like that used by the i386, for the sake of the Linux memory | |
31 | * management code. Low-level assembler code in hashtable.S | |
32 | * (procedure hash_page) is responsible for extracting ptes from the | |
33 | * tree and putting them into the hash table when necessary, and | |
34 | * updating the accessed and modified bits in the page table tree. | |
35 | */ | |
36 | ||
37 | /* | |
38 | * The PowerPC MPC8xx uses a TLB with hardware-assisted software tablewalk. |
39 | We also use the two-level tables, but we can put the real bits needed |
40 | for the TLB and tablewalk in them. These definitions require Mx_CTR.PPM = 0, |
41 | * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has | |
42 | * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit | |
43 | based upon user/super access. The TLB has neither accessed nor |
44 | write-protect bits. We assume that if the TLB gets loaded with an entry it is |
45 | * accessed, and overload the changed bit for write protect. We use | |
46 | * two bits in the software pte that are supposed to be set to zero in | |
47 | * the TLB entry (24 and 25) for these indicators. Although the level 1 | |
48 | * descriptor contains the guarded and writethrough/copyback bits, we can | |
49 | * set these at the page level since they get copied from the Mx_TWC | |
50 | * register when the TLB entry is loaded. We will use bit 27 for guard, since | |
51 | * that is where it exists in the MD_TWC, and bit 26 for writethrough. | |
52 | * These will get masked from the level 2 descriptor at TLB load time, and | |
53 | * copied to the MD_TWC before it gets loaded. | |
54 | Large page sizes have been added. We currently support two sizes, 4K and 8M. |
55 | This also allows a TLB handler optimization because we can directly |
56 | * load the PMD into MD_TWC. The 8M pages are only used for kernel | |
57 | * mapping of well known areas. The PMD (PGD) entries contain control | |
58 | * flags in addition to the address, so care must be taken that the | |
59 | * software no longer assumes these are only pointers. | |
60 | */ | |
61 | ||
62 | /* | |
63 | * At present, all PowerPC 400-class processors share a similar TLB | |
64 | * architecture. The instruction and data sides share a unified, | |
65 | * 64-entry, fully-associative TLB which is maintained totally under | |
66 | * software control. In addition, the instruction side has a | |
67 | * hardware-managed, 4-entry, fully-associative TLB which serves as a | |
68 | * first level to the shared TLB. These two TLBs are known as the UTLB | |
69 | * and ITLB, respectively (see "mmu.h" for definitions). | |
70 | */ | |
71 | ||
72 | /* | |
73 | * The normal case is that PTEs are 32 bits and we have a 1-page |
74 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | |
75 | * | |
76 | * For any >32-bit physical address platform, we can use the following | |
77 | two level page table layout where the pgdir is 8KB and the MS 11 bits |
78 | * are an index to the second level table. The combined pgdir/pmd first | |
79 | * level has 2048 entries and the second level has 512 64-bit PTE entries. | |
80 | * -Matt | |
81 | */ | |
82 | /* PMD_SHIFT determines the size of the area mapped by the PTE pages */ | |
83 | #define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) | |
84 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
85 | #define PMD_MASK (~(PMD_SIZE-1)) | |
86 | ||
87 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ | |
88 | #define PGDIR_SHIFT PMD_SHIFT | |
89 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
90 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
91 | ||
92 | /* | |
93 | * entries per page directory level: our page-table tree is two-level, so | |
94 | * we don't really have any PMD directory. | |
95 | */ | |
96 | #define PTRS_PER_PTE (1 << PTE_SHIFT) | |
97 | #define PTRS_PER_PMD 1 | |
98 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | |
99 | ||
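To make the layout above concrete, here is a minimal sketch assuming the normal 32-bit PTE case (PAGE_SHIFT = 12, PTE_SHIFT = 10, so PMD_SHIFT = PGDIR_SHIFT = 22 and each pgd entry maps 4MB); the helper name is hypothetical, not part of this header:

```c
/* Hypothetical helper: decompose a 32-bit virtual address under the
 * normal layout (PAGE_SHIFT == 12, PTE_SHIFT == 10). */
static inline void va_split(unsigned long va, unsigned long *pgd_idx,
			    unsigned long *pte_idx, unsigned long *off)
{
	*pgd_idx = va >> 22;		/* PGDIR_SHIFT = 12 + 10       */
	*pte_idx = (va >> 12) & 0x3ff;	/* PTRS_PER_PTE - 1 = 1023     */
	*off     = va & 0xfff;		/* byte within the 4KB page    */
}
```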
100 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | |
d455a369 | 101 | #define FIRST_USER_ADDRESS 0 |
1da177e4 LT |
102 | |
103 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | |
104 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | |
105 | ||
106 | #define pte_ERROR(e) \ | |
107 | printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e)) | |
108 | #define pmd_ERROR(e) \ | |
109 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | |
110 | #define pgd_ERROR(e) \ | |
111 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | |
112 | ||
113 | /* | |
114 | * Just any arbitrary offset to the start of the vmalloc VM area: the | |
115 | current 16MB value just means that there will be a 16MB "hole" after the |
116 | * physical memory until the kernel virtual memory starts. That means that | |
117 | * any out-of-bounds memory accesses will hopefully be caught. | |
118 | The vmalloc() routines leave a hole of 4kB between each vmalloced |
119 | * area for the same reason. ;) | |
120 | * | |
121 | * We no longer map larger than phys RAM with the BATs so we don't have | |
122 | * to worry about the VMALLOC_OFFSET causing problems. We do have to worry | |
123 | about our early calls to ioremap(), which start growing down from |
124 | ioremap_base, running into the VM area allocations (growing upwards |
125 | from VMALLOC_START). For this reason we have ioremap_bot, so we can check |
126 | when the VM system actually runs into the mappings we set up during |
127 | early boot. This really does become a problem on machines with large |
128 | amounts of RAM. -- Cort |
129 | */ | |
130 | #define VMALLOC_OFFSET (0x1000000) /* 16M */ | |
0ec57e53 MT |
131 | #ifdef PPC_PIN_SIZE |
132 | #define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) | |
1da177e4 LT |
133 | #else |
134 | #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) | |
135 | #endif | |
136 | #define VMALLOC_END ioremap_bot | |
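To make the alignment arithmetic concrete, a sketch of the !PPC_PIN_SIZE case with a hypothetical high_memory value (192MB of RAM mapped at a PAGE_OFFSET of 0xC0000000):

```c
/* Hypothetical worked example of the non-PPC_PIN_SIZE VMALLOC_START. */
static inline unsigned long vmalloc_start_demo(void)
{
	unsigned long high = 0xCC000000UL;	/* pretend high_memory */

	/* (0xCC000000 + 0x1000000) & ~0xFFFFFF == 0xCD000000, leaving
	 * a full 16MB guard hole between lowmem and the vmalloc area. */
	return (high + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1);
}
```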
137 | ||
138 | /* | |
139 | * Bits in a linux-style PTE. These match the bits in the | |
140 | * (hardware-defined) PowerPC PTE as closely as possible. | |
141 | */ | |
142 | ||
143 | #if defined(CONFIG_40x) | |
144 | ||
145 | /* There are several potential gotchas here. The 40x hardware TLBLO | |
146 | field looks like this: | |
147 | ||
148 | 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | |
149 | RPN..................... 0 0 EX WR ZSEL....... W I M G | |
150 | ||
151 | Where possible we make the Linux PTE bits match up with this: |
152 | ||
153 | - bits 20 and 21 must be cleared, because we use 4k pages (40x can | |
154 | support down to 1k pages); this is done in the TLBMiss exception |
155 | handler. | |
156 | - We use only zones 0 (for kernel pages) and 1 (for user pages) | |
157 | of the 16 available. Bits 24-26 of the TLB are cleared in the TLB |
158 | miss handler. Bit 27 is PAGE_USER, thus selecting the correct | |
159 | zone. | |
160 | - PRESENT *must* be in the bottom two bits because swap cache | |
161 | entries use the top 30 bits. Because 40x doesn't support SMP | |
162 | anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 | |
163 | is cleared in the TLB miss handler before the TLB entry is loaded. | |
164 | - All other bits of the PTE are loaded into TLBLO without | |
165 | modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for | |
166 | software PTE bits. We actually use bits 21, 24, 25, and |
167 | 30 respectively for the software bits: ACCESSED, DIRTY, RW, and | |
168 | PRESENT. | |
169 | */ | |
170 | ||
171 | /* Definitions for 40x embedded chips. */ | |
172 | #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ | |
173 | #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ | |
174 | #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ | |
175 | #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ | |
176 | #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ | |
177 | #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ | |
178 | #define _PAGE_RW 0x040 /* software: Writes permitted */ | |
179 | #define _PAGE_DIRTY 0x080 /* software: dirty page */ | |
180 | #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ | |
181 | #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ | |
182 | #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ | |
183 | ||
184 | #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ | |
185 | #define _PMD_BAD 0x802 | |
186 | #define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */ | |
187 | #define _PMD_SIZE_4M 0x0c0 | |
188 | #define _PMD_SIZE_16M 0x0e0 | |
189 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) | |
190 | ||
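As a quick check of PMD_PAGE_SIZE() against the size-field encodings above:

```c
/* Worked check, using the values from the defines above:
 *   _PMD_SIZE_4M  = 0x0c0: 1024 << (0x0c0 >> 4) = 1024 << 12 =  4MB
 *   _PMD_SIZE_16M = 0x0e0: 1024 << (0x0e0 >> 4) = 1024 << 14 = 16MB */
```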
191 | #elif defined(CONFIG_44x) | |
192 | /* | |
193 | * Definitions for PPC440 | |
194 | * | |
195 | * Because of the 3 word TLB entries to support 36-bit addressing, | |
196 | * the attributes are difficult to map in such a fashion that they |
197 | * are easily loaded during exception processing. I decided to | |
198 | * organize the entry so the ERPN is the only portion in the | |
199 | * upper word of the PTE and the attribute bits below are packed | |
200 | * in as sensibly as they can be in the area below a 4KB page size | |
201 | * oriented RPN. This at least makes it easy to load the RPN and | |
202 | * ERPN fields in the TLB. -Matt | |
203 | * | |
204 | * Note that these bits preclude future use of a page size | |
205 | * less than 4KB. | |
534afb90 MP |
206 | * |
207 | * | |
208 | * The PPC 440 core has the following TLB attribute fields: |
209 | * | |
210 | * TLB1: | |
211 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | |
212 | * RPN................................. - - - - - - ERPN....... | |
213 | * | |
214 | * TLB2: | |
215 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | |
216 | * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR | |
217 | * | |
218 | * There are some constraints and options to consider when deciding how |
219 | * to map software bits into a TLB entry. |
220 | * | |
221 | * - PRESENT *must* be in the bottom three bits because swap cache | |
222 | * entries use the top 29 bits for TLB2. | |
223 | * | |
224 | * - FILE *must* be in the bottom three bits because swap cache | |
225 | * entries use the top 29 bits for TLB2. | |
226 | * | |
227 | * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it | |
228 | * doesn't support SMP. So we can use this as software bit, like | |
229 | * DIRTY. | |
230 | * | |
021a52ac MP |
231 | * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used |
232 | * for memory protection related functions (see PTE structure in | |
233 | * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the | |
234 | * above bits. Note that the bit values are CPU specific, not architecture | |
235 | * specific. | |
236 | * | |
237 | * The kernel PTE entry holds an arch-dependent swp_entry structure under | |
238 | * certain situations. In other words, in such situations some portion of | |
239 | * the PTE bits are used as a swp_entry. In the PPC implementation, the | |
240 | * 3rd-24th LSBs are shared with the swp_entry, while the three least |
241 | * significant bits (0-2) still hold protection values. That means |
242 | * those three protection bits are reserved in both the PTE and the |
243 | * swap-entry formats. |
244 | * | |
245 | * There are three protection bits available for SWAP entry: | |
534afb90 MP |
246 | * _PAGE_PRESENT |
247 | * _PAGE_FILE | |
248 | * _PAGE_HASHPTE (if the HW has it) |
249 | * | |
250 | * So those three bits must live in the three LSBs (0-2) of the PTE. |
251 | * | |
1da177e4 | 252 | */ |
534afb90 | 253 | |
1da177e4 LT |
254 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ |
255 | #define _PAGE_RW 0x00000002 /* S: Write permission */ | |
534afb90 | 256 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ |
1da177e4 LT |
257 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ |
258 | #define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ | |
259 | #define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ | |
260 | #define _PAGE_USER 0x00000040 /* S: User page */ | |
261 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ | |
262 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ | |
534afb90 | 263 | #define _PAGE_DIRTY 0x00000200 /* S: Page dirty */ |
1da177e4 LT |
264 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ |
265 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ | |
266 | ||
267 | /* TODO: Add large page lowmem mapping support */ | |
268 | #define _PMD_PRESENT 0 | |
269 | #define _PMD_PRESENT_MASK (PAGE_MASK) | |
270 | #define _PMD_BAD (~PAGE_MASK) | |
271 | ||
272 | /* ERPN in a PTE never gets cleared, ignore it */ | |
273 | #define _PTE_NONE_MASK 0xffffffff00000000ULL | |
274 | ||
f50b153b | 275 | #elif defined(CONFIG_FSL_BOOKE) |
1da177e4 LT |
276 | /* |
277 | MMU Assist Register 3: | |
278 | ||
279 | 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | |
280 | RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR | |
281 | ||
282 | - PRESENT *must* be in the bottom three bits because swap cache | |
283 | entries use the top 29 bits. | |
284 | ||
285 | - FILE *must* be in the bottom three bits because swap cache | |
286 | entries use the top 29 bits. | |
287 | */ | |
288 | ||
f50b153b KG |
289 | /* Definitions for FSL Book-E Cores */ |
290 | #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ | |
291 | #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ | |
292 | #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ | |
293 | #define _PAGE_ACCESSED 0x00004 /* S: Page referenced */ | |
294 | #define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */ | |
295 | #define _PAGE_RW 0x00010 /* S: Write permission */ | |
296 | #define _PAGE_HWEXEC 0x00020 /* H: UX permission */ | |
297 | ||
298 | #define _PAGE_ENDIAN 0x00040 /* H: E bit */ | |
299 | #define _PAGE_GUARDED 0x00080 /* H: G bit */ | |
300 | #define _PAGE_COHERENT 0x00100 /* H: M bit */ | |
301 | #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ | |
302 | #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ | |
303 | ||
304 | #ifdef CONFIG_PTE_64BIT | |
305 | #define _PAGE_DIRTY 0x08000 /* S: Page dirty */ | |
306 | ||
307 | /* ERPN in a PTE never gets cleared, ignore it */ | |
308 | #define _PTE_NONE_MASK 0xffffffffffff0000ULL | |
309 | #else | |
310 | #define _PAGE_DIRTY 0x00800 /* S: Page dirty */ | |
311 | #endif | |
1da177e4 LT |
312 | |
313 | #define _PMD_PRESENT 0 | |
314 | #define _PMD_PRESENT_MASK (PAGE_MASK) | |
315 | #define _PMD_BAD (~PAGE_MASK) | |
316 | ||
1da177e4 LT |
317 | #elif defined(CONFIG_8xx) |
318 | /* Definitions for 8xx embedded chips. */ | |
319 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ | |
320 | #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */ | |
321 | #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ | |
322 | #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ | |
323 | ||
324 | /* These five software bits must be masked out when the entry is loaded | |
325 | * into the TLB. | |
326 | */ | |
327 | #define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */ | |
328 | #define _PAGE_GUARDED 0x0010 /* software: guarded access */ | |
329 | #define _PAGE_DIRTY 0x0020 /* software: page changed */ | |
330 | #define _PAGE_RW 0x0040 /* software: user write access allowed */ | |
331 | #define _PAGE_ACCESSED 0x0080 /* software: page referenced */ | |
332 | ||
333 | /* Setting any bits in the nibble with the following two controls will |
334 | * require a TLB exception handler change. It is assumed unused bits | |
335 | * are always zero. | |
336 | */ | |
337 | #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ | |
338 | #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ | |
339 | ||
340 | #define _PMD_PRESENT 0x0001 | |
341 | #define _PMD_BAD 0x0ff0 | |
342 | #define _PMD_PAGE_MASK 0x000c | |
343 | #define _PMD_PAGE_8M 0x000c | |
344 | ||
345 | /* | |
346 | * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE | |
347 | * for an address even if _PAGE_PRESENT is not set, as a performance | |
348 | * optimization. This is a bug if you ever want to use swap unless | |
349 | * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific | |
350 | * definitions for __swp_entry etc. below, which would be gross. | |
351 | * -- paulus | |
352 | */ | |
353 | #define _PTE_NONE_MASK _PAGE_ACCESSED | |
354 | ||
355 | #else /* CONFIG_6xx */ | |
356 | /* Definitions for 60x, 740/750, etc. */ | |
357 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ | |
358 | #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ | |
359 | #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ | |
360 | #define _PAGE_USER 0x004 /* usermode access allowed */ | |
361 | #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ | |
362 | #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ | |
363 | #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ | |
364 | #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ | |
365 | #define _PAGE_DIRTY 0x080 /* C: page changed */ | |
366 | #define _PAGE_ACCESSED 0x100 /* R: page referenced */ | |
367 | #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */ | |
368 | #define _PAGE_RW 0x400 /* software: user write access allowed */ | |
369 | ||
370 | #define _PTE_NONE_MASK _PAGE_HASHPTE | |
371 | ||
372 | #define _PMD_PRESENT 0 | |
373 | #define _PMD_PRESENT_MASK (PAGE_MASK) | |
374 | #define _PMD_BAD (~PAGE_MASK) | |
375 | #endif | |
376 | ||
377 | /* | |
378 | * Some bits are only used on some cpu families... | |
379 | */ | |
380 | #ifndef _PAGE_HASHPTE | |
381 | #define _PAGE_HASHPTE 0 | |
382 | #endif | |
383 | #ifndef _PTE_NONE_MASK | |
384 | #define _PTE_NONE_MASK 0 | |
385 | #endif | |
386 | #ifndef _PAGE_SHARED | |
387 | #define _PAGE_SHARED 0 | |
388 | #endif | |
389 | #ifndef _PAGE_HWWRITE | |
390 | #define _PAGE_HWWRITE 0 | |
391 | #endif | |
392 | #ifndef _PAGE_HWEXEC | |
393 | #define _PAGE_HWEXEC 0 | |
394 | #endif | |
395 | #ifndef _PAGE_EXEC | |
396 | #define _PAGE_EXEC 0 | |
397 | #endif | |
398 | #ifndef _PMD_PRESENT_MASK | |
399 | #define _PMD_PRESENT_MASK _PMD_PRESENT | |
400 | #endif | |
401 | #ifndef _PMD_SIZE | |
402 | #define _PMD_SIZE 0 | |
403 | #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() | |
404 | #endif | |
405 | ||
406 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | |
407 | ||
408 | /* | |
409 | * Note: the _PAGE_COHERENT bit automatically gets set in the hardware | |
410 | * PTE if CONFIG_SMP is defined (hash_page does this); there is no need | |
411 | * to have it in the Linux PTE, and in fact the bit could be reused for | |
412 | * another purpose. -- paulus. | |
413 | */ | |
414 | ||
415 | #ifdef CONFIG_44x | |
416 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED) | |
417 | #else | |
418 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) | |
419 | #endif | |
420 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) | |
421 | #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) | |
422 | ||
423 | #ifdef CONFIG_PPC_STD_MMU | |
424 | /* On the standard PPC MMU, no user access implies kernel read/write access, |
425 | * so to write-protect kernel memory we must turn on user access */ | |
426 | #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER) | |
427 | #else | |
428 | #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) | |
429 | #endif | |
430 | ||
431 | #define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) | |
432 | #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) | |
433 | ||
434 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) | |
435 | /* We want the debuggers to be able to set breakpoints anywhere, so | |
436 | * don't write-protect the kernel text */ |
437 | #define _PAGE_RAM_TEXT _PAGE_RAM | |
438 | #else | |
439 | #define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC) | |
440 | #endif | |
441 | ||
442 | #define PAGE_NONE __pgprot(_PAGE_BASE) | |
443 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) | |
444 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | |
445 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) | |
446 | #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) | |
447 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) | |
448 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | |
449 | ||
450 | #define PAGE_KERNEL __pgprot(_PAGE_RAM) | |
451 | #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO) | |
452 | ||
453 | /* | |
454 | * The PowerPC can only do execute protection on a segment (256MB) basis, | |
455 | * not on a page basis. So we consider execute permission the same as read. | |
456 | * Also, write permissions imply read permissions. | |
457 | * This is the closest we can get. |
458 | */ | |
459 | #define __P000 PAGE_NONE | |
460 | #define __P001 PAGE_READONLY_X | |
461 | #define __P010 PAGE_COPY | |
462 | #define __P011 PAGE_COPY_X | |
463 | #define __P100 PAGE_READONLY | |
464 | #define __P101 PAGE_READONLY_X | |
465 | #define __P110 PAGE_COPY | |
466 | #define __P111 PAGE_COPY_X | |
467 | ||
468 | #define __S000 PAGE_NONE | |
469 | #define __S001 PAGE_READONLY_X | |
470 | #define __S010 PAGE_SHARED | |
471 | #define __S011 PAGE_SHARED_X | |
472 | #define __S100 PAGE_READONLY | |
473 | #define __S101 PAGE_READONLY_X | |
474 | #define __S110 PAGE_SHARED | |
475 | #define __S111 PAGE_SHARED_X | |
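These tables feed the generic mm layer's protection_map[]: a mapping's low vm_flags bits select an entry, with the __Pxxx row used for private (copy-on-write) mappings and the __Sxxx row for shared ones. Roughly, as a sketch of how the core kernel of this era consumes the tables (not code from this header):

```c
/* Sketch: VM_READ/VM_WRITE/VM_EXEC/VM_SHARED occupy the low four bits
 * of vm_flags, so they index the 16-entry protection_map[] directly. */
pgprot_t prot = protection_map[vm_flags & 0x0f];
```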
476 | ||
477 | #ifndef __ASSEMBLY__ | |
478 | /* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a | |
479 | * kernel without large page PMD support */ | |
480 | extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |
481 | ||
482 | /* | |
483 | * Conversions between PTE values and page frame numbers. | |
484 | */ | |
485 | ||
b464fce5 KG |
486 | /* in some cases we want to additionally adjust where the pfn is in the pte |
487 | * to allow room for more flags */ |
f50b153b KG |
488 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) |
489 | #define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8) | |
490 | #else | |
b464fce5 | 491 | #define PFN_SHIFT_OFFSET (PAGE_SHIFT) |
f50b153b | 492 | #endif |
b464fce5 KG |
493 | |
494 | #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) | |
1da177e4 LT |
495 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
496 | ||
b464fce5 KG |
497 | #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\ |
498 | pgprot_val(prot)) | |
1da177e4 LT |
499 | #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) |
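A minimal round-trip sketch, assuming 4KB pages and the default PFN_SHIFT_OFFSET (with CONFIG_FSL_BOOKE plus CONFIG_PTE_64BIT the shift grows to PAGE_SHIFT + 8, leaving eight extra flag bits below the pfn); the function is hypothetical:

```c
/* Hypothetical demonstration: a pfn survives the pte round trip. */
static inline void pfn_round_trip_demo(void)
{
	unsigned long pfn = 0x12345;		/* arbitrary frame number */
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* pfn << shift | prot    */

	BUG_ON(pte_pfn(pte) != pfn);	/* shifting back strips the prot bits */
}
```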
500 | ||
501 | /* | |
502 | * ZERO_PAGE is a global shared page that is always zero: used | |
503 | * for zero-mapped memory areas etc. |
504 | */ | |
505 | extern unsigned long empty_zero_page[1024]; | |
506 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | |
507 | ||
508 | #endif /* __ASSEMBLY__ */ | |
509 | ||
510 | #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) | |
511 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | |
512 | #define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) | |
513 | ||
514 | #define pmd_none(pmd) (!pmd_val(pmd)) | |
515 | #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) | |
516 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) | |
517 | #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) | |
518 | ||
519 | #ifndef __ASSEMBLY__ | |
520 | /* | |
521 | * The "pgd_xxx()" functions here are trivial for a folded two-level | |
522 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | |
523 | * into the pgd entry) | |
524 | */ | |
525 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
526 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
527 | static inline int pgd_present(pgd_t pgd) { return 1; } | |
528 | #define pgd_clear(xp) do { } while (0) | |
529 | ||
530 | #define pgd_page(pgd) \ | |
531 | ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) | |
532 | ||
533 | /* | |
534 | * The following only work if pte_present() is true. | |
535 | * Undefined behaviour if not.. | |
536 | */ | |
537 | static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | |
538 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | |
539 | static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } | |
540 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | |
541 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | |
542 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | |
543 | ||
544 | static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } | |
545 | static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } | |
546 | ||
547 | static inline pte_t pte_rdprotect(pte_t pte) { | |
548 | pte_val(pte) &= ~_PAGE_USER; return pte; } | |
549 | static inline pte_t pte_wrprotect(pte_t pte) { | |
550 | pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } | |
551 | static inline pte_t pte_exprotect(pte_t pte) { | |
552 | pte_val(pte) &= ~_PAGE_EXEC; return pte; } | |
553 | static inline pte_t pte_mkclean(pte_t pte) { | |
554 | pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } | |
555 | static inline pte_t pte_mkold(pte_t pte) { | |
556 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | |
557 | ||
558 | static inline pte_t pte_mkread(pte_t pte) { | |
559 | pte_val(pte) |= _PAGE_USER; return pte; } | |
560 | static inline pte_t pte_mkexec(pte_t pte) { | |
561 | pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } | |
562 | static inline pte_t pte_mkwrite(pte_t pte) { | |
563 | pte_val(pte) |= _PAGE_RW; return pte; } | |
564 | static inline pte_t pte_mkdirty(pte_t pte) { | |
565 | pte_val(pte) |= _PAGE_DIRTY; return pte; } | |
566 | static inline pte_t pte_mkyoung(pte_t pte) { | |
567 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | |
568 | ||
569 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |
570 | { | |
571 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | |
572 | return pte; | |
573 | } | |
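A typical use of pte_modify (a hedged sketch of the mprotect-style pattern, not code from this file) keeps the frame number and the ACCESSED/DIRTY state via _PAGE_CHG_MASK while swapping in the new protection:

```c
/* Sketch: re-protect one page while preserving the pfn and R/C bits. */
static inline void make_readonly_demo(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = pte_modify(*ptep, PAGE_READONLY);
	set_pte_at(mm, addr, ptep, pte);	/* defined later in this file */
}
```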
574 | ||
575 | /* | |
576 | * When flushing the tlb entry for a page, we also need to flush the hash | |
577 | * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. | |
578 | */ | |
579 | extern int flush_hash_pages(unsigned context, unsigned long va, | |
580 | unsigned long pmdval, int count); | |
581 | ||
582 | /* Add an HPTE to the hash table */ | |
583 | extern void add_hash_page(unsigned context, unsigned long va, | |
584 | unsigned long pmdval); | |
585 | ||
586 | /* | |
587 | * Atomic PTE updates. | |
588 | * | |
589 | * pte_update clears and sets bits atomically, and returns |
7a1e3350 KG |
590 | * the old pte value. In the 64-bit PTE case we lock around the |
591 | * low PTE word since we expect ALL flag bits to be there | |
1da177e4 | 592 | */ |
7a1e3350 | 593 | #ifndef CONFIG_PTE_64BIT |
1da177e4 LT |
594 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, |
595 | unsigned long set) | |
596 | { | |
597 | unsigned long old, tmp; | |
598 | ||
599 | __asm__ __volatile__("\ | |
600 | 1: lwarx %0,0,%3\n\ | |
601 | andc %1,%0,%4\n\ | |
602 | or %1,%1,%5\n" | |
603 | PPC405_ERR77(0,%3) | |
604 | " stwcx. %1,0,%3\n\ | |
605 | bne- 1b" | |
606 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | |
7a1e3350 | 607 | : "r" (p), "r" (clr), "r" (set), "m" (*p) |
1da177e4 LT |
608 | : "cc" ); |
609 | return old; | |
610 | } | |
7a1e3350 KG |
611 | #else |
612 | static inline unsigned long long pte_update(pte_t *p, unsigned long clr, | |
613 | unsigned long set) | |
614 | { | |
615 | unsigned long long old; | |
616 | unsigned long tmp; | |
617 | ||
618 | __asm__ __volatile__("\ | |
619 | 1: lwarx %L0,0,%4\n\ | |
620 | lwzx %0,0,%3\n\ | |
621 | andc %1,%L0,%5\n\ | |
622 | or %1,%1,%6\n" | |
623 | PPC405_ERR77(0,%3) | |
624 | " stwcx. %1,0,%4\n\ | |
625 | bne- 1b" | |
626 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | |
627 | : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) | |
628 | : "cc" ); | |
629 | return old; | |
630 | } | |
631 | #endif | |
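Ignoring atomicity, both variants compute the same read-modify-write; the lwarx/stwcx. pair simply retries until the store succeeds with no intervening writer. A non-atomic C rendering of the semantics (illustration only, never a substitute for the real thing):

```c
/* Illustration only: what pte_update computes, minus the atomicity. */
static inline unsigned long pte_update_sketch(pte_t *p, unsigned long clr,
					      unsigned long set)
{
	unsigned long old = pte_val(*p);

	pte_val(*p) = (old & ~clr) | set;	/* clear, then set */
	return old;				/* previous value  */
}
```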
1da177e4 LT |
632 | |
633 | /* | |
634 | * set_pte stores a Linux PTE into the Linux page table. |
635 | * On machines which use an MMU hash table we avoid changing the | |
636 | * _PAGE_HASHPTE bit. | |
637 | */ | |
638 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |
639 | pte_t *ptep, pte_t pte) | |
640 | { | |
641 | #if _PAGE_HASHPTE != 0 | |
642 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); | |
643 | #else | |
644 | *ptep = pte; | |
645 | #endif | |
646 | } | |
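Note the double masking above: the clear mask spares _PAGE_HASHPTE and the set value has it removed, so whatever hash_page recorded is left untouched.

```c
/* Effect of the _PAGE_HASHPTE-preserving update, schematically:
 *   new = (old & _PAGE_HASHPTE) | (pte_val(pte) & ~_PAGE_HASHPTE);
 * i.e. every bit comes from the new pte except HASHPTE, which is kept. */
```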
647 | ||
648 | /* | |
649 | * 2.6 calls this without flushing the TLB entry; this is wrong |
650 | * for our hash-based implementation, so we fix that up here. |
651 | */ | |
652 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | |
653 | static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) | |
654 | { | |
655 | unsigned long old; | |
656 | old = pte_update(ptep, _PAGE_ACCESSED, 0); | |
657 | #if _PAGE_HASHPTE != 0 | |
658 | if (old & _PAGE_HASHPTE) { | |
659 | unsigned long ptephys = __pa(ptep) & PAGE_MASK; | |
660 | flush_hash_pages(context, addr, ptephys, 1); | |
661 | } | |
662 | #endif | |
663 | return (old & _PAGE_ACCESSED) != 0; | |
664 | } | |
665 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ | |
666 | __ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep) | |
667 | ||
668 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | |
669 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, | |
670 | unsigned long addr, pte_t *ptep) | |
671 | { | |
672 | return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; | |
673 | } | |
674 | ||
675 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
676 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | |
677 | pte_t *ptep) | |
678 | { | |
679 | return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); | |
680 | } | |
681 | ||
682 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | |
683 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |
684 | pte_t *ptep) | |
685 | { | |
686 | pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); | |
687 | } | |
688 | ||
689 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
690 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | |
691 | { | |
692 | unsigned long bits = pte_val(entry) & | |
693 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); | |
694 | pte_update(ptep, 0, bits); | |
695 | } | |
696 | ||
697 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | |
698 | do { \ | |
699 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ | |
700 | flush_tlb_page_nohash(__vma, __address); \ | |
701 | } while(0) | |
702 | ||
703 | /* | |
704 | * Macro to mark a page protection value as "uncacheable". | |
705 | */ | |
706 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) | |
707 | ||
708 | struct file; | |
8b150478 | 709 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
1da177e4 LT |
710 | unsigned long size, pgprot_t vma_prot); |
711 | #define __HAVE_PHYS_MEM_ACCESS_PROT | |
712 | ||
713 | #define __HAVE_ARCH_PTE_SAME | |
714 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) | |
715 | ||
716 | /* | |
717 | * Note that on Book E processors, the pmd contains the kernel virtual | |
718 | * (lowmem) address of the pte page. The physical address is less useful | |
719 | * because everything runs with translation enabled (even the TLB miss | |
720 | * handler). On everything else the pmd contains the physical address | |
721 | * of the pte page. -- paulus | |
722 | */ | |
723 | #ifndef CONFIG_BOOKE | |
724 | #define pmd_page_kernel(pmd) \ | |
725 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | |
726 | #define pmd_page(pmd) \ | |
727 | (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) | |
728 | #else | |
729 | #define pmd_page_kernel(pmd) \ | |
730 | ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) | |
731 | #define pmd_page(pmd) \ | |
732 | (mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT)) | |
733 | #endif | |
734 | ||
735 | /* to find an entry in a kernel page-table-directory */ | |
736 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
737 | ||
738 | /* to find an entry in a page-table-directory */ | |
739 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | |
740 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | |
741 | ||
742 | /* Find an entry in the second-level page table. */ |
743 | static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) | |
744 | { | |
745 | return (pmd_t *) dir; | |
746 | } | |
747 | ||
748 | /* Find an entry in the third-level page table. */ |
749 | #define pte_index(address) \ | |
750 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | |
751 | #define pte_offset_kernel(dir, addr) \ | |
752 | ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr)) | |
753 | #define pte_offset_map(dir, addr) \ | |
754 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr)) | |
755 | #define pte_offset_map_nested(dir, addr) \ | |
756 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr)) | |
757 | ||
758 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | |
759 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | |
760 | ||
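Putting the walk together, a hypothetical helper for kernel addresses built only from the accessors above (the name is not part of this header):

```c
/* Hypothetical three-step (folded) walk for a kernel virtual address. */
static inline pte_t *kernel_pte_lookup(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* init_mm's pgd entry    */
	pmd_t *pmd = pmd_offset(pgd, addr);	/* folded: same entry     */

	if (!pmd_present(*pmd))			/* no pte page mapped     */
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* entry in the pte page  */
}
```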
761 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |
762 | ||
763 | extern void paging_init(void); | |
764 | ||
765 | /* | |
766 | * Encode and decode a swap entry. | |
767 | * Note that the bits we use in a PTE for representing a swap entry | |
768 | * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the | |
769 | * _PAGE_HASHPTE bit (if used). -- paulus |
770 | */ | |
771 | #define __swp_type(entry) ((entry).val & 0x1f) | |
772 | #define __swp_offset(entry) ((entry).val >> 5) | |
773 | #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) | |
774 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) | |
775 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) | |
776 | ||
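A round trip through these macros (a sketch; the values are arbitrary) shows why the low three bits stay clear of the encoding:

```c
/* Hypothetical demonstration of the swap-entry round trip. */
static inline void swp_round_trip_demo(void)
{
	swp_entry_t e = __swp_entry(3, 100);	/* val = 3 | (100 << 5) */
	pte_t pte = __swp_entry_to_pte(e);	/* pte_val = val << 3   */
	swp_entry_t back = __pte_to_swp_entry(pte);

	/* Bits 0-2 of the pte are zero, so _PAGE_PRESENT, _PAGE_FILE and
	 * _PAGE_HASHPTE can never collide with the encoding. */
	BUG_ON(__swp_type(back) != 3 || __swp_offset(back) != 100);
}
```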
777 | /* Encode and decode a nonlinear file mapping entry */ | |
778 | #define PTE_FILE_MAX_BITS 29 | |
779 | #define pte_to_pgoff(pte) (pte_val(pte) >> 3) | |
780 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) | |
781 | ||
782 | /* CONFIG_APUS */ | |
783 | /* For virtual address to physical address conversion */ | |
784 | extern void cache_clear(__u32 addr, int length); | |
785 | extern void cache_push(__u32 addr, int length); | |
786 | extern int mm_end_of_chunk (unsigned long addr, int len); | |
787 | extern unsigned long iopa(unsigned long addr); | |
788 | extern unsigned long mm_ptov(unsigned long addr) __attribute_const__; | |
789 | ||
790 | /* Values for nocacheflag and cmode */ | |
791 | /* These are not used by the APUS kernel_map, but prevent |
792 | compilation errors. */ | |
793 | #define KERNELMAP_FULL_CACHING 0 | |
794 | #define KERNELMAP_NOCACHE_SER 1 | |
795 | #define KERNELMAP_NOCACHE_NONSER 2 | |
796 | #define KERNELMAP_NO_COPYBACK 3 | |
797 | ||
798 | /* | |
799 | * Map some physical address range into the kernel address space. | |
800 | */ | |
801 | extern unsigned long kernel_map(unsigned long paddr, unsigned long size, | |
802 | int nocacheflag, unsigned long *memavailp); |
803 | ||
804 | /* | |
805 | * Set cache mode of (kernel space) address range. | |
806 | */ | |
807 | extern void kernel_set_cachemode (unsigned long address, unsigned long size, | |
808 | unsigned int cmode); | |
809 | ||
810 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | |
811 | #define kern_addr_valid(addr) (1) | |
812 | ||
813 | #ifdef CONFIG_PHYS_64BIT | |
814 | extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | |
815 | unsigned long paddr, unsigned long size, pgprot_t prot); | |
1da177e4 LT |
816 | |
817 | static inline int io_remap_pfn_range(struct vm_area_struct *vma, | |
818 | unsigned long vaddr, | |
819 | unsigned long pfn, | |
820 | unsigned long size, | |
821 | pgprot_t prot) | |
822 | { | |
823 | phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); | |
824 | return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot); | |
825 | } | |
826 | #else | |
1da177e4 LT |
827 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
828 | remap_pfn_range(vma, vaddr, pfn, size, prot) | |
829 | #endif | |
830 | ||
831 | #define MK_IOSPACE_PFN(space, pfn) (pfn) | |
832 | #define GET_IOSPACE(pfn) 0 | |
833 | #define GET_PFN(pfn) (pfn) | |
834 | ||
835 | /* | |
836 | * No page table caches to initialise | |
837 | */ | |
838 | #define pgtable_cache_init() do { } while (0) | |
839 | ||
840 | extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep); | |
841 | ||
842 | #include <asm-generic/pgtable.h> | |
843 | ||
844 | #endif /* !__ASSEMBLY__ */ | |
845 | ||
846 | #endif /* _PPC_PGTABLE_H */ | |
847 | #endif /* __KERNEL__ */ |