/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
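
/*
 * Illustrative note (the constants quoted here are taken from the
 * SH-4A definitions in <asm/mmu.h> and are an assumption of this
 * sketch): with PMB_E_SHIFT = 8, PMB_ADDR = 0xf6100000 and
 * PMB_DATA = 0xf7100000, entry 2 resolves to an address-array slot
 * at 0xf6100200 and a data-array slot at 0xf7100200, i.e. the 16
 * entries sit at a 0x100 stride in both arrays.
 */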

static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns the bitmap size when full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}
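
/*
 * Note that the find_first_zero_bit()/test_and_set_bit() pairing above
 * tolerates a racing allocator: if another path claims the slot between
 * the scan and the atomic set, we simply rescan rather than hand out a
 * duplicate entry.
 */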
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		/* Claim the caller's fixed slot, failing if it's taken. */
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}
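
/*
 * As a rough illustration, establishing a cached 128MB mapping of
 * physical 0x48000000 at virtual 0x88000000 in slot 9 comes down to
 * two register writes:
 *
 *	ctrl_outl(0x88000000 | PMB_V, mk_pmb_addr(9));
 *	ctrl_outl(0x48000000 | PMB_C | PMB_SZ_128M | PMB_V, mk_pmb_data(9));
 */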

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size	= 0x08000000, .flag = PMB_SZ_128M, },
	{ .size	= 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size	= 0x01000000, .flag = PMB_SZ_16M,  },
};
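
/*
 * pmb_remap() below walks this table greedily, largest size first. As
 * an example, a 192MB (0x0c000000) request decomposes into one 128MB
 * entry followed by one 64MB entry; a smaller size is only tried once
 * the current size no longer fits in what remains.
 */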

long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
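
/*
 * Typical (hypothetical) call from an ioremap-style caller:
 *
 *	long mapped = pmb_remap(vaddr, phys, 0x04000000, _PAGE_CACHABLE);
 *
 * A non-negative return is the number of bytes actually mapped, which
 * may fall short of the request since any remainder below the minimum
 * 16MB entry size is left unmapped; a negative return is an errno from
 * entry allocation.
 */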

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/* Not ours; keep pmbe NULL unless we match. */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
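
/*
 * Note that the do/while above follows the ->link chain built by
 * pmb_remap(), so unmapping the head VPN tears down every entry that
 * backed the original contiguous request.
 */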

#ifdef CONFIG_PMB_LEGACY
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
}

static int pmb_apply_legacy_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: Preserving legacy mappings:\n");

	/*
	 * The following entries are setup by the bootloader.
	 *
	 * Entry       VPN         PPN        V   SZ     C   UB
	 * --------------------------------------------------------
	 *   0      0xA0000000  0x00000000   1   64MB   0   0
	 *   1      0xA4000000  0x04000000   1   16MB   0   0
	 *   2      0xA6000000  0x08000000   1   16MB   0   0
	 *   9      0x88000000  0x48000000   1  128MB   1   1
	 *  10      0x90000000  0x50000000   1  128MB   1   1
	 *  11      0x98000000  0x58000000   1  128MB   1   1
	 *  13      0xA8000000  0x48000000   1  128MB   0   0
	 *  14      0xB0000000  0x50000000   1  128MB   0   0
	 *  15      0xB8000000  0x58000000   1  128MB   0   0
	 *
	 * The only entries that we need are the ones that map the kernel
	 * at the cached and uncached addresses.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (pmb_ppn_in_range(ppn)) {
			unsigned int size;
			char *sz_str = NULL;

			size = data_val & PMB_SZ_MASK;

			sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
				 (size == PMB_SZ_64M)  ? " 64MB" :
				 (size == PMB_SZ_128M) ? "128MB" :
							 "512MB";

			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
				(data_val & PMB_C) ? "" : "un");

			applied++;
		} else {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
		}
	}
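
	/*
	 * Note the inverted sense of the return value: zero (success)
	 * means at least one legacy mapping was preserved, letting
	 * pmb_init() skip establishing its own mappings.
	 */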
	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;
}
#endif /* CONFIG_PMB_LEGACY */

int pmb_init(void)
{
	int i;
	unsigned long addr, data;
	unsigned long ret;

	jump_to_uncached();

	/*
	 * Attempt to apply the legacy boot mappings if configured. If
	 * this is successful then we simply carry on with those and
	 * don't bother establishing additional memory mappings. Dynamic
	 * device mappings through pmb_remap() can still be bolted on
	 * after this.
	 */
	ret = pmb_apply_legacy_mappings();
	if (ret == 0) {
		back_to_cached();
		return 0;
	}

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	ctrl_outl(0, PMB_IRMCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
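
/*
 * PASCR.SE is the 32-bit address space extension enable bit; when it
 * reads back as zero the CPU is still running with the legacy 29-bit
 * physical address space, in which case the PMB is not in use.
 */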

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
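
/*
 * With debugfs mounted, the table can then be inspected from userspace,
 * e.g. (assuming the standard mount point):
 *
 *	# cat /sys/kernel/debug/sh/pmb
 */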

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}

	prev_state = state;

	return 0;
}
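
/*
 * The resume hook below simply reuses the suspend path with PMSG_ON,
 * so that coming back from hibernation rewrites every live software
 * entry into the hardware PMB via set_pmb_entry().
 */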
static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif /* CONFIG_PM */