/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;
	spinlock_t lock;
	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;
	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
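
/*
 * PMB slots are tracked in software via the pmb_map bitmap; claiming a
 * bit only reserves the slot, the hardware entry itself is not written
 * until __set_pmb_entry() runs for it.
 */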
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
			mk_pmb_data(pmbe->entry));
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
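
/*
 * Illustrative example: remapping an 80MB region lands in a 64MB entry
 * followed by a 16MB entry; the two are chained through ->link so that
 * a later pmb_unmap() on the base address tears both down. The return
 * value is the number of bytes actually mapped.
 */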
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		pmb_flags |= PMB_C;

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

	return err;
}
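
/*
 * pmb_unmap() takes the virtual base address handed back by pmb_remap()
 * and tears down the whole chain of linked entries behind it.
 */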
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
		}
	}

	read_unlock(&pmb_rwlock);

	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
}
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}
static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
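
/*
 * pmb_notify() prints one line per established boot mapping, e.g. a
 * 128MB cached mapping of physical 0x08000000 at virtual 0x80000000
 * (exact values depend on the board and boot loader).
 */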
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
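
/*
 * pmb_merge() walks the ->link chain hanging off 'head', finds the
 * longest run whose combined span is itself a valid PMB size, folds
 * that span into 'head' and then unmaps the now-redundant entries.
 */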
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif
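
/*
 * Boot-time bring-up: adopt the boot loader's mappings, coalesce any
 * contiguous runs, optionally resize the uncached mapping, then report
 * the result and flush the TLB.
 */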
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
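
/*
 * debugfs dump of the hardware PMB state, one row per entry. An
 * illustrative row (matching the format comment below):
 *
 *	02: V 0x88 0x08 128MB C CB  B
 */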
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
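
/*
 * On resume from hibernation the hardware PMB contents are lost, so the
 * suspend hook re-programs every software-tracked entry back into the
 * PMB when transitioning from FREEZE back to ON.
 */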
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}
static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);