/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 * Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

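/*
 * The PMB is programmed through a pair of memory-mapped register arrays.
 * These helpers turn an entry index into the address of its slot in the
 * address (PMB_ADDR) and data (PMB_DATA) arrays.
 */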
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

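/*
 * Find the first free slot in the pmb_map bitmap and claim it atomically,
 * retrying if another path raced us to the same bit. Returns the slot
 * index, or -ENOSPC when all entries are in use.
 */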
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

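/*
 * Set up the software copy of a PMB entry for the given VPN/PPN pair,
 * either in a freshly allocated slot or, when a specific entry number is
 * passed in, in that fixed slot (used when adopting entries that are
 * already programmed in hardware, e.g. by the boot loader).
 */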
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		/* Claim the requested slot so it cannot be handed out again */
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;
	pmbe->link = NULL;	/* no chained entry yet */

	return pmbe;
}

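/*
 * Release the software copy of an entry and return its slot to the
 * allocation bitmap. The hardware entry itself is invalidated separately
 * via clear_pmb_entry().
 */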
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = 0;
	pmbe->ppn = 0;
	pmbe->flags = 0;
	pmbe->entry = 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}

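/*
 * Program a software entry into the hardware PMB. The write must be done
 * through the uncached P2 window, hence the jump_to_uncached() bracket.
 */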
static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

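/*
 * Invalidate a hardware entry by clearing the valid (V) bit in both the
 * address and data arrays, again from uncached space.
 */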
static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

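/*
 * Map 'size' bytes at vaddr -> phys using as few PMB entries as possible,
 * trying the largest section size that still fits at each step. Entries
 * belonging to one mapping are chained together through ->link so that
 * pmb_unmap() can tear the whole mapping down again. Returns the number
 * of bytes actually mapped (any remainder smaller than 16MB is left
 * unmapped), or a negative error code.
 */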
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link the entries that make up this mapping so that a
		 * multi-entry mapping can be torn down in one go.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

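/*
 * Tear down the mapping whose virtual start address is 'addr', including
 * any chained entries that were created for it by pmb_remap().
 */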
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		/* Only an entry whose VPN matches may be torn down */
		if (test_bit(i, &pmb_map) &&
		    pmb_entry_list[i].vpn == addr) {
			pmbe = &pmb_entry_list[i];
			break;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

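/*
 * Invalidate and free an entry together with everything chained to it
 * through ->link.
 */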
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

#ifdef CONFIG_PMB
int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int i;
	long size, ret;

	jump_to_uncached();

	/*
	 * Insert PMB entries for the P1 and P2 areas so that, after
	 * we've switched the MMU to 32-bit mode, the semantics of P1
	 * and P2 are the same as in 29-bit mode, e.g.
	 *
	 *	P1 - provides a cached window onto physical memory
	 *	P2 - provides an uncached window onto physical memory
	 */
	size = __MEMORY_START + __MEMORY_SIZE;

	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
	BUG_ON(ret != size);

	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
	BUG_ON(ret != size);

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
#else
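/*
 * When the PMB has already been set up for us (fixed PMB, e.g. by the boot
 * loader), we don't program any entries here. Instead, walk the hardware
 * entries that are already valid, normalize their cacheability flags to
 * match the kernel's cache configuration, and register a software copy of
 * each one so the rest of this code knows which slots are occupied.
 */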
int __uses_jump_to_uncached pmb_init(void)
{
	int i;
	unsigned long addr, data;

	jump_to_uncached();

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	back_to_cached();

	return 0;
}
#endif /* CONFIG_PMB */

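/*
 * debugfs dump of the current hardware PMB state, one line per entry.
 */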
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
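/*
 * The hardware PMB state does not survive hibernation. On the
 * FREEZE -> ON transition, re-program every entry we hold a software
 * copy of so the mappings are restored before normal operation resumes.
 */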
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif