/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
        preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, retry, wait);
#endif
        func(info);
        preempt_enable();
}
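
/*
 * Illustrative use (editorial example, mirroring r4k___flush_cache_all()
 * below):
 *
 *	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
 *
 * The callback always runs once on the local CPU; the smp_call_function()
 * above is only compiled in when primary caches are not shared across
 * hardware threads, so together the calls cover every CPU exactly once.
 * preempt_disable() pins us to one CPU for the duration.
 */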

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches.
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
                *(volatile unsigned long *)CKSEG1;			\
        if (R4600_V1_HIT_CACHEOP_WAR)					\
                __asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
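
/*
 * Editorial note on the workaround above (my reading of the erratum, not
 * an authoritative description): on R4600 V2.x a Hit-type cache op can
 * misbehave unless the chip's internal buffers are idle, and the dummy
 * uncached load from CKSEG1 is there to drain them first; on V1.x a few
 * nops to pad the pipeline are deemed sufficient.
 */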

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
        __asm__ __volatile__(						\
                "b\t1f\n\t"						\
                ".align\t" #order "\n\t"				\
                "1:\n\t"						\
                )
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
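
/*
 * Why the alignment matters (editorial note, inferred from the loop
 * comments below): the TX49 workaround invalidates the icache in two
 * passes over alternating 1kB chunks.  Aligning the loop code to a 1kB or
 * 2kB boundary guarantees the executing instructions sit in an "even"
 * chunk while the odd chunks are blasted and vice versa, so the code never
 * invalidates the cache lines it is currently running from.
 */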

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
        r4k_blast_scache();
        return;
#endif
        r4k_blast_dcache();
        r4k_blast_icache();

        switch (current_cpu_type()) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                r4k_blast_scache();
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        int i;

        for_each_online_cpu(i)
                if (cpu_context(i, mm))
                        return 1;

        return 0;
#else
        return cpu_context(smp_processor_id(), mm);
#endif
}
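
/*
 * Editorial note: on CONFIG_MIPS_MT_SMP/SMTC kernels an address space may
 * hold a live ASID on any CPU, so every online CPU's context must be
 * checked before concluding the mm was never active; elsewhere the local
 * CPU's context is decisive.
 */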

static inline void local_r4k_flush_cache_range(void *args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!(has_valid_asid(vma->vm_mm)))
                return;

        r4k_blast_dcache();
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
         * we only flush the primary caches but R10000 and R12000 behave
         * sanely ...  R4000SC and R4400SC indexed S-cache ops also
         * invalidate primary caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
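
/*
 * Rationale (editorial note): when the D-cache cannot alias there is
 * nothing useful flush_cache_mm() could do; any dirty lines belonging to
 * the dead mm remain reachable through their physical page and are written
 * back by the normal flush paths, so it is a no-op on such CPUs.
 */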

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                if (cpu_has_dc_aliases)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page, KM_USER0);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page(addr);
        }

        if (vaddr) {
                if (cpu_has_dc_aliases)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr, KM_USER0);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
                                1, 1);
}
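
/*
 * Editorial note on the in_atomic() test above: smp_call_function() must
 * not be invoked from atomic context, so when we are atomic only the local
 * CPU is flushed and the caller is trusted to know that this suffices.
 */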

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else
                protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
        instruction_hazard();
}
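
/*
 * Editorial note: the trailing instruction_hazard() is what makes the
 * flush visible to instruction fetch; without a hazard barrier the
 * pipeline could still execute stale instructions fetched before the
 * I-cache lines were invalidated.
 */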

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly.
         */
        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_inv_scache_range(addr, addr + size);
                return;
        }

        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }

        bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We're protected against bad userland addresses, so we don't care very
 * much about what happens in that case.  Usually a segmentation fault will
 * dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla	$at,1f\n\t"
#endif
                        "cache	%0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}
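
/*
 * Editorial note: only a virtually tagged (VIVT) I-cache needs this hook;
 * stale lines left behind by a previous address space cannot be picked off
 * selectively by address, so the whole I-cache is blasted.  A physically
 * tagged I-cache never requires a wholesale flush here.
 */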

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}
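
/*
 * Pattern used above (editorial reading of the code, assuming the usual
 * 16kB 4-way RM7000 I-cache with a 4kB way size): for each index in the
 * first 4kB, the three groups of four cache ops hit the same set in all
 * four ways via the 0x1000 stride: store an invalid tag, force a Fill,
 * then store the tag again.
 */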

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (c->cputype) {
        case CPU_R4600:			/* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;	/* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;	/* doesn't matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;	/* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;	/* doesn't matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

        case CPU_RM9000:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
                c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
                 * the I-cache first ...
                 */
                config1 = read_c0_config1();

                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)		/* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz = lsize;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping its hardware in the "bad"
         * configuration.
         */
        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're
         * 32kB 2-way virtually indexed, so they would normally suffer from
         * aliases, but magic in the hardware deals with that for us so we
         * don't need to take care ourselves.
         */
        switch (c->cputype) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
                if ((read_c0_config7() & (1 << 16))) {
                        /*
                         * Effectively physically indexed dcache,
                         * thus no virtual aliases.
                         */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (c->cputype) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_AU1000:
        case CPU_AU1500:
        case CPU_AU1100:
        case CPU_AU1550:
        case CPU_AU1200:
        case CPU_AU1210:
        case CPU_AU1250:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }

#ifdef CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has a 4-way icache, but when using the indexed cache op,
         * one op will act on all 4 ways.
         */
        c->icache.ways = 1;
#endif

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               cpu_has_vtag_icache ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
               dcache_size >> 10, way_string[c->dcache.ways],
               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
                        "cache aliases" : "no aliases",
               c->dcache.linesz);
}
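
/*
 * Worked example for the config1 decode above (hypothetical field values):
 * with IL = 3, IS = 2 and IA = 3 we get linesz = 2 << 3 = 16 bytes,
 * sets = 64 << 2 = 256 and ways = 1 + 3 = 4, hence
 * icache_size = 256 * 4 * 16 = 16kB and waybit = __ffs(16384 / 4) = 12.
 */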

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;
        int tmp;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        tmp = 0;
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->scache.waybit = 0;	/* does not matter */

        return 1;
}
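
/*
 * How the probe works (editorial summary): each power-of-two boundary from
 * 64kB up is touched so its S-cache line holds a valid tag, then a zero
 * (invalid) tag is planted at `begin'.  Reading tags back at begin + 128kB,
 * + 256kB, ..., the first address whose index wraps around onto the planted
 * line reads back a zero tag, and that offset is the S-cache size.
 */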

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have an S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit = 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
        case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
#endif

        default:
                if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
                    c->isa_level == MIPS_CPU_ISA_M32R2 ||
                    c->isa_level == MIPS_CPU_ISA_M64R1 ||
                    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0)
         * on the early revisions of Alchemy SOCs.  It disables the bus
         * transaction overlapping and needs to be set to fix various errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata are actually silent about this bit, so we set
         * it just in case for those revisions that require it to be set
         * according to arch/mips/au1000/common/cputable.c.
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

static void __cpuinit coherency_setup(void)
{
        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit; some wire it to zero, others like Toshiba had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;
        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one...
         */
        case CPU_AU1000: /* rev. DA, HA, HB */
        case CPU_AU1100: /* rev. AB, BA, BC ?? */
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
        }
}

void __cpuinit r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;

        switch (c->cputype) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }

        probe_pcache();
        setup_scache();

        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
        if (c->dcache.linesz)
                shm_align_mask = max_t(unsigned long,
                                       c->dcache.sets * c->dcache.linesz - 1,
                                       PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE - 1;
        flush_cache_all		= cache_noop;
        __flush_cache_all	= r4k___flush_cache_all;
        flush_cache_mm		= r4k_flush_cache_mm;
        flush_cache_page	= r4k_flush_cache_page;
        flush_cache_range	= r4k_flush_cache_range;

        flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
        flush_icache_all	= r4k_flush_icache_all;
        local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
        flush_data_cache_page	= r4k_flush_data_cache_page;
        flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
        _dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
        _dma_cache_wback	= r4k_dma_cache_wback_inv;
        _dma_cache_inv		= r4k_dma_cache_inv;
#endif

        build_clear_page();
        build_copy_page();
        local_r4k___flush_cache_all(NULL);
        coherency_setup();
}