/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cm.h>
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func)(void *info), void *info)
{
	preempt_disable();

	/*
	 * The Coherent Manager propagates address-based cache ops to other
	 * cores but not index-based ops. However, r4k_on_each_cpu is used
	 * in both cases, so there is no easy way to tell what kind of op is
	 * executed on the other cores. The best we can probably do is
	 * to restrict that call when a CM is not present, because both
	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
	 */
	if (!mips_cm_present())
		smp_call_function_many(&cpu_foreign_map, func, info, 1);
	func(info);
	preempt_enable();
}
#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
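/*
 * Note: index-based cache ops select a line by set and way rather than by
 * physical address, so they only act on the cache of the CPU that issues
 * them; under the CMP/CPS SMP protocols another core may hold the line,
 * which is why index ops are considered unsafe there.
 */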
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
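/*
 * Background on the workaround above (an assumption based on the R4600
 * errata, not spelled out in this file): on R4600 v2.x a hit cache op can
 * misbehave unless the processor's internal refill buffer is empty, so an
 * uncached load is issued first to drain it; on R4600 v1.x a short delay
 * (the nops) before the cache op is reported to be sufficient.
 */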
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	switch (dc_lsize) {
	case 0:
		r4k_blast_dcache_page = (void *)cache_noop;
		break;
	case 16:
		r4k_blast_dcache_page = blast_dcache16_page;
		break;
	case 32:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
		break;
	case 64:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
		break;
	case 128:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
		break;
	default:
		break;
	}
}
#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif
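/*
 * Note (assumption based on the EVA design): with CONFIG_EVA enabled, user
 * pages are not reachable through the normal kernel mapping, so the
 * *_user_page variants use the EVA flavour of the cache ops to operate on
 * user-space virtual addresses; without EVA the plain kernel-address
 * variants suffice, hence the #define alias above.
 */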
static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}
void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
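/*
 * The helpers below invalidate the icache while executing from it.  The
 * trick (as I read the code): the loop body fits in a 1kB chunk, so the code
 * first aligns itself to an even 1kB boundary and invalidates the odd
 * chunks, then jumps to an odd boundary and invalidates the even chunks;
 * it never invalidates the chunk it is currently executing from.
 */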
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}
#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif
static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}
static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void *args)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
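/*
 * has_valid_asid() returns non-zero when the mm has been assigned an ASID
 * (a live MMU context) and may therefore have left footprints in a cache:
 * on MT SMP any online CPU counts, otherwise only the local CPU does.
 */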
static inline int has_valid_asid(const struct mm_struct *mm)
{
#ifdef CONFIG_MIPS_MT_SMP
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!has_valid_asid(vma->vm_mm))
		return;

	/*
	 * If dcache can alias, we must blast it since mapping is changing.
	 * If executable, we must ensure any dirty lines are written back far
	 * enough to be visible to icache.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_blast_dcache();
	/* If executable, blast stale lines from icache */
	if (exec)
		r4k_blast_icache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || exec)
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches, but the R1x000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapcount(page) &&
				!Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}
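/*
 * In an atomic context we cannot IPI the other CPUs (the cross-CPU call
 * might sleep or deadlock), so in that case only the local dcache is
 * flushed; callers in atomic context are expected to tolerate this.
 */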
static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			protected_blast_icache_range(start, end);
			break;
		}
	}
#ifdef CONFIG_EVA
	/*
	 * Due to all possible segment mappings, there might be cache aliases
	 * caused by the bootloader being in non-EVA mode and the CPU switching
	 * to EVA during early kernel init. It's best to flush the scache
	 * to avoid having secondary cores fetch stale data and crash the
	 * kernel.
	 */
	bc_wback_inv(start, (end - start));
	__sync();
#endif
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
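/*
 * The hooks below implement cache maintenance for non-coherent DMA: before
 * a device reads memory, dirty lines must be written back; before the CPU
 * reads a buffer written by a device, stale lines must be invalidated.
 */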
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  Solved
			 * by aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			".set pop\n\t"
			"1:\n\t"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
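/*
 * Only virtually tagged icaches need this: a physically tagged icache stays
 * consistent across address-space changes, while a VIVT icache may hold
 * stale lines for a recycled ASID and has to be blasted wholesale (an
 * assumption drawn from how flush_icache_all is wired up below).
 */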
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;
	int present = 0;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.  Also disable the synonym tag update feature
	 * where available.  In this case no opportunistic tag update will
	 * happen where a load causes a virtual address miss but a physical
	 * address hit during a D-cache look-up.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			present = 1;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			present = 1;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}

	return present;
}
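/*
 * b5k_instruction_hazard() below relies on the BMIPS5000 property that the
 * icache fills coherently from the dcache; executing a long enough run of
 * nops drains the write buffers and pipeline so that subsequently fetched
 * instructions see prior stores (my reading of why it can stand in for the
 * heavier flush routines).
 */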
static void b5k_instruction_hazard(void)
{
	__sync();
	__sync();
	__asm__ __volatile__(
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	: : : "memory");
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	int has_74k_erratum = 0;
	unsigned long config1;
	unsigned int lsize;
	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
= 1 << (12 + ((config
& CONF_IC
) >> 9));
997 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
1001 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
1002 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
1004 c
->dcache
.waybit
= 0;
1006 c
->options
|= MIPS_CPU_CACHE_CDEX_P
| MIPS_CPU_PREFETCH
;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;
	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = 0;
		break;
	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
	 * virtually indexed so normally they'd suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		break;

	case CPU_74K:
	case CPU_1074K:
		has_74k_erratum = alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_I6400:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_BMIPS5000:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		/* Cache aliases are handled in hardware; allow HIGHMEM */
		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
		 * one op will act on all 4 ways
		 */
		c->icache.ways = 1;
	}
	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
static void probe_vcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	if (current_cpu_type() != CPU_LOONGSON3)
		return;

	config2 = read_c0_config2();
	if ((lsize = ((config2 >> 20) & 15)))
		c->vcache.linesz = 2 << lsize;
	else
		c->vcache.linesz = lsize;

	c->vcache.sets = 64 << ((config2 >> 24) & 15);
	c->vcache.ways = 1 + ((config2 >> 16) & 15);

	vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;

	c->vcache.waybit = 0;

	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
		      c->scache.ways *
		      c->scache.linesz;
	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * Au1100 errata actually keep silent about this bit, so we set it
	 * just in case for those revisions that require it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit, and some wire it to zero; others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	probe_vcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;

	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	}
}
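/*
 * Power management hook: when a core aborts or leaves a low-power state its
 * c0_config may have been reset, so coherency_setup() is re-run to restore
 * the kernel's cache coherency attribute.
 */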
#ifdef CONFIG_CPU_PM
static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);
#endif