arch/microblaze/kernel/cpu/cache.c
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
static inline void __enable_icache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;         \
                                nop; "                  \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;         \
                                nop; "                  \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;         \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_DCE)                 \
                        : "memory");
}

static inline void __disable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;         \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_DCE)                 \
                        : "memory");
}

static inline void __enable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_ICE)                 \
                        : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                andi    r12, r12, ~%0;  \
                                mts     rmsr, r12;      \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_ICE)                 \
                        : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_DCE)                 \
                        : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                andi    r12, r12, ~%0;  \
                                mts     rmsr, r12;      \
                                nop; "                  \
                        :                               \
                        : "i" (MSR_DCE)                 \
                        : "memory", "r12");
}
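
/*
 * Added commentary: the *_msr variants above use the atomic msrset/msrclr
 * instructions, which are only available when the CPU was configured with
 * them (see the PVR2_USE_MSR_INSTR check in microblaze_cache_init below);
 * the *_nomsr variants fall back to a read-modify-write of rmsr through
 * the clobbered scratch register r12.
 */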

/* Helper macro for computing the limits of cache range loops
 *
 * The end address can be unaligned, which is fine for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
do {                                                                    \
        int align = ~(cache_line_length - 1);                           \
        end = min(start + cache_size, end);                             \
        start &= align;                                                 \
} while (0);
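
/*
 * Worked example of the limit computation above (illustrative numbers,
 * not from the original source): with a 16-byte line and an 8 KiB
 * (0x2000) cache, start = 0x1005, end = 0x5000 becomes
 * end = min(0x1005 + 0x2000, 0x5000) = 0x3005 and start &= ~0xf -> 0x1000,
 * so at most one whole cache's worth of lines is ever walked.
 */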

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)                     \
do {                                                                    \
        unsigned int len = cache_size - line_length;                    \
        int step = -line_length;                                        \
        WARN_ON(step >= 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, r0;                 \
                                        bgtid   %0, 1b;                 \
                                        addk    %0, %0, %1;             \
                                        " : : "r" (len), "r" (step)     \
                                        : "memory");                    \
} while (0);
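
/*
 * A rough C equivalent of CACHE_ALL_LOOP (added sketch for readability;
 * the real loop is the inline asm above, which walks the cache from the
 * top line offset down to 0, inclusive):
 *
 *      for (off = cache_size - line_length; off >= 0; off -= line_length)
 *              op(off);                (op is wic or wdc, r0 as offset)
 */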

/* Used for wdc.flush/clear, which can use rB for the offset; that is not
 * possible for simple wdc or wic.
 *
 * The start address is cache aligned.
 * The end address is not aligned: if end is aligned, I have to subtract
 * one cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not aligned, I align it because the whole line will be
 * flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                 \
do {                                                                    \
        int step = -line_length;                                        \
        int align = ~(line_length - 1);                                 \
        int count;                                                      \
        end = ((end & align) == end) ? end - line_length : end & align; \
        count = end - start;                                            \
        WARN_ON(count < 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, %1;                 \
                                        bgtid   %1, 1b;                 \
                                        addk    %1, %1, %2;             \
                                        " : : "r" (start), "r" (count), \
                                        "r" (step) : "memory");         \
} while (0);
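
/*
 * Example of the end adjustment above (illustrative values only): with a
 * 32-byte line, an already aligned end = 0x2040 becomes 0x2020
 * (end - line_length, so the next line is never touched), while an
 * unaligned end = 0x2045 is simply rounded down to 0x2040, since that
 * whole line must be processed anyway.
 */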

/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                 \
do {                                                                    \
        int volatile temp;                                              \
        int align = ~(line_length - 1);                                 \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end - start < 0);                                       \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %1, r0;                 \
                                        cmpu    %0, %1, %2;             \
                                        bgtid   %0, 1b;                 \
                                        addk    %1, %1, %3;             \
                                        " : : "r" (temp), "r" (start), "r" (end),\
                                        "r" (line_length) : "memory");  \
} while (0);

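/*
 * A rough C equivalent of CACHE_RANGE_LOOP_1 (added sketch; it mirrors
 * the open-coded loops in the #else branches below):
 *
 *      for (addr = start; addr <= end; addr += line_length)
 *              op(addr);               (op is wic or wdc, r0 as offset)
 *
 * When ASM_LOOP is defined (the default below) the asm loop macros above
 * are used; undefining it switches every function to its open-coded
 * #else loop instead.
 */
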
#define ASM_LOOP

static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif

        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;" \
                                : : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
}

/* FIXME This is blind invalidation, as expected,
 * but it can't be called on noMMU in microblaze_cache_init below
 *
 * MS: noMMU kernel won't boot if simple wdc is used
 * The reason should be that data which the kernel needs gets discarded
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                        wdc)
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.clear        %0, r0;" \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif

        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;" \
                                : : "r" (i));
#endif

        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                wdc.flush);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc.flush        %0, r0;" \
                                        : : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int) end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;" \
                                : : "r" (i));
#endif
}

/* struct for wb caches and for wt caches */
struct scache *mbc;

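/*
 * Added commentary: the rest of the kernel reaches these handlers through
 * the mbc pointer; the arch wrappers in asm/cacheflush.h are wired up
 * roughly along these lines (a sketch, not the verbatim header):
 *
 *      #define enable_dcache()                 mbc->de()
 *      #define flush_dcache_range(s, e)        mbc->dflr(s, e)
 *      #define invalidate_icache()             mbc->iin()
 */
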
/* new wb cache model */
static const struct scache wb_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model that disables IRQs and turns the cache off */
static const struct scache wt_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_msr_irq,
        .iflr = __flush_icache_range_msr_irq,
        .iin = __flush_icache_all_msr_irq,
        .iinr = __flush_icache_range_msr_irq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_msr_irq,
        .dflr = __invalidate_dcache_range_msr_irq_wt,
        .din = __invalidate_dcache_all_msr_irq,
        .dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_nomsr_irq,
        .iflr = __flush_icache_range_nomsr_irq,
        .iin = __flush_icache_all_nomsr_irq,
        .iinr = __flush_icache_range_nomsr_irq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_nomsr_irq,
        .dflr = __invalidate_dcache_range_nomsr_irq,
        .din = __invalidate_dcache_all_nomsr_irq,
        .dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer Microblaze versions */
static const struct scache wt_msr_noirq = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A   0x0c
#define CPUVER_7_20_D   0x0f

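/*
 * Added commentary: going by the define names above, 0x0c and 0x0f are the
 * version codes that cpuinfo.c assigns to the 7.20.a and 7.20.d releases,
 * so the ver_code comparisons below select behaviour by CPU release rather
 * than by a feature bit.
 */
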
#define INFO(s) printk(KERN_INFO "cache: " s "\n");

void microblaze_cache_init(void)
{
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_msr");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_msr_noirq");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
                                INFO("wt_msr");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_nomsr");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_nomsr_noirq");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
                                INFO("wt_nomsr");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }
/* FIXME Invalidation is done in U-BOOT
 * WT cache: data is already written to main memory
 * WB cache: discarding data on noMMU prevented the kernel from booting
 */
        /* invalidate_dcache(); */
        enable_dcache();

        invalidate_icache();
        enable_icache();
}