/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H
#include <asm/asm.h>	/* for STR(PTR) used in the protected ops below */
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	arch=r4000				\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES
extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);
#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}
#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}
#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */
#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}
#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue						\
	END_MT_DPROT							\
	}
#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
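
/*
 * Illustrative sketch (not in the original header): the prologue/epilogue
 * pairs bracket the actual cache op in the helpers below, e.g.
 *
 *	__dflush_prologue
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *	__dflush_epilogue
 *
 * Without CONFIG_MIPS_MT this is just a plain { ... } block; with
 * CONFIG_MIPS_MT it becomes a loop repeating the op mt_n_dflushes times,
 * optionally with the other thread contexts locked down.
 */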
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}
static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}
static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}
static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}
static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}
static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}
static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}
static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	arch=r4000		\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))
#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}
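
/*
 * Usage sketch (illustrative, not part of the original header): when the
 * kernel writes a signal trampoline into user memory it must push the new
 * instructions out of the D-cache and invalidate stale I-cache lines, but a
 * fault on an unmapped line must stay harmless.  The __ex_table entries in
 * the protected_* ops make exactly that safe:
 *
 *	protected_writeback_dcache_line(addr);
 *	protected_flush_icache_line(addr);
 */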
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop, so we use Hit_Writeback_Inv_D, which is supported by all R4000-style
 * caches.  Only one cacheline gets unnecessarily invalidated here, so the
 * penalty isn't overly severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}
static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}
/*
 * This one is RM7000-specific.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
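
/*
 * Illustrative note (not in the original header): each __BUILD_BLAST_CACHE
 * invocation above pastes its arguments into three inline functions.  For
 * example the (d, dcache, ..., 32, ) line generates
 *
 *	blast_dcache32();			whole D-cache, by index
 *	blast_dcache32_page(page);		one page, by hit ops
 *	blast_dcache32_page_indexed(page);	one page, by index, all ways
 */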
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
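
/*
 * Illustrative note (not in the original header): the invocations above
 * expand to blast_dcache{16,32,64}_user_page() and
 * blast_icache{16,32,64}_user_page(), which walk one page using the cachee
 * (EVA user-address) variants of the unrolled loops so an EVA kernel can
 * flush a user mapping directly.
 */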
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
	__##pfx##flush_prologue						\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
	__##pfx##flush_epilogue						\
}
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else
#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							 unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
	__##pfx##flush_prologue						\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
	__##pfx##flush_epilogue						\
}
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif /* CONFIG_EVA */
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
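
/*
 * Usage sketch (illustrative, not part of the original header): the range
 * helpers take [start, end) kernel virtual byte ranges and walk them one
 * cache line at a time, e.g. for a buffer about to be handed to a device:
 *
 *	blast_dcache_range(start, start + size);	writeback + invalidate
 *	blast_inv_dcache_range(start, start + size);	invalidate only
 *
 * protected_blast_icache_range() is the variant for possibly-unmapped (user)
 * addresses, since it tolerates the resulting faults via __ex_table.
 */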
#endif /* _ASM_R4KCACHE_H */