Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Inline assembly cache operations. | |
7 | * | |
8 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) | |
9 | * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org) | |
10 | * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org) | |
11 | */ | |
12 | #ifndef _ASM_R4KCACHE_H | |
13 | #define _ASM_R4KCACHE_H | |
14 | ||
15 | #include <asm/asm.h> | |
16 | #include <asm/cacheops.h> | |
17 | ||
18 | /* | |
19 | * This macro returns a properly sign-extended address suitable as base address | |
20 | * for indexed cache operations. Two issues here: | |
21 | * | |
22 | * - The MIPS32 and MIPS64 specs permit an implementation to directly derive | |
23 | * the index bits from the virtual address. This breaks with tradition | |
24 | * set by the R4000. To keep unpleasant surprises from happening we pick | |
25 | * an address in KSEG0 / CKSEG0. | |
26 | * - We need a properly sign extended address for 64-bit code. To get away | |
27 | * without ifdefs we let the compiler do it by a type cast. | |
28 | */ | |
29 | #define INDEX_BASE CKSEG0 | |
30 | ||
/*
 * Perform a single cache operation 'op' on the line containing 'addr'.
 * The "m" constraint hands the assembler a real memory operand (so the
 * compiler also knows memory may be touched); "i" forces 'op' to be a
 * compile-time constant cacheop code from <asm/cacheops.h>.
 *
 * Fix: the original ".set mips3" string carried a stray "\n\t" in the
 * middle, emitting a bogus indented blank line into the assembler
 * output; it is now a single clean directive line.
 */
#define cache_op(op,addr)					\
	__asm__ __volatile__(					\
	"	.set	noreorder			\n"	\
	"	.set	mips3				\n"	\
	"	cache	%0, %1				\n"	\
	"	.set	mips0				\n"	\
	"	.set	reorder"				\
	:							\
	: "i" (op), "m" (*(unsigned char *)(addr)))
40 | ||
/*
 * One-line cache maintenance wrappers.  The *_indexed variants use
 * Index_* cacheops (the line is selected by the index bits of 'addr',
 * normally built from INDEX_BASE); the others use Hit_* cacheops (the
 * line is selected by address match).  I = primary I-cache, D = primary
 * D-cache, SD = secondary cache.
 */

/* Invalidate one primary I-cache line, selected by index. */
static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

/* Writeback & invalidate one primary D-cache line, selected by index. */
static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

/* Writeback & invalidate one secondary cache line, selected by index. */
static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

/* Invalidate the I-cache line containing 'addr', if present. */
static inline void flush_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I, addr);
}

/* Writeback & invalidate the D-cache line containing 'addr', if present. */
static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

/* Invalidate (no writeback!) the D-cache line containing 'addr'. */
static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

/* Invalidate (no writeback!) the secondary cache line containing 'addr'. */
static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

/* Writeback & invalidate the secondary cache line containing 'addr'. */
static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
80 | ||
/*
 * The protected_* variants below are for badland addresses like signal
 * trampolines: 'addr' may be unmapped or otherwise fault.  The
 * __ex_table entry pairs the cache op at label 1 with the fixup at
 * label 2, so a fault on the cacheop resumes after it instead of being
 * treated as fatal.
 */

/* Invalidate the I-cache line containing a possibly-faulting 'addr'. */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
	".set noreorder\n\t"
	".set mips3\n"
	"1:\tcache %0,(%1)\n"
	"2:\t.set mips0\n\t"
	".set reorder\n\t"
	".section\t__ex_table,\"a\"\n\t"
	STR(PTR)"\t1b,2b\n\t"
	".previous"
	:
	: "i" (Hit_Invalidate_I), "r" (addr));
}
98 | ||
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches. We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */

/*
 * Writeback & invalidate the D-cache line containing a possibly-faulting
 * 'addr'; a fault on the cacheop is fixed up via the __ex_table entry
 * (resume at label 2) rather than being fatal.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
	".set noreorder\n\t"
	".set mips3\n"
	"1:\tcache %0,(%1)\n"
	"2:\t.set mips0\n\t"
	".set reorder\n\t"
	".section\t__ex_table,\"a\"\n\t"
	STR(PTR)"\t1b,2b\n\t"
	".previous"
	:
	: "i" (Hit_Writeback_Inv_D), "r" (addr));
}
119 | ||
/*
 * Writeback & invalidate the secondary-cache line containing a
 * possibly-faulting 'addr'; the __ex_table entry makes a fault on the
 * cacheop non-fatal (execution resumes at label 2).
 */
static inline void protected_writeback_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
	".set noreorder\n\t"
	".set mips3\n"
	"1:\tcache %0,(%1)\n"
	"2:\t.set mips0\n\t"
	".set reorder\n\t"
	".section\t__ex_table,\"a\"\n\t"
	STR(PTR)"\t1b,2b\n\t"
	".previous"
	:
	: "i" (Hit_Writeback_Inv_SD), "r" (addr));
}
134 | ||
/*
 * This one is RM7000-specific: invalidate one page worth of tertiary
 * cache using the Page_Invalidate_T cacheop.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
142 | ||
/*
 * Issue the cacheop 'op' on 32 consecutive 16-byte cache lines (0x200
 * bytes) starting at 'base'.  Hand-unrolled so the callers' blast loops
 * pay their loop overhead only once per 32 lines.  'op' must be a
 * compile-time constant ("i" constraint).
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set mips0					\n"	\
	"	.set reorder					\n"	\
	:							\
	: "r" (base),						\
	  "i" (op));
168 | ||
169 | static inline void blast_dcache16(void) | |
170 | { | |
171 | unsigned long start = INDEX_BASE; | |
172 | unsigned long end = start + current_cpu_data.dcache.waysize; | |
173 | unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit; | |
42a3b4f2 | 174 | unsigned long ws_end = current_cpu_data.dcache.ways << |
1da177e4 LT |
175 | current_cpu_data.dcache.waybit; |
176 | unsigned long ws, addr; | |
177 | ||
42a3b4f2 | 178 | for (ws = 0; ws < ws_end; ws += ws_inc) |
1da177e4 LT |
179 | for (addr = start; addr < end; addr += 0x200) |
180 | cache16_unroll32(addr|ws,Index_Writeback_Inv_D); | |
181 | } | |
182 | ||
183 | static inline void blast_dcache16_page(unsigned long page) | |
184 | { | |
185 | unsigned long start = page; | |
186 | unsigned long end = start + PAGE_SIZE; | |
187 | ||
188 | do { | |
189 | cache16_unroll32(start,Hit_Writeback_Inv_D); | |
190 | start += 0x200; | |
191 | } while (start < end); | |
192 | } | |
193 | ||
194 | static inline void blast_dcache16_page_indexed(unsigned long page) | |
195 | { | |
196 | unsigned long start = page; | |
197 | unsigned long end = start + PAGE_SIZE; | |
198 | unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit; | |
199 | unsigned long ws_end = current_cpu_data.dcache.ways << | |
200 | current_cpu_data.dcache.waybit; | |
201 | unsigned long ws, addr; | |
202 | ||
42a3b4f2 RB |
203 | for (ws = 0; ws < ws_end; ws += ws_inc) |
204 | for (addr = start; addr < end; addr += 0x200) | |
1da177e4 LT |
205 | cache16_unroll32(addr|ws,Index_Writeback_Inv_D); |
206 | } | |
207 | ||
/* Invalidate the entire primary I-cache by index (16-byte lines). */
static inline void blast_icache16(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	/* outer: each way (ws); inner: 32 lines (0x200 bytes) per step */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws,Index_Invalidate_I);
}

/* Invalidate one page of I-cache lines by address match (hit ops). */
static inline void blast_icache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	do {
		cache16_unroll32(start,Hit_Invalidate_I);
		start += 0x200;
	} while (start < end);
}

/* Invalidate, in every way, the I-cache indices covered by 'page'. */
static inline void blast_icache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws,Index_Invalidate_I);
}
246 | ||
/* Writeback & invalidate the whole secondary cache by index (16-byte lines). */
static inline void blast_scache16(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
260 | ||
261 | static inline void blast_scache16_page(unsigned long page) | |
262 | { | |
263 | unsigned long start = page; | |
264 | unsigned long end = page + PAGE_SIZE; | |
265 | ||
266 | do { | |
267 | cache16_unroll32(start,Hit_Writeback_Inv_SD); | |
268 | start += 0x200; | |
269 | } while (start < end); | |
270 | } | |
271 | ||
/* Writeback & invalidate, in every way, the s-cache indices covering 'page'. */
static inline void blast_scache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
285 | ||
/*
 * Issue the cacheop 'op' on 32 consecutive 32-byte cache lines (0x400
 * bytes) starting at 'base'.  Same hand-unrolled scheme as
 * cache16_unroll32, just a 32-byte line stride.
 */
#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set mips0					\n"	\
	"	.set reorder					\n"	\
	:							\
	: "r" (base),						\
	  "i" (op));
311 | ||
/* Writeback & invalidate the entire primary D-cache by index (32-byte lines). */
static inline void blast_dcache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	/* outer: each way (ws); inner: 32 lines (0x400 bytes) per step */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}

/* Writeback & invalidate one page of D-cache lines by address match. */
static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	do {
		cache32_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x400;
	} while (start < end);
}

/* Writeback & invalidate, in every way, the D-cache indices covering 'page'. */
static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}
350 | ||
/* Invalidate the entire primary I-cache by index (32-byte lines). */
static inline void blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}

/* Invalidate one page of I-cache lines by address match (hit ops). */
static inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	do {
		cache32_unroll32(start,Hit_Invalidate_I);
		start += 0x400;
	} while (start < end);
}

/* Invalidate, in every way, the I-cache indices covered by 'page'. */
static inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}
389 | ||
/* Writeback & invalidate the whole secondary cache by index (32-byte lines). */
static inline void blast_scache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
403 | ||
404 | static inline void blast_scache32_page(unsigned long page) | |
405 | { | |
406 | unsigned long start = page; | |
407 | unsigned long end = page + PAGE_SIZE; | |
408 | ||
409 | do { | |
410 | cache32_unroll32(start,Hit_Writeback_Inv_SD); | |
411 | start += 0x400; | |
412 | } while (start < end); | |
413 | } | |
414 | ||
/* Writeback & invalidate, in every way, the s-cache indices covering 'page'. */
static inline void blast_scache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
428 | ||
/*
 * Issue the cacheop 'op' on 32 consecutive 64-byte cache lines (0x800
 * bytes) starting at 'base'.
 */
#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set mips0					\n"	\
	"	.set reorder					\n"	\
	:							\
	: "r" (base),						\
	  "i" (op));
454 | ||
/* Invalidate the entire primary I-cache by index (64-byte lines). */
static inline void blast_icache64(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	/* outer: each way (ws); inner: 32 lines (0x800 bytes) per step */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws,Index_Invalidate_I);
}

/* Invalidate one page of I-cache lines by address match (hit ops). */
static inline void blast_icache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	do {
		cache64_unroll32(start,Hit_Invalidate_I);
		start += 0x800;
	} while (start < end);
}

/* Invalidate, in every way, the I-cache indices covered by 'page'. */
static inline void blast_icache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws,Index_Invalidate_I);
}
493 | ||
/* Writeback & invalidate the whole secondary cache by index (64-byte lines). */
static inline void blast_scache64(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
507 | ||
508 | static inline void blast_scache64_page(unsigned long page) | |
509 | { | |
510 | unsigned long start = page; | |
511 | unsigned long end = page + PAGE_SIZE; | |
512 | ||
513 | do { | |
514 | cache64_unroll32(start,Hit_Writeback_Inv_SD); | |
515 | start += 0x800; | |
516 | } while (start < end); | |
517 | } | |
518 | ||
/* Writeback & invalidate, in every way, the s-cache indices covering 'page'. */
static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
532 | ||
/*
 * Issue the cacheop 'op' on 32 consecutive 128-byte cache lines
 * (0x1000 bytes) starting at 'base'.
 */
#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set mips0					\n"	\
	"	.set reorder					\n"	\
	:							\
	: "r" (base),						\
	  "i" (op));
558 | ||
/* Writeback & invalidate the whole secondary cache by index (128-byte lines). */
static inline void blast_scache128(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x1000)
			cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
572 | ||
573 | static inline void blast_scache128_page(unsigned long page) | |
574 | { | |
575 | unsigned long start = page; | |
576 | unsigned long end = page + PAGE_SIZE; | |
577 | ||
578 | do { | |
579 | cache128_unroll32(start,Hit_Writeback_Inv_SD); | |
580 | start += 0x1000; | |
581 | } while (start < end); | |
582 | } | |
583 | ||
/* Writeback & invalidate, in every way, the s-cache indices covering 'page'. */
static inline void blast_scache128_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x1000)
			cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}
597 | ||
598 | #endif /* _ASM_R4KCACHE_H */ |