/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing should not be based on the assumption of an 8k pg
 *
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses a synthetic sigret stub
 *
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Convert them into iteration based loops, as opposed to
 *   while (start < end) types
 *
 *  -In I-cache flush routine we used to check for aliasing for every line INV.
 *   Instead now we set up routines per cache geometry and invoke them
 *   via function pointers.
 *
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   the vmalloc-to-phy conversion.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. For e.g. a module
 *   with code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)						\
{									\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len,	\
			enb ? "" : "DISABLED (kernel-build)");		\
}

	PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_cache_bcr(void)
{
	struct bcr_cache ibcr, dbcr;
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (ibcr.config == 0x3)
		p_ic->assoc = 2;	/* Fixed to 2-way set associative */

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz = 0x200 << ibcr.sz;
	p_ic->ver = ibcr.ver;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (dbcr.config == 0x2)
		p_dc->assoc = 4;	/* Fixed to 4-way set associative */

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;
}
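
/*
 * Worked example of the decode above (illustrative numbers only, not a
 * statement about any particular part): for ibcr.sz = 5 and
 * ibcr.line_len = 3, the I-cache geometry comes out as
 * sz = 0x200 << 5 = 16K and line_len = 8 << 3 = 64 bytes.
 */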
/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (difft flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void __cpuinit arc_cache_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
	int way_pg_ratio = way_pg_ratio;
	unsigned int temp;
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

#ifdef CONFIG_ARC_HAS_ICACHE
	/* 1. Confirm some of I-cache params which Linux assumes */
	if ((ic->assoc != ARC_ICACHE_WAYS) ||
	    (ic->line_len != ARC_ICACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}
#if (CONFIG_ARC_MMU_VER > 2)
	if (ic->ver != 3) {
		/* For ISS - suggest the toggles to use */
		pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
		panic("Cache ver doesn't match MMU ver\n");
	}
#endif
#endif

	/* Enable/disable I-Cache */
	temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
	temp &= ~IC_CTRL_CACHE_DISABLE;
#else
	temp |= IC_CTRL_CACHE_DISABLE;
#endif

	write_aux_reg(ARC_REG_IC_CTRL, temp);

#ifdef CONFIG_ARC_HAS_DCACHE
	if ((dc->assoc != ARC_DCACHE_WAYS) ||
	    (dc->line_len != ARC_DCACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}

	/* check for D-Cache aliasing */
	if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
		panic("D$ aliasing not handled right now\n");
#endif

	/* Set the default Invalidate Mode to "simply discard dirty lines"
	 * as this is more frequent than flush before invalidate.
	 * Of course we toggle this default behaviour when desired.
	 */
	temp = read_aux_reg(ARC_REG_DC_CTRL);
	temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
	/* Enable D-Cache: Clear Bit 0 */
	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
	/* Flush D cache */
	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
	/* Disable D cache */
	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif
}
#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
		;
}
/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back to the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 */
static inline void __dc_line_loop(unsigned long start, unsigned long sz,
				  int aux_reg)
{
	int num_lines, slack;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @start aligned to cache line, and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@start will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 * (see the worked example after this function)
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		slack = start & ~DCACHE_LINE_MASK;
		sz += slack;
		start -= slack;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 * But we pass phy addr for both. This works since Linux
		 * doesn't support aliasing configs for D$, yet.
		 * Thus paddr is enough to provide both tag and index.
		 */
		write_aux_reg(ARC_REG_DC_PTAG, start);
#endif
		write_aux_reg(aux_reg, start);
		start += ARC_DCACHE_LINE_LEN;
	}
}
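
/*
 * Worked example of the floor/ceil above (hypothetical numbers, assuming a
 * 64-byte line so ~DCACHE_LINE_MASK == 63): a request of start = 0x1022,
 * sz = 100 gives slack = 0x1022 & 63 = 34, so sz becomes 134 and start drops
 * back to the line boundary 0x1000; num_lines = DIV_ROUND_UP(134, 64) = 3,
 * which covers the whole original [0x1022, 0x1086) range.
 */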
/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long start, unsigned long sz,
				const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(start, sz, aux);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back to the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(start, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */
#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for passing the additional info was that no new reg was needed
 * in the CDU programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	unsigned long addr;
	int num_lines, slack;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests.
	 * However page sized flushes can be compile time optimised:
	 *  -@phy_start will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		slack = phy_start & ~ICACHE_LINE_MASK;
		sz += slack;
		phy_start -= slack;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER > 2)
	vaddr &= ~ICACHE_LINE_MASK;
	addr = phy_start;
#else
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	addr = phy_start | ((vaddr >> 13) & 0x1F);
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, addr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddr bits */
		write_aux_reg(ARC_REG_IC_IVIL, addr);
#endif
		addr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
#else

#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */
/***********************************************************
 * Exported APIs
 */
void flush_dcache_page(struct page *page)
{
	/* Make a note that dcache is not yet flushed for this page */
	set_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
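
/*
 * Illustrative pairing of the three ops above for a hypothetical DMA buffer
 * (sketch only; drivers would normally go through the generic DMA-mapping
 * API rather than call these directly):
 *  - dma_cache_wback(buf, len)     before the device reads @buf (CPU -> dev)
 *  - dma_cache_inv(buf, len)       before the CPU reads data the device wrote
 *  - dma_cache_wback_inv(buf, len) for bidirectional buffers
 */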
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
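
/*
 * Worked example of the 2-page straddle above (hypothetical 8k pages):
 * kstart = 0x7000_1ff0, kend = 0x7000_2010 gives tot_sz = 0x20.
 * Iteration 1: off = 0x1ff0, sz = 0x10 (remainder of the first page).
 * Iteration 2: off = 0, sz = 0x10 (start of the second page), after which
 * tot_sz reaches 0 and the loop exits.
 */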
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user or kernel vaddr (vmalloc)
 * However in one instance, flush_icache_range() by kprobe (for a breakpoint
 * in builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 * will use a paddr to index the cache (despite VIPT). This is fine since a
 * built-in kernel page will not have any virtual mappings (not even kernel).
 * kprobe on loadable module is different as it will have kvaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, len, OP_FLUSH);
	local_irq_restore(flags);
}
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

void __flush_dcache_page(unsigned long paddr)
{
	__dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
}
void flush_icache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	write_aux_reg(ARC_REG_IC_IVIC, 1);

	/* lr will not complete till the icache inv operation is over */
	read_aux_reg(ARC_REG_IC_CTRL);
	local_irq_restore(flags);
}
noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	flush_icache_all();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
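
/*
 * Illustrative user-space caller (sketch only, not kernel code): a JIT that
 * has just emitted instructions into a buffer would ask for I/D coherency
 * before jumping into it. The syscall number macro below is an assumption
 * about the ARC uapi headers; the current implementation above ignores the
 * arguments and simply flushes everything.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static void jit_flush_code(void *buf, unsigned int len)
{
	syscall(__NR_cacheflush, (unsigned long)buf, len, 0);
}
#endif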