/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
static int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
				IS_DISABLED_RUN(ioc_enable));

	return buf;
}

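/*
 * Illustrative sample of the resulting boot printout; the numbers below are
 * invented for a typical HS38 config, not read from real hardware:
 *
 *	I-Cache		: 64K, 4way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 2way/set, 64B Line, PIPT
 *	SLC		: 512K, 128B Line
 */
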
/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c && ioc_enable)
		ioc_exists = 1;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 *		I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size), i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], where x depends on cache-geometry, and 13 comes from
 * the standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for the additional info was that no new reg was needed in the
 * CDU programming model.
 *
 * 17:13 represented the max num of bits passable; the actual bits needed were
 * fewer, based on the num of aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from the hardware perspective, smaller page sizes aggravate
 * aliasing, meaning more vaddr bits are needed to disambiguate the
 * cache-line-op; the existing scheme of piggybacking won't work for certain
 * configurations. Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

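/*
 * Worked example of the above (illustrative, not from the original source):
 * with 8k pages, a 32K 2-way cache has a 16K way-size = 2 pages, so only 2
 * aliases are possible and vaddr bit 13 alone disambiguates; a 64K 2-way
 * cache has a 32K way-size = 4 pages, needing vaddr bits 14:13. Either way
 * the bits ride along in the low, otherwise-ignored bits of paddr:
 *
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;	(PAGE_SHIFT == 13 for 8k)
 */
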
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * Also reused for HS38 aliasing I-cache configuration
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

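/*
 * Worked example (illustrative values, not from the original source): with a
 * 64B SLC line, flushing 8 bytes at paddr 0x8000_1039 programs
 *
 *	RGN_END   = 0x8000_1039 + 8 + 63 = 0x8000_1080
 *	RGN_START = 0x8000_1039 (hardware ignores the low bits)
 *
 * covering both lines (0x8000_1000 and 0x8000_1040) that the request
 * straddles.
 */
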
/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

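/*
 * Typical usage sketch (assumed driver flow, not part of this file): for
 * streaming DMA a driver would write back dirty lines before the device
 * reads a buffer, and invalidate before the CPU consumes device-written
 * data:
 *
 *	dma_cache_wback(paddr, len);	CPU -> device (DMA_TO_DEVICE)
 *	dma_cache_inv(paddr, len);	device -> CPU (DMA_FROM_DEVICE)
 *
 * On IOC-equipped systems all three resolve to the no-op *_ioc variants.
 */
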
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * in PTE)
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom,
				    (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
EXPORT_SYMBOL(clear_user_page);

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

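/*
 * Illustrative userspace invocation (a sketch, not part of this file): a JIT
 * that has just written instructions into code_buf would typically issue
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 *
 * before jumping to the generated code, so that stale I-cache lines are
 * dropped.
 */
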
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only master CPU needs to execute rest of function:
	 *  - Assume SMP so all cores will have same cache config so
	 *    any geometry checks will be same for all
	 *  - IOC setup / dma callbacks only need to be setup once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}