arch/arc/mm/cache_arc700.c
/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing should not be based on assumption of 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Converted them into iteration based loops, as opposed to the
 *   while (start < end) type
 *
 * Vineetg: July 2009
 *  -In I-cache flush routine we used to check for aliasing for every line INV.
 *   Instead now we setup routines per cache geometry and invoke them
 *   via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug
 *    in ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc_to_phy.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. For e.g. a module
 *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
#define ARC_REG_IC_IVIC		0x10
#define ARC_REG_IC_CTRL		0x11
#define ARC_REG_IC_IVIL		0x19
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG		0x1E
#endif

/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	0x1

/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR		0x72	/* Build Config reg */
#define ARC_REG_DC_IVDC		0x47
#define ARC_REG_DC_CTRL		0x48
#define ARC_REG_DC_IVDL		0x4A
#define ARC_REG_DC_FLSH		0x4B
#define ARC_REG_DC_FLDL		0x4C
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG		0x5C
#endif

/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH	0x40
#define DC_CTRL_FLUSH_STATUS	0x100

char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)						\
{									\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len,	\
			enb ?  "" : "DISABLED (kernel-build)");		\
}

	PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
			"I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
			"D-Cache");

	return buf;
}
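
/*
 * Illustrative boot-log output of the above (the geometry values are just an
 * example, not a statement about any particular silicon):
 *
 *	I-Cache		: (32K) VIPT, 2way set-asc, 64b Line
 *	D-Cache		: (32K) VIPT, 4way set-asc, 64b Line
 */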

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (ibcr.config == 0x3)
		p_ic->assoc = 2;
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz = 0x200 << ibcr.sz;
	p_ic->ver = ibcr.ver;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (dbcr.config == 0x2)
		p_dc->assoc = 4;
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;
}
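
/*
 * Worked example of the decode above (purely illustrative field values):
 * a D-cache BCR with config=0x2, sz=6, line_len=2 would yield
 *	assoc    = 4
 *	line_len = 16 << 2    = 64 bytes
 *	sz       = 0x200 << 6 = 32K
 * i.e. a 32K, 4-way set associative D-cache with 64 byte lines.
 */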

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (different flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void __cpuinit arc_cache_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
	unsigned int dcache_does_alias, temp;
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!ic->ver)
		goto chk_dc;

#ifdef CONFIG_ARC_HAS_ICACHE
	/* 1. Confirm some of I-cache params which Linux assumes */
	if ((ic->assoc != ARC_ICACHE_WAYS) ||
	    (ic->line_len != ARC_ICACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}
#if (CONFIG_ARC_MMU_VER > 2)
	if (ic->ver != 3) {
		if (running_on_hw)
			panic("Cache ver doesn't match MMU ver\n");

		/* For ISS - suggest the toggles to use */
		pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");

	}
#endif
#endif

	/* Enable/disable I-Cache */
	temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
	temp &= ~IC_CTRL_CACHE_DISABLE;
#else
	temp |= IC_CTRL_CACHE_DISABLE;
#endif

	write_aux_reg(ARC_REG_IC_CTRL, temp);

chk_dc:
	if (!dc->ver)
		return;

#ifdef CONFIG_ARC_HAS_DCACHE
	if ((dc->assoc != ARC_DCACHE_WAYS) ||
	    (dc->line_len != ARC_DCACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}

	dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;

	/* check for D-Cache aliasing */
	if (dcache_does_alias && !cache_is_vipt_aliasing())
		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	else if (!dcache_does_alias && cache_is_vipt_aliasing())
		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
#endif

	/* Set the default Invalidate Mode to "simply discard dirty lines"
	 * as this is more frequent than flush before invalidate
	 * Of course we toggle this default behaviour when desired
	 */
	temp = read_aux_reg(ARC_REG_DC_CTRL);
	temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
	/* Enable D-Cache: Clear Bit 0 */
	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
	/* Flush D cache */
	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
	/* Disable D cache */
	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif

	return;
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
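/*
 * Note: OP_FLUSH_N_INV is (OP_INV | OP_FLUSH), so the helpers below can test
 * "cacheop & OP_INV" / "cacheop & OP_FLUSH" and have the combined op satisfy
 * both checks.
 */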

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
		;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}

/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
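
/*
 * Worked example of the floor/ceil above (numbers are illustrative only,
 * assuming 64 byte D-cache lines, i.e. ~DCACHE_LINE_MASK == 0x3F):
 *	paddr = 0x80001234, sz = 0x100
 *	=> sz    += 0x1234 & 0x3F	-> 0x134
 *	=> paddr &= ~0x3F		-> 0x80001200
 *	=> num_lines = ceil(0x134 / 64)	-> 5 lines operated upon
 */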

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(paddr, vaddr, sz, aux);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}

#else
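/* No D-cache configured: the line/entire-op helpers compile away to nothing */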

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */


#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
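
/*
 * Illustrative example of the MMU v1/v2 bit-stuffing described above
 * (addresses are made up; PAGE_SHIFT assumed to be 13, i.e. 8k pages):
 *
 *	vaddr = 0x70006000, paddr = 0x80406000
 *	(vaddr >> 13) & 0x1F = 0x03
 *	paddr |= 0x03  ->  0x80406003 written to the CDU line-op register,
 *	i.e. vaddr bits [17:13] ride in paddr bits [4:0], which the CDU would
 *	otherwise ignore as offset-within-line.
 */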

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * However page sized flushes can be compile time optimised.
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}

#else

#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		set_bit(PG_arch_1, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);


void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
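
/*
 * Illustrative use of the above by a driver doing non-coherent DMA (buf_paddr
 * and len are hypothetical names): wback before a device reads the buffer,
 * inv before the CPU reads data a device has written, wback_inv when the
 * buffer is reused in both directions:
 *
 *	dma_cache_wback(buf_paddr, len);	// CPU -> device
 *	dma_cache_inv(buf_paddr, len);		// device -> CPU
 */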

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
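
/*
 * Illustrative walk-through of the 2-page straddle case above, assuming
 * 8k pages and a made-up vmalloc range:
 *	kstart = 0x70001f00, kend = 0x70002100  (tot_sz = 0x200)
 *	iter 1: off = 0x1f00, sz = min(0x200, 0x2000 - 0x1f00) = 0x100
 *		-> sync [0x70001f00, 0x70002000) using that page's pfn
 *	iter 2: off = 0x0000, sz = 0x100
 *		-> sync [0x70002000, 0x70002100) using the next page's pfn
 */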

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user or kernel vaddr (vmalloc)
 * However in one instance, flush_icache_range() by kprobe (for a breakpt in
 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 * use a paddr to index the cache (despite VIPT). This is fine since a
 * built-in kernel page will not have any virtual mappings (not even kernel)
 * kprobe on loadable module is different as it will have kvaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	local_irq_restore(flags);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

void flush_icache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	write_aux_reg(ARC_REG_IC_IVIC, 1);

	/* lr will not complete till the icache inv operation is over */
	read_aux_reg(ARC_REG_IC_CTRL);
	local_irq_restore(flags);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	flush_icache_all();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	set_bit(PG_arch_1, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
	} else {
		set_bit(PG_arch_1, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	set_bit(PG_arch_1, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
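
/*
 * Sketch of how a user space JIT might invoke this (illustrative only; the
 * syscall number macro exposed by the arch headers is assumed here to be
 * __NR_cacheflush, and the start/sz/flags arguments are currently ignored
 * by the implementation above, which just does flush_cache_all()):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	emit_code(buf, len);
 *	syscall(__NR_cacheflush, (uint32_t)(uintptr_t)buf, len, 0);
 *	((void (*)(void))buf)();
 */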