/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *   -flush_cache_dup_mm (fork)
 *   -likewise for flush_cache_mm (exit/execve)
 *   -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing should not be based on the assumption of an 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   the bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses a synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL (Zero Overhead Loops) for core cache flush loops.
 *   Converted them into iteration based loops as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In the I-cache flush routine we used to check for aliasing for every
 *   line INV. Instead now we set up routines per cache geometry and invoke
 *   them via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because the check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around it by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in the case of INV
 *    routines which would discard valid data (cause of the horrible ext2
 *    bug in the ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes a vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require a PHY addr. Thus the need to
 *   do vmalloc_to_phy.
 *  -Also added an optimisation there: for a range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. E.g. a module with
 *   Code sz of 600k would earlier flush 600k worth of cache (line-by-line),
 *   while the cache itself is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)						\
{									\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len,	\
			enb ? "" : "DISABLED (kernel-build)");		\
}

	PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
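
/*
 * Example line emitted by PR_CACHE above (illustrative values only):
 *   I-Cache		: (32K) VIPT, 2way set-asc, 64b Line
 */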

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_cache_bcr(void)
{
	struct bcr_cache ibcr, dbcr;
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (ibcr.config == 0x3)
		p_ic->assoc = 2;
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz = 0x200 << ibcr.sz;
	p_ic->ver = ibcr.ver;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (dbcr.config == 0x2)
		p_dc->assoc = 4;
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;
}
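
/*
 * Decode arithmetic, worked example (illustrative BCR field values): with
 * dbcr.sz = 5 and dbcr.line_len = 1, sz = 0x200 << 5 = 16K and
 * line_len = 16 << 1 = 32 bytes; with the 4-way assoc that is a 4K way.
 */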

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup workarounds (different flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void __cpuinit arc_cache_init(void)
{
	unsigned int temp;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
	int way_pg_ratio = way_pg_ratio;
	int dcache_does_alias;
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!ic->ver)
		goto chk_dc;

#ifdef CONFIG_ARC_HAS_ICACHE
	/* 1. Confirm some of I-cache params which Linux assumes */
	if ((ic->assoc != ARC_ICACHE_WAYS) ||
	    (ic->line_len != ARC_ICACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}
#if (CONFIG_ARC_MMU_VER > 2)
	if (ic->ver != 3) {
		if (running_on_hw)
			panic("Cache ver doesn't match MMU ver\n");

		/* For ISS - suggest the toggles to use */
		pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
	}
#endif
#endif

	/* Enable/disable I-Cache */
	temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
	temp &= ~IC_CTRL_CACHE_DISABLE;
#else
	temp |= IC_CTRL_CACHE_DISABLE;
#endif

	write_aux_reg(ARC_REG_IC_CTRL, temp);

chk_dc:
	if (!dc->ver)
		return;

#ifdef CONFIG_ARC_HAS_DCACHE
	if ((dc->assoc != ARC_DCACHE_WAYS) ||
	    (dc->line_len != ARC_DCACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}

	dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;

	/* check for D-Cache aliasing */
	if (dcache_does_alias && !cache_is_vipt_aliasing())
		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	else if (!dcache_does_alias && cache_is_vipt_aliasing())
		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
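
	/*
	 * Worked example of the check above (illustrative): a 32K 4-way D$
	 * has an 8K way. With 8K pages, way-size == PAGE_SIZE, so no
	 * aliasing; the same cache with 4K pages yields 2 colours, hence
	 * an aliasing configuration.
	 */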
#endif

	/* Set the default Invalidate Mode to "simply discard dirty lines"
	 * as this is more frequent than flush-before-invalidate
	 * Of course we toggle this default behaviour when desired
	 */
	temp = read_aux_reg(ARC_REG_DC_CTRL);
	temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
	/* Enable D-Cache: Clear Bit 0 */
	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
	/* Flush D cache */
	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
	/* Disable D cache */
	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif

	return;
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
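
/* Note: OP_FLUSH_N_INV == OP_INV | OP_FLUSH, so the (cacheop & OP_*) tests
 * in the helpers below match it for both component operations
 */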

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
		;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}

/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line, with an integral @num_lines.
	 * This however can be avoided for page sized ops since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddr bits */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
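
/*
 * Floor/ceil worked example (illustrative, assuming 32-byte lines): an
 * 8-byte op at paddr 0x...5e gets sz += 0x5e & 0x1f, i.e. 8 + 30 = 38,
 * and paddr floored to 0x...40; DIV_ROUND_UP(38, 32) = 2 lines, exactly
 * covering the line-straddling request.
 */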

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(paddr, vaddr, sz, aux);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */


#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
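
/*
 * Stuffing worked example (illustrative): with 8k pages, vaddr bit 13 is
 * the first "colour" bit. For a 64K 2-way I$ (32K way = 4 pages), bits
 * [14:13] select one of 4 aliases; (vaddr >> PAGE_SHIFT) & 0x1F drops
 * them into paddr bits [1:0], which CDU line ops otherwise ignore as
 * offset-within-line.
 */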

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * However page sized flushes can be compile time optimised.
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddr bits */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}

#else

#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes-to/reads-from it
 *
 * The idea is to defer flushing of the kernel mapping after a WRITE,
 * possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of the page are congruent
 *  -U-mapping doesn't exist yet for the page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where the kernel READs from a userspace mapped
 * page. If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
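
/*
 * Congruency example (illustrative): with an 8K D$ way and 4K pages there
 * are 2 colours; a K-mapping at 0x8000_1000 (odd page number, colour 1)
 * and a U-mapping at 0x2000_0000 (colour 0) of the same page index
 * different cache sets, so writes via one are invisible via the other.
 */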
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		set_bit(PG_arch_1, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
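
/*
 * Straddle worked example (illustrative, 8K pages): kstart = 0x7000_1ff0,
 * tot_sz = 0x20. Pass 1: off = 0x1ff0, sz = min(0x20, 0x2000 - 0x1ff0) =
 * 0x10, synced against the first page's pfn; pass 2 restarts at
 * 0x7000_2000, looks up the next (possibly discontiguous) pfn and syncs
 * the remaining 0x10.
 */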

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user or kernel vaddr (vmalloc)
 *    However, in one instance, flush_icache_range() called by kprobe (for a
 *    breakpt in builtin kernel code) passes paddr as @vaddr too, meaning the
 *    CDU operation will use a paddr to index the cache (despite VIPT). This
 *    is fine since a built-in kernel page will not have any virtual mappings
 *    (not even kernel ones). kprobe on a loadable module is different as it
 *    will have a kvaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH);
	local_irq_restore(flags);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

void flush_icache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	write_aux_reg(ARC_REG_IC_IVIC, 1);

	/* lr will not complete till the icache inv operation is over */
	read_aux_reg(ARC_REG_IC_CTRL);
	local_irq_restore(flags);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	flush_icache_all();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If the SRC page was already mapped in userspace AND its U-mapping
	 * is not congruent with the K-mapping, sync the former to the
	 * physical page so that the K-mapping in the memcpy below sees the
	 * right data
	 *
	 * Note that while @u_vaddr refers to the DST page's userspace vaddr,
	 * it is equally valid for the SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark the DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in the
	 * pagecache page directly).
	 */
	set_bit(PG_arch_1, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to the kernel
	 * mapping, sync the kernel mapping back to the physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
	} else {
		set_bit(PG_arch_1, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	set_bit(PG_arch_1, &page->flags);
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}