/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

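/*
 * Per-core saved dcache error status for unrecoverable errors, consumed
 * and cleared by co_cache_error_call_notifiers() below (the writer is
 * presumably the low-level cache error handler, which lives outside
 * this file).
 */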
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/**
 * Octeon automatically flushes the dcache on TLB changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 *
 * @addr: Address of the page to flush (unused)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

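/*
 * Invalidate this core's icache. The address operand ($0, i.e. zero)
 * is irrelevant here: as the range helpers below assume, this single
 * synci invalidates the entire local icache on Octeon.
 */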
static inline void octeon_local_flush_icache(void)
{
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range. The range arguments are
 * ignored; the entire local icache is invalidated.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
        octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma: VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif

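        /*
         * Push out any pending writes so newly written instructions are
         * visible before the icaches are invalidated.
         */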
        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = *cpu_online_mask;
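        /* No need to IPI ourselves; the local icache was flushed above */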
        cpumask_clear_cpu(cpu, &mask);
        for_each_cpu(cpu, &mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

        preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm: Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here
         */
}


/**
 * Flush a range of kernel addresses out of the icache
 *
 * @start: Start of the range to flush (unused; all icaches are flushed)
 * @end: End of the range to flush (unused)
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr: Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma: VMA to flush
 * @start: Start of the range to flush
 * @end: End of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma: VMA to flush page for
 * @page: Page to flush
 * @pfn: Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}

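/*
 * Octeon never needs kernel vmap ranges flushed (the dcache behaves as
 * physically tagged, per the comment at the top of this file), so this
 * should never be called; trap if it ever is.
 */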
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}

/**
 * Probe Octeon's caches
 */
static void __cpuinit probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;

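        /*
         * The icache geometry comes from the standard MIPS Config1
         * fields: IL (line size) in bits 21:19, IS (sets per way) in
         * bits 24:22 and IA (associativity - 1) in bits 18:16.
         */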
        config1 = read_c0_config1();
        switch (c->cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_notice("Primary instruction cache %ldkB, %s, %d way, "
                          "%d sets, linesize %d bytes.\n",
                          icache_size >> 10,
                          cpu_has_vtag_icache ?
                          "virtually tagged" : "physically tagged",
                          c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
                          "linesize %d bytes.\n",
                          dcache_size >> 10, c->dcache.ways,
                          c->dcache.sets, c->dcache.linesz);
        }
}

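/*
 * Install the Octeon-specific cache error handler at the cache error
 * vector (offset 0x100 from the exception base).
 */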
static void __cpuinit octeon_cache_error_setup(void)
{
        extern char except_vec2_octeon;
        set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * Set up the Octeon cache flush routines
 */
void __cpuinit octeon_cache_init(void)
{
        probe_octeon();

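        /*
         * With no dcache aliases (see above), shared mappings need only
         * be page aligned.
         */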
        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_cache_sigtramp = octeon_flush_cache_sigtramp;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;

        __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

        build_clear_page();
        build_copy_page();

        board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

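/*
 * Report a cache error to the notifier chain; if no notifier handled
 * it, log the error. @val is 0 for a recoverable error whose status is
 * read directly from the CacheErr registers, nonzero for an
 * unrecoverable dcache error whose status was saved in
 * cache_err_dcache[] beforehand.
 */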
static void co_cache_error_call_notifiers(unsigned long val)
{
        int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

        if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
                u64 dcache_err;
                unsigned long coreid = cvmx_get_core_num();
                u64 icache_err = read_octeon_c0_icacheerr();

                if (val) {
                        dcache_err = cache_err_dcache[coreid];
                        cache_err_dcache[coreid] = 0;
                } else {
                        dcache_err = read_octeon_c0_dcacheerr();
                }

                pr_err("Core%lu: Cache error exception:\n", coreid);
                pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
                if (icache_err & 1) {
                        pr_err("CacheErr (Icache) == %llx\n",
                               (unsigned long long)icache_err);
                        write_octeon_c0_icacheerr(0);
                }
                if (dcache_err & 1) {
                        pr_err("CacheErr (Dcache) == %llx\n",
                               (unsigned long long)dcache_err);
                }
        }
}

334
335 /*
336 * Called when the the exception is recoverable
337 */
338
339 asmlinkage void cache_parity_error_octeon_recoverable(void)
340 {
341 co_cache_error_call_notifiers(0);
342 }
343
/**
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        co_cache_error_call_notifiers(1);
        panic("Can't handle cache error: nested exception");
}