Commit | Line | Data |
---|---|---|
5b3b1688 DD |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2005-2007 Cavium Networks | |
7 | */ | |
8 | #include <linux/init.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/bitops.h> | |
13 | #include <linux/cpu.h> | |
14 | #include <linux/io.h> | |
15 | ||
16 | #include <asm/bcache.h> | |
17 | #include <asm/bootinfo.h> | |
18 | #include <asm/cacheops.h> | |
19 | #include <asm/cpu-features.h> | |
20 | #include <asm/page.h> | |
21 | #include <asm/pgtable.h> | |
22 | #include <asm/r4kcache.h> | |
23 | #include <asm/system.h> | |
24 | #include <asm/mmu_context.h> | |
25 | #include <asm/war.h> | |
26 | ||
27 | #include <asm/octeon/octeon.h> | |
28 | ||
/*
 * Per-core saved dcache CacheErr value, reported and cleared by
 * cache_parity_error_octeon().  Presumably written by the low-level
 * cache-error handler (except_vec2_octeon) — TODO confirm against the
 * assembly handler.
 */
unsigned long long cache_err_dcache[NR_CPUS];
30 | ||
/**
 * Octeon keeps the dcache coherent across TLB changes, so from
 * Linux's point of view it behaves like a physically tagged cache
 * and a per-page dcache flush is unnecessary.
 *
 * @addr: virtual address of the page (unused)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Intentionally a no-op on Octeon */
}
41 | ||
/*
 * Flush this core's icache with a single SYNCI.  On Octeon one synci
 * appears to flush the entire local icache (callers elsewhere in this
 * file ignore address ranges on that basis).
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
46 | ||
/*
 * Flush local I-cache for the specified range.
 *
 * @start: start address (unused)
 * @end:   end address (unused)
 *
 * The range is ignored: octeon_local_flush_icache() flushes the whole
 * local icache, which covers any sub-range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
55 | ||
/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma: VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Make prior stores visible before any core flushes its icache */
	mb();
	/* Flush this core first; remote cores are handled via IPI below */
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	/* Keep smp_processor_id() stable while we build and send IPIs */
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = vma->vm_mm->cpu_vm_mask;
	else
		mask = cpu_online_map;
	/* This core was already flushed above — drop it from the IPI set */
	cpu_clear(cpu, mask);
	/* Ask each remaining core to flush its own icache */
	for_each_cpu_mask(cpu, mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
91 | ||
92 | ||
/**
 * Called to flush the icache on all cores.
 *
 * Passing NULL flushes every online core, not just those a
 * particular mm has run on.
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}
100 | ||
101 | ||
/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm: Memory context to flush (unused)
 *
 * Intentionally a no-op.
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}
115 | ||
116 | ||
/**
 * Flush a range of kernel addresses out of the icache.
 *
 * @start: start address (unused)
 * @end:   end address (unused)
 *
 * The range is ignored: the entire icache is flushed on every core,
 * which is sufficient (if conservative) for any sub-range.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}
125 | ||
126 | ||
/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr: Address to flush
 *
 * NOTE(review): find_vma() normally requires mmap_sem to be held; no
 * lock is visibly taken here — confirm callers hold it or that this
 * path is safe.  Also note octeon_cache_init() calls this during early
 * boot when current->mm may not be a normal user mm — verify.
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
}
140 | ||
141 | ||
142 | /** | |
143 | * Flush a range out of a vma | |
144 | * | |
145 | * @vma: VMA to flush | |
146 | * @start: | |
147 | * @end: | |
148 | */ | |
149 | static void octeon_flush_cache_range(struct vm_area_struct *vma, | |
150 | unsigned long start, unsigned long end) | |
151 | { | |
152 | if (vma->vm_flags & VM_EXEC) | |
153 | octeon_flush_icache_all_cores(vma); | |
154 | } | |
155 | ||
156 | ||
157 | /** | |
158 | * Flush a specific page of a vma | |
159 | * | |
160 | * @vma: VMA to flush page for | |
161 | * @page: Page to flush | |
162 | * @pfn: | |
163 | */ | |
164 | static void octeon_flush_cache_page(struct vm_area_struct *vma, | |
165 | unsigned long page, unsigned long pfn) | |
166 | { | |
167 | if (vma->vm_flags & VM_EXEC) | |
168 | octeon_flush_icache_all_cores(vma); | |
169 | } | |
170 | ||
171 | ||
/**
 * Probe Octeon's caches and fill in current_cpu_data's icache/dcache
 * geometry.  Panics on an unrecognized Cavium CPU type.
 */
static void __devinit probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_CAVIUM_OCTEON:
		/*
		 * Decode the icache geometry from the standard MIPS
		 * Config1 register fields: IL (bits 21:19) line size,
		 * IS (bits 24:22) sets per way, IA (bits 18:16) ways - 1.
		 */
		config1 = read_c0_config1();
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		/* Octeon's icache is virtually tagged */
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		/* The dcache geometry is hard-wired per Octeon model */
		c->dcache.linesz = 128;
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		else
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type\n");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/* Only the boot CPU announces the cache configuration */
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}
231 | ||
232 | ||
/**
 * Setup the Octeon cache flush routines: install the cache-error
 * exception handler, probe the cache geometry, and point the generic
 * MIPS cache-flush hooks at the Octeon implementations.
 */
void __devinit octeon_cache_init(void)
{
	extern unsigned long ebase;
	extern char except_vec2_octeon;

	/*
	 * Copy the cache-error handler to the exception vector at
	 * ebase + 0x100, then flush the icache so all cores fetch the
	 * new handler code.
	 */
	memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
	octeon_flush_cache_sigtramp(ebase + 0x100);

	probe_octeon();

	/* No dcache aliases, so a page of alignment suffices for shm */
	shm_align_mask = PAGE_SIZE - 1;

	/* Install the Octeon versions of the generic flush hooks */
	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;

	build_clear_page();
	build_copy_page();
}
263 | ||
/**
 * Handle a cache error exception: report any pending icache/dcache
 * error for this core and clear the error state.
 *
 * @non_recoverable: non-zero if the exception cannot be recovered
 *                   from (e.g. it nested), in which case we panic
 *                   after reporting.
 */
static void cache_parity_error_octeon(int non_recoverable)
{
	unsigned long coreid = cvmx_get_core_num();
	uint64_t icache_err = read_octeon_c0_icacheerr();

	pr_err("Cache error exception:\n");
	pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
	/* Bit 0 set indicates a valid icache error; report and clear it */
	if (icache_err & 1) {
		pr_err("CacheErr (Icache) == %llx\n",
		       (unsigned long long)icache_err);
		write_octeon_c0_icacheerr(0);
	}
	/* Dcache errors were saved per-core by the low-level handler */
	if (cache_err_dcache[coreid] & 1) {
		pr_err("CacheErr (Dcache) == %llx\n",
		       (unsigned long long)cache_err_dcache[coreid]);
		cache_err_dcache[coreid] = 0;
	}

	if (non_recoverable)
		panic("Can't handle cache error: nested exception");
}
289 | ||
/**
 * Called when the exception is recoverable.
 *
 * (Comment fixed: this entry point handles the RECOVERABLE case — it
 * reports the error without panicking.)
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	cache_parity_error_octeon(0);
}
298 | ||
/**
 * Called when the exception is not recoverable.
 *
 * (Comment fixed: this entry point handles the NON-recoverable case —
 * it reports the error and then panics.)
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	cache_parity_error_octeon(1);
}
307 |