/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 *
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/setjmp.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* This keeps track of which CPU is the crashing one. */
int crashing_cpu = -1;
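/*
 * cpus_in_crash tracks the CPUs that have saved their registers and
 * are spinning in the crash path; cpus_in_sr tracks the CPUs that
 * entered via soft-reset.
 */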
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;

#define CRASH_HANDLER_MAX 2
/* NULL terminated list of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
static DEFINE_SPINLOCK(crash_handlers_lock);
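/*
 * crash_handlers_lock serializes registration and unregistration of
 * shutdown handles. Note that default_machine_crash_shutdown() walks
 * the list without taking it, since at crash time the lock could be
 * held by a CPU that will never release it.
 */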

#ifdef CONFIG_SMP
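/*
 * Counts the secondary CPUs that have entered via soft-reset, so the
 * crashing CPU can wait for all of them in crash_soft_reset_check().
 */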
static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);

void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset: either the kdump process itself was
	 * invoked using soft-reset, or the user activated soft-reset
	 * because some CPU did not respond to the crash IPI.  In the
	 * soft-reset case a secondary CPU can enter this function
	 * twice: once via the IPI and once via soft-reset.  Tell the
	 * kexec CPU that this CPU entered via soft-reset and is ready
	 * to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring in the other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}

/*
 * Wait until all CPUs have entered via soft-reset.
 */
static void crash_soft_reset_check(int cpu)
{
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	cpu_clear(cpu, cpus_in_sr);
	while (atomic_read(&enter_on_soft_reset) != ncpus)
		cpu_relax();
}

static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;

	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

	/*
	 * FIXME: Until we have a way to stop other CPUs reliably, the
	 * crash CPU sends an IPI and waits for the other CPUs to
	 * respond.
	 * Delay of at least 10 seconds.
	 */
	printk(KERN_EMERG "Sending IPI to other cpus...\n");
	msecs = 10000;
	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}

	/* Would it be better to replace the trap vector here? */

	/*
	 * FIXME: If we do not get all the CPUs, one possibility is to
	 * ask the user to do a soft reset so that we get them all.
	 * Soft-reset will be used until a better mechanism is implemented.
	 */
	if (cpus_weight(cpus_in_crash) < ncpus) {
		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
			ncpus - cpus_weight(cpus_in_crash));
		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
		cpus_in_sr = CPU_MASK_NONE;
		atomic_set(&enter_on_soft_reset, 0);
		while (cpus_weight(cpus_in_crash) < ncpus)
			cpu_relax();
	}
	/*
	 * Make sure all CPUs have entered via soft-reset if the kdump
	 * was invoked using soft-reset.
	 */
	if (cpu_isset(cpu, cpus_in_sr))
		crash_soft_reset_check(cpu);
	/* Leave the IPI callback set */
}

/* Wait for all the CPUs to hit real mode, but time out if they don't come in */
#ifdef CONFIG_PPC_STD_MMU_64
static void crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	msecs = 10000;
	for (i = 0; i < NR_CPUS && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i))
				break;
			if (!cpu_online(i))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#endif

/*
 * This function will be called by secondary cpus, or by the kexec cpu
 * if soft-reset is activated to stop some CPUs.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int msecs = 5;

	local_irq_save(flags);
	/* Wait up to 5ms if the kexec CPU has not entered yet. */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/*
			 * Either the kdump image is not loaded or the
			 * kdump process was not started - probably xmon
			 * exited using 'x' (exit and recover) or
			 * kexec_should_crash() failed for all running tasks.
			 */
			cpu_clear(cpu, cpus_in_sr);
			local_irq_restore(flags);
			return;
		}
		mdelay(1);
		cpu_relax();
	}
	if (cpu == crashing_cpu) {
		/*
		 * The panic CPU will enter this function only via
		 * soft-reset.  Wait until all secondary CPUs have
		 * entered, then start the kexec boot.
		 */
		crash_soft_reset_check(cpu);
		cpu_set(crashing_cpu, cpus_in_crash);
		if (ppc_md.kexec_cpu_down)
			ppc_md.kexec_cpu_down(1, 0);
		machine_kexec(kexec_crash_image);
		/* NOTREACHED */
	}
	crash_ipi_callback(regs);
}

#else
static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * Move the secondaries to us so that we can copy the new
	 * kernel's 0-0x100 safely.
	 *
	 * Do this if kexec in setup.c?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}

void crash_kexec_secondary(struct pt_regs *regs)
{
	cpus_in_sr = CPU_MASK_NONE;
}
#endif
#ifdef CONFIG_SPU_BASE

#include <asm/spu.h>
#include <asm/spu_priv1.h>

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS 16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

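/*
 * Save each SPU's state, then stop it by clearing the MFC master run
 * control bit, so the SPUs are quiesced before the kdump kernel boots.
 */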
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

void crash_register_spus(struct list_head *list)
{
	struct spu *spu;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}
}

#else
static inline void crash_kexec_stop_spus(void)
{
}
#endif /* CONFIG_SPU_BASE */

/*
 * Register a function to be called on shutdown.  Only use this if you
 * can't reset your device in the second kernel.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handle at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, "
		       "not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);
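
/*
 * Illustrative sketch of a caller (the "foo" names are hypothetical):
 *
 *	static void foo_crash_shutdown(void)
 *	{
 *		out_be32(foo_reset_reg, 1);	(quiesce the device)
 *	}
 *
 *	rc = crash_shutdown_register(foo_crash_shutdown);
 *
 * Handlers run from default_machine_crash_shutdown() with interrupts
 * hard-disabled, so they must not sleep or take locks that another
 * (now stopped) CPU might hold.
 */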

int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (crash_shutdown_handles[i] == handler)
			break;

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		rc = 1;
	} else {
		/* Shift handles down */
		for (; crash_shutdown_handles[i]; i++)
			crash_shutdown_handles[i] =
				crash_shutdown_handles[i+1];
		rc = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);

static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;

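/*
 * If a shutdown handle faults, this debugger hook longjmps back to the
 * setjmp in default_machine_crash_shutdown() so that the remaining
 * handles still get a chance to run.
 */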
static int handle_fault(struct pt_regs *regs)
{
	if (crash_shutdown_cpu == smp_processor_id())
		longjmp(crash_shutdown_buf, 1);
	return 0;
}

void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of the crashing cpu.  It will be used in
	 * machine_kexec so that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
	crash_kexec_wait_realmode(crashing_cpu);
#endif

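	/*
	 * EOI any interrupt that was in progress and shut down all
	 * enabled sources, so the kdump kernel starts with the
	 * interrupt controller in a quiesced state.
	 */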
	for_each_irq(i) {
		struct irq_desc *desc = irq_to_desc(i);

		if (!desc || !desc->chip || !desc->chip->eoi)
			continue;

		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(i);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->shutdown(i);
	}

	/*
	 * Call the registered shutdown routines safely.  Swap out
	 * __debugger_fault_handler, and replace it on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	crash_kexec_stop_spus();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}