/*
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
 * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>
#include <linux/smp.h>
#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/timer.h>

#include <mach_traps.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;
/*
 * nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);

unsigned int nmi_watchdog = NMI_NONE;
EXPORT_SYMBOL(nmi_watchdog);
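
/*
 * nmi_watchdog selects the watchdog mode: NMI_NONE (disabled, the default),
 * NMI_LOCAL_APIC (driven by a local APIC performance counter) or NMI_IO_APIC
 * (driven by the timer interrupt routed through the IO-APIC as an NMI). It is
 * normally chosen via the "nmi_watchdog=" boot parameter parsed further down.
 */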
static int panic_on_timeout;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static inline unsigned int get_nmi_count(int cpu)
{
#ifdef CONFIG_X86_64
        return cpu_pda(cpu)->__nmi_count;
#else
        return nmi_count(cpu);
#endif
}
static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
        return atomic_read(&mce_entry) > 0;
#endif
        return 0;
}
/*
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active when highres/dyntick is on.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
#ifdef CONFIG_X86_64
        return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
#else
        return per_cpu(irq_stat, cpu).apic_timer_irqs +
                per_cpu(irq_stat, cpu).irq0_irqs;
#endif
}
/*
 * The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
        local_irq_enable_in_hardirq();
        /*
         * Intentionally don't use cpu_relax here. This is
         * to make sure that the performance counter really ticks,
         * even if there is a simulator or similar that catches the
         * pause instruction. On a real HT machine this is fine because
         * all other CPUs are busy with "useless" delay loops and don't
         * care if they get somewhat fewer cycles.
         */
        while (endflag == 0)
                mb();
}
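
/*
 * nmi_cpu_busy() is only used by the self-test below: check_nmi_watchdog()
 * runs it on the other CPUs via smp_call_function() so that their
 * performance counters keep ticking, and releases them by setting endflag
 * once the test has finished.
 */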
static void report_broken_nmi(int cpu, int *prev_nmi_count)
{
        printk(KERN_CONT "\n");

        printk(KERN_WARNING
                "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
                cpu, prev_nmi_count[cpu], get_nmi_count(cpu));

        printk(KERN_WARNING
                "Please report this to bugzilla.kernel.org,\n");
        printk(KERN_WARNING
                "and attach the output of the 'dmesg' command.\n");

        per_cpu(wd_enabled, cpu) = 0;
        atomic_dec(&nmi_active);
}
static void __acpi_nmi_disable(void *__unused)
{
        apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}
int __init check_nmi_watchdog(void)
{
        unsigned int *prev_nmi_count;
        int cpu;

        if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
                return 0;

        prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
        if (!prev_nmi_count)
                goto error;

        printk(KERN_INFO "Testing NMI watchdog ... ");

        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);

        for_each_possible_cpu(cpu)
                prev_nmi_count[cpu] = get_nmi_count(cpu);
        local_irq_enable();
        mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

        for_each_online_cpu(cpu) {
                if (!per_cpu(wd_enabled, cpu))
                        continue;
                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
                        report_broken_nmi(cpu, prev_nmi_count);
        }
        endflag = 1;
        if (!atomic_read(&nmi_active)) {
                kfree(prev_nmi_count);
                atomic_set(&nmi_active, -1);
                goto error;
        }
        printk("OK.\n");

        /*
         * now that we know it works we can reduce NMI frequency to
         * something more reasonable; makes a difference in some configs
         */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = lapic_adjust_nmi_hz(1);

        kfree(prev_nmi_count);
        return 0;

error:
        if (nmi_watchdog == NMI_IO_APIC) {
                if (!timer_through_8259)
                        disable_8259A_irq(0);
                on_each_cpu(__acpi_nmi_disable, NULL, 1);
        }
        return -1;
}
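
/*
 * In short, the self-test above samples every CPU's NMI count, keeps the
 * other CPUs busy, waits roughly 20 watchdog ticks, and then marks any CPU
 * whose count advanced by five or fewer as having a broken watchdog. If no
 * CPU is left with a working watchdog, nmi_active is set to -1 so the lapic
 * watchdog cannot be re-enabled later.
 */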
static int __init setup_nmi_watchdog(char *str)
{
        unsigned int nmi;

        if (!strncmp(str, "panic", 5)) {
                panic_on_timeout = 1;
                str = strchr(str, ',');
                if (!str)
                        return 1;
                ++str;
        }

        if (!strncmp(str, "lapic", 5))
                nmi_watchdog = NMI_LOCAL_APIC;
        else if (!strncmp(str, "ioapic", 6))
                nmi_watchdog = NMI_IO_APIC;
        else {
                get_option(&str, &nmi);
                if (nmi >= NMI_INVALID)
                        return 0;
                nmi_watchdog = nmi;
        }
        return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
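
/*
 * Forms accepted by the parser above:
 *      nmi_watchdog=lapic      use the local APIC watchdog
 *      nmi_watchdog=ioapic     use the IO-APIC timer watchdog
 *      nmi_watchdog=<n>        numeric mode, rejected if >= NMI_INVALID
 * Any of them may be prefixed with "panic," (e.g. nmi_watchdog=panic,lapic)
 * to panic instead of just warning when a lockup is detected.
 */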
/*
 * Suspend/resume support
 */
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */
static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* only CPU0 goes here, other CPUs should be offline */
        nmi_pm_active = atomic_read(&nmi_active);
        stop_apic_nmi_watchdog(NULL);
        BUG_ON(atomic_read(&nmi_active) != 0);
        return 0;
}
static int lapic_nmi_resume(struct sys_device *dev)
{
        /* only CPU0 goes here, other CPUs should be offline */
        if (nmi_pm_active > 0) {
                setup_apic_nmi_watchdog(NULL);
                touch_nmi_watchdog();
        }
        return 0;
}
static struct sysdev_class nmi_sysclass = {
        .resume         = lapic_nmi_resume,
        .suspend        = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .cls    = &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        /*
         * should really be a BUG_ON but because this is an
         * init call, it just doesn't work.  -dcz
         */
        if (nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        if (atomic_read(&nmi_active) < 0)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif /* CONFIG_PM */
static void __acpi_nmi_enable(void *__unused)
{
        apic_write(APIC_LVT0, APIC_DM_NMI);
}
/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
                on_each_cpu(__acpi_nmi_enable, NULL, 1);
}
/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
                on_each_cpu(__acpi_nmi_disable, NULL, 1);
}
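
/*
 * Note that both helpers are no-ops unless the IO-APIC watchdog is the
 * active mode: they rewrite LVT0 on every CPU, either unmasking NMI
 * delivery (acpi_nmi_enable) or masking it (acpi_nmi_disable).
 */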
/*
 * This function is called as soon as the LAPIC NMI watchdog driver has
 * everything in place and is ready to check whether the NMIs belong to
 * the NMI watchdog.
 */
void cpu_nmi_set_wd_enabled(void)
{
        __get_cpu_var(wd_enabled) = 1;
}
void setup_apic_nmi_watchdog(void *unused)
{
        if (__get_cpu_var(wd_enabled))
                return;

        /* cheap hack to support suspend/resume */
        /* if cpu0 is not active neither should the other cpus */
        if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
                return;

        switch (nmi_watchdog) {
        case NMI_LOCAL_APIC:
                if (lapic_watchdog_init(nmi_hz) < 0) {
                        __get_cpu_var(wd_enabled) = 0;
                        return;
                }
                /* FALL THROUGH */
        case NMI_IO_APIC:
                __get_cpu_var(wd_enabled) = 1;
                atomic_inc(&nmi_active);
        }
}
void stop_apic_nmi_watchdog(void *unused)
{
        /* only support LOCAL and IO APICs for now */
        if (!nmi_watchdog_active())
                return;
        if (__get_cpu_var(wd_enabled) == 0)
                return;
        if (nmi_watchdog == NMI_LOCAL_APIC)
                lapic_watchdog_stop();
        else
                __acpi_nmi_disable(NULL);
        __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
}
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
{
        if (nmi_watchdog_active()) {
                unsigned cpu;

                /*
                 * Tell other CPUs to reset their alert counters. We cannot
                 * do it ourselves because the alert count increase is not
                 * atomic.
                 */
                for_each_present_cpu(cpu) {
                        if (per_cpu(nmi_touch, cpu) != 1)
                                per_cpu(nmi_touch, cpu) = 1;
                }
        }

        /*
         * Tickle the softlockup detector too:
         */
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
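
/*
 * Illustrative usage (not from this file): long-running code that
 * legitimately stalls a CPU, e.g. polling hardware with interrupts off,
 * should call touch_nmi_watchdog() in its loop so the watchdog does not
 * fire:
 *
 *      while (!device_ready())         // device_ready() is hypothetical
 *              touch_nmi_watchdog();
 */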
notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
        /*
         * Since current_thread_info()-> is always on the stack, and we
         * always switch the stack NMI-atomically, it's safe to use
         * smp_processor_id().
         */
        unsigned int sum;
        int touched = 0;
        int cpu = smp_processor_id();
        int rc = 0;

        /* check for other users first */
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
                        == NOTIFY_STOP) {
                rc = 1;
                touched = 1;
        }

        sum = get_timer_irqs(cpu);

        if (__get_cpu_var(nmi_touch)) {
                __get_cpu_var(nmi_touch) = 0;
                touched = 1;
        }

        if (cpu_isset(cpu, backtrace_mask)) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */

                spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                dump_stack();
                spin_unlock(&lock);
                cpu_clear(cpu, backtrace_mask);
        }
        /* Could check oops_in_progress here too, but it's safer not to */
        if (mce_in_progress())
                touched = 1;

        /* if none of the timers is firing, this cpu isn't doing much */
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                local_inc(&__get_cpu_var(alert_counter));
                if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
                        /*
                         * die_nmi will return ONLY if NOTIFY_STOP happens..
                         */
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
                __get_cpu_var(last_irq_sum) = sum;
                local_set(&__get_cpu_var(alert_counter), 0);
        }

        /* see if the nmi watchdog went off */
        if (!__get_cpu_var(wd_enabled))
                return rc;
        switch (nmi_watchdog) {
        case NMI_LOCAL_APIC:
                rc |= lapic_wd_event(nmi_hz);
                break;
        case NMI_IO_APIC:
                /*
                 * Don't know how to accurately check for this.
                 * Just assume it was a watchdog timer interrupt;
                 * this matches the old behaviour.
                 */
                rc = 1;
                break;
        }
        return rc;
}
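
/*
 * Timing of the lockup check above: the watchdog fires nmi_hz times per
 * second and the per-CPU alert_counter only trips at 5 * nmi_hz, so a CPU
 * whose timer interrupt counts stop changing is reported after roughly
 * five seconds.
 */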
#ifdef CONFIG_SYSCTL

static void enable_ioapic_nmi_watchdog_single(void *unused)
{
        __get_cpu_var(wd_enabled) = 1;
        atomic_inc(&nmi_active);
        __acpi_nmi_enable(NULL);
}
static void enable_ioapic_nmi_watchdog(void)
{
        on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
        touch_nmi_watchdog();
}
static void disable_ioapic_nmi_watchdog(void)
{
        on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
}
static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();
        char buf[64];

        sprintf(buf, "NMI received for unknown reason %02x\n", reason);
        die_nmi(buf, regs, 1); /* Always panic here */
        return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        int old_state;

        nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
        old_state = nmi_watchdog_enabled;
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (!!old_state == !!nmi_watchdog_enabled)
                return 0;

        if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
                printk(KERN_WARNING
                        "NMI watchdog is permanently disabled\n");
                return -EIO;
        }

        if (nmi_watchdog == NMI_LOCAL_APIC) {
                if (nmi_watchdog_enabled)
                        enable_lapic_nmi_watchdog();
                else
                        disable_lapic_nmi_watchdog();
        } else if (nmi_watchdog == NMI_IO_APIC) {
                if (nmi_watchdog_enabled)
                        enable_ioapic_nmi_watchdog();
                else
                        disable_ioapic_nmi_watchdog();
        } else {
                printk(KERN_WARNING
                        "NMI watchdog doesn't know what hardware to touch\n");
                return -EIO;
        }
        return 0;
}

#endif /* CONFIG_SYSCTL */
int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
        if (unknown_nmi_panic)
                return unknown_nmi_panic_callback(regs, cpu);
#endif
        return 0;
}
void __trigger_all_cpu_backtrace(void)
{
        int i;

        backtrace_mask = cpu_online_map;
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpus_empty(backtrace_mask))
                        break;
                mdelay(1);
        }
}
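
/*
 * This pairs with the backtrace_mask test in nmi_watchdog_tick(): each CPU
 * clears its own bit after dumping its stack from NMI context, so the loop
 * above just waits (for at most ten seconds) until the mask drains.
 */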