/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

#define MSR_P4_MISC_ENABLE	0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
#define MSR_P4_PERFCTR0		0x300
#define MSR_P4_CCCR0		0x360
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0	0x30C
#define P4_NMI_CRU_ESCR0	(P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0	\
	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)

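/*
 * Editor's note on the "clock" trick above (not in the original source):
 * with P4_CCCR_COMPARE set, IQ_COUNTER0 only counts cycles on which the
 * per-cycle event count exceeds the THRESHOLD; P4_CCCR_COMPLEMENT inverts
 * that test to "less than or equal".  Since a per-cycle count can never
 * exceed the maximum threshold of 15, the complemented test passes on
 * every cycle, so the counter increments once per cycle and ticks at the
 * core clock rate regardless of which event CRU_ESCR0 actually selects.
 */
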
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		mb();
}
#endif

int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

	for_each_online_cpu(cpu) {
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}

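/*
 * Editor's note (not in the original source): the pass criterion above is
 * simple arithmetic.  mdelay((10*1000)/nmi_hz) waits for ten watchdog
 * periods, so roughly ten NMIs should arrive on every CPU during the
 * test; any CPU whose __nmi_count grew by five or fewer is reported as
 * stuck and the lapic watchdog is disabled.
 */
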
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID)
		return 0;
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_intel_arch_watchdog(void);

static void disable_lapic_nmi_watchdog(void)
{
	if (nmi_active <= 0)
		return;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
		break;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 15) {
			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			disable_intel_arch_watchdog();
		}
		break;
	}
	nmi_active = -1;
	/* tell do_nmi() and others that we're not active any more */
	nmi_watchdog = 0;
}

static void enable_lapic_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_LOCAL_APIC;
		touch_nmi_watchdog();
		setup_apic_nmi_watchdog();
	}
}

int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}

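/*
 * Example (editor's illustration, not in the original source): a profiling
 * driver such as oprofile that needs the performance counters for itself
 * takes the lapic NMI away from the watchdog and later hands it back:
 *
 *	if (reserve_lapic_nmi() < 0)
 *		return -EBUSY;		- someone else already owns it
 *	... program the counters, set_nmi_callback(...) ...
 *	release_lapic_nmi();		- watchdog resumes if it was active
 */
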
void disable_timer_nmi_watchdog(void)
{
	if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
		return;

	disable_irq(0);
	unset_nmi_callback();
	nmi_active = -1;
	nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_IO_APIC;
		touch_nmi_watchdog();
		nmi_active = 1;
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = nmi_active;
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

static void clear_msr_range(unsigned int base, unsigned int n)
{
	unsigned int i;

	for(i = 0; i < n; ++i)
		wrmsr(base+i, 0, 0);
}

static void setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for(i = 0; i < 4; ++i) {
		/* Simulator may not support it */
		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
			nmi_perfctr_msr = 0;
			return;
		}
		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}

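/*
 * Editor's note (not in the original source): the perfctr is preloaded
 * with a negative cycle count so that it overflows, and raises the NMI,
 * nmi_hz times per second.  For example, on a 2GHz CPU (cpu_khz ==
 * 2000000) with nmi_hz == 1000, -((u64)cpu_khz * 1000 / nmi_hz) ==
 * -2000000: the counter overflows every two million cycles, i.e. once
 * per millisecond.  After check_nmi_watchdog() drops nmi_hz to 1, the
 * preload becomes -2*10^9, one NMI per second.
 */
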
static void disable_intel_arch_watchdog(void)
{
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(ARCH_PERFMON_ID);
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}

static int setup_intel_arch_watchdog(void)
{
	unsigned int evntsel;
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(ARCH_PERFMON_ID);
	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;

	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	return 1;
}

static int setup_p4_watchdog(void)
{
	unsigned int misc_enable, dummy;

	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
	nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
		clear_msr_range(0x3F1, 2);
	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
	   docs don't fully define it, so leave it alone for now. */
	if (boot_cpu_data.x86_model >= 0x3) {
		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
		clear_msr_range(0x3A0, 26);
		clear_msr_range(0x3BC, 3);
	} else
		clear_msr_range(0x3A0, 31);
	clear_msr_range(0x3C0, 6);
	clear_msr_range(0x3C8, 6);
	clear_msr_range(0x3E0, 2);
	clear_msr_range(MSR_P4_CCCR0, 18);
	clear_msr_range(MSR_P4_PERFCTR0, 18);

	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
	wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
	return 1;
}

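/*
 * Editor's note (not in the original source): the CCCR is deliberately
 * written twice above.  The first write programs it with P4_CCCR_ENABLE
 * masked out, so the counter can be preloaded and LVTPC unmasked while
 * the counter is stopped; only the final write (nmi_p4_cccr_val, which
 * has ENABLE set) actually starts it ticking.
 */
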
void setup_apic_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 15)
			return;
		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
			return;
		setup_k7_watchdog();
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			if (!setup_intel_arch_watchdog())
				return;
		} else if (boot_cpu_data.x86 == 15) {
			if (!setup_p4_watchdog())
				return;
		} else
			return;
		break;
	default:
		return;
	}
	lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
	nmi_active = 1;
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}

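/*
 * Editor's note (not in the original source): code that legitimately
 * keeps a CPU busy for a long time, especially with interrupts disabled,
 * should call touch_nmi_watchdog() periodically so the tick handler
 * below does not mistake it for a lockup, e.g.:
 *
 *	while (slow_hardware_poll_not_done())	- hypothetical helper
 *		touch_nmi_watchdog();
 */
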
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}

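/*
 * Editor's note (not in the original source): the lockup threshold is
 * 5*nmi_hz unchanged ticks.  Since the perfctr is programmed to fire
 * nmi_hz times per second, that is about five seconds of a completely
 * stuck apic_timer_irqs count, whether nmi_hz is still HZ or has already
 * been lowered to 1 by check_nmi_watchdog().
 */
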
static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count,1);
	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);

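/*
 * Example (editor's illustration, not in the original source): a client
 * such as oprofile registers a handler that returns non-zero when it
 * consumed the NMI, or zero to fall through to default_do_nmi():
 *
 *	static int my_nmi_handler(struct pt_regs *regs, int cpu)
 *	{
 *		if (!my_counter_overflowed())	- hypothetical helper
 *			return 0;		- not ours, keep default path
 *		... handle the overflow ...
 *		return 1;
 *	}
 *	set_nmi_callback(my_nmi_handler);
 *	...
 *	unset_nmi_callback();
 */
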
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		unset_nmi_callback();
		release_lapic_nmi();
	}
	return 0;
}

#endif /* CONFIG_SYSCTL */

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);