/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>

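/*
 * NMI handler registration.  Handlers live on per-type lists, one
 * nmi_desc per NMI type (the type constants NMI_LOCAL, NMI_UNKNOWN
 * and NMI_MAX are presumably defined in <asm/nmi.h>).  The lists are
 * walked under RCU from NMI context and modified under a spinlock
 * from process context.
 */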
#define NMI_MAX_NAMELEN 16
struct nmiaction {
	struct list_head list;
	nmi_handler_t handler;
	unsigned int flags;
	char *name;
};

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
};

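/*
 * Per-CPU bookkeeping of how NMIs on this CPU were disposed of:
 * claimed by a registered handler (normal), unclaimed (unknown),
 * external/platform-sourced (external), or deliberately dropped as
 * the second half of a back-to-back pair (swallow).
 */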
struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed
 * simultaneously; only to be used from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
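/*
 * i.e. booting with "unknown_nmi_panic" on the kernel command line
 * turns unexplained NMIs into a panic; see unknown_nmi_error() below.
 */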

#define nmi_to_desc(type) (&nmi_desc[type])

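/*
 * Walk every handler registered for @type.  Each handler returns the
 * number of events it claims for this NMI (0 if it was not for it);
 * the sum lets the caller decide whether the NMI was accounted for.
 */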
static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means that if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list)
		handled += a->handler(type, regs);

	rcu_read_unlock();

	/* return the total number of NMI events handled */
	return handled;
}

static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never return because they
	 * just assume the NMI is theirs.  Allowing only one such handler
	 * per list is a sanity check to manage expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses some handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the nmi handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
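	/*
	 * Wait for any racing nmi_handle() list walk that may still be
	 * referencing the unlinked action to finish before the caller
	 * frees it.
	 */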
	synchronize_rcu();
	return n;
}

int register_nmi_handler(unsigned int type, nmi_handler_t handler,
			unsigned long nmiflags, const char *devname)
{
	struct nmiaction *action;
	int retval = -ENOMEM;

	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
	if (!action)
		goto fail_action;

	action->handler = handler;
	action->flags = nmiflags;
	action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
	if (!action->name)
		goto fail_action_name;

	retval = __setup_nmi(type, action);
	if (retval)
		goto fail_setup_nmi;

	return retval;

fail_setup_nmi:
	kfree(action->name);
fail_action_name:
	kfree(action);
fail_action:
	return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmiaction *a;

	a = __free_nmi(type, name);
	if (a) {
		kfree(a->name);
		kfree(a);
	}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

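/*
 * A minimal usage sketch (the device and helper names below are
 * hypothetical, not part of this file): a driver claims CPU-local
 * NMIs and releases them on teardown.  A handler returns non-zero
 * if the NMI was its device's doing.
 *
 *	static int mydev_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!mydev_caused_nmi())
 *			return 0;
 *		mydev_ack_nmi();
 *		return 1;
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, mydev_nmi_handler, 0, "mydev");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "mydev");
 */
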
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, the PCI SERR line is used to report memory
	 * errors.  EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line and wait: 20000 * 100us ~= 2 seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' here as back-to-back NMIs are dealt with one level
	 * up.  Of course this makes having multiple 'unknown' handlers
	 * useless, as only the first one is ever run (unless it can
	 * actually determine if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}

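/*
 * State for back-to-back NMI detection: last_nmi_rip records the
 * interrupted instruction pointer of the previous NMI on this CPU,
 * and swallow_nmi flags that a follow-on 'unknown' NMI was probably
 * already handled and may be dropped.
 */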
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific
	 * NMIs, otherwise we may lose one, because a CPU-specific
	 * NMI cannot be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is
	 * dropped due to NMIs being edge-triggered).  If this is the
	 * second half of a back-to-back NMI, assume we dropped things
	 * and process more handlers.  Otherwise, reset the 'swallow'
	 * NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when a NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an
		 * unknown NMI.  Instead let's flag this for a potential
		 * NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is that we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * along with it.  As a result it gets swallowed.  Or if the
	 * first perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}

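/*
 * Arch entry point, reached from the low-level NMI entry code.
 * nmi_enter()/nmi_exit() update the preempt count so that in_nmi()
 * is true while the handlers run.
 */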
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/*
 * Reset the back-to-back NMI detection logic, e.g. before a CPU
 * parks on the same instruction for a long time in the idle loop;
 * otherwise two NMIs taken at that instruction could be misread as
 * a back-to-back pair and one of them swallowed.
 */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}