/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

#include <linux/mca.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#define NMI_MAX_NAMELEN 16
struct nmiaction {
	struct list_head list;
	nmi_handler_t handler;
	unsigned int flags;
	char *name;
};

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

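/*
 * One descriptor per NMI type.  The two entries below correspond to
 * the two types used in this file, NMI_LOCAL and NMI_UNKNOWN; the
 * type constants and NMI_MAX come from <asm/nmi.h>, included above.
 */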
static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed
 * simultaneously; this lock may only be taken from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
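/*
 * Note: "unknown_nmi_panic" is a kernel command-line option; booting
 * with it set makes unknown_nmi_error() below panic on an unhandled
 * unknown NMI instead of trying to continue.
 */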

#define nmi_to_desc(type) (&nmi_desc[type])

static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list)
		handled += a->handler(type, regs);

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
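
/*
 * Handlers above run under rcu_read_lock().  __free_nmi() below relies
 * on this, pairing list_del_rcu() with synchronize_rcu() so an action
 * is never freed while another CPU's NMI may still be walking the list.
 */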

static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never return 0 because
	 * they simply assume the NMI is theirs, so registering more
	 * than one is pointless.  Just a sanity check to manage
	 * expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses them (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
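
/*
 * __free_nmi() unlinks the matching action and returns it so that the
 * caller (unregister_nmi_handler) can free the name and the action
 * itself outside of desc->lock.
 */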
static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the nmi handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
	return n;
}

int register_nmi_handler(unsigned int type, nmi_handler_t handler,
			unsigned long nmiflags, const char *devname)
{
	struct nmiaction *action;
	int retval = -ENOMEM;

	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
	if (!action)
		goto fail_action;

	action->handler = handler;
	action->flags = nmiflags;
	action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
	if (!action->name)
		goto fail_action_name;

	retval = __setup_nmi(type, action);
	if (retval)
		goto fail_setup_nmi;

	return retval;

fail_setup_nmi:
	kfree(action->name);
fail_action_name:
	kfree(action);
fail_action:
	return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmiaction *a;

	a = __free_nmi(type, name);
	if (a) {
		kfree(a->name);
		kfree(a);
	}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
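
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * registers a handler for CPU-local NMIs and returns the number of
 * events it handled, 0 meaning "not ours".  The names my_nmi_handler,
 * my_dev_pending() and "mydev" are hypothetical.
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_dev_pending())
 *			return 0;	(not ours; let other handlers run)
 *		return 1;		(one event handled)
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "mydev");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "mydev");
 */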

static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, the PCI SERR line is used to report memory
	 * errors.  EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/*
	 * Re-enable the IOCK line and wait roughly 2 seconds
	 * (20000 iterations * 100 us) before clearing it again.
	 */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Pass 'false' for b2b: back-to-back NMIs are dealt with one
	 * level up.  Of course this makes having multiple 'unknown'
	 * handlers largely useless, as only the first one is ever run
	 * (unless it can actually determine if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific
	 * NMIs, otherwise we may lose them, because a CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is
	 * dropped due to NMIs being edge-triggered).  If this is the
	 * second half of a back-to-back NMI, assume we dropped things
	 * and process more handlers.  Otherwise, reset the 'swallow'
	 * NMI behaviour.
	 */
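	/*
	 * (One way to read this heuristic: a latched second NMI is
	 * delivered as soon as the first one's iret completes, so it
	 * interrupts the very instruction the first NMI returned to
	 * and both invocations see the same regs->ip.)
	 */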
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued to arrive as the next NMI.  Because that
		 * event is already handled, the next NMI would show up
		 * as an unknown NMI.  Instead, flag it as a potential
		 * NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that a 'real' unknown NMI was also
	 * sent.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
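
/*
 * (Writing 0 works because a real NMI will not interrupt the
 * instruction at address 0, so the next NMI's regs->ip cannot match
 * last_nmi_rip and the back-to-back detection starts from scratch.)
 */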