kernel/watchdog.c
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
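
/*
 * As parsed above, booting with "nmi_watchdog=panic" makes a detected
 * hard lockup panic the machine, "nmi_watchdog=nopanic" downgrades it
 * to a warning, and "nmi_watchdog=0" disables the watchdog entirely.
 */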

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/* */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two with a factor: the soft-lockup threshold is twice the hard-lockup
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
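
/*
 * A worked example of the shift above (illustrative numbers): after
 * ~21.5 s of sched-clock time, cpu_clock() returns ~21.5 * 10^9 ns and
 * 21.5e9 >> 30 == 20. Each returned unit is really 2^30 ns ~= 1.074 s,
 * so the count lags wall-clock seconds by ~7%, which is close enough
 * for lockup detection.
 */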

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
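
/*
 * With the defaults above (watchdog_thresh = 10), the soft-lockup
 * threshold is 20 s and sample_period = 20 * (10^9 / 5) ns = 4 s,
 * so the per-cpu hrtimer fires every 4 seconds.
 */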

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
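
/*
 * Illustrative numbers: with touch_ts == 100 and a 20 s threshold, a
 * call at now == 125 returns 25 (the approximate stall time in
 * seconds); any call at now <= 120 returns 0.
 */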

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
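
/*
 * The attr above asks perf for a CPU-cycles counter that is pinned to
 * its CPU and starts disabled; watchdog_nmi_enable() fills in
 * .sample_period and enables it, so the counter overflows (firing an
 * NMI on x86) roughly once per watchdog_thresh seconds worth of cycles.
 */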

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* Check for a hardlockup by making sure our timer interrupt is
	 * incrementing. The timer interrupt should have fired multiple
	 * times before the counter overflowed; if it hasn't, that is a
	 * good indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to indicate it
	 * is getting cpu time; if it hasn't, that is a good indication
	 * some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	if (!watchdog_enabled) {
		kthread_park(current);
		return;
	}

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
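
/*
 * In other words: watchdog_timer_fn() bumps hrtimer_interrupts and
 * wakes the per-cpu thread; the thread (watchdog() below) copies that
 * count into soft_lockup_hrtimer_cnt, so the smpboot loop runs the
 * thread effectively once per hrtimer tick and sleeps otherwise.
 */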

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0, or when the result differs from cpu0's */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	unsigned int cpu;

	if (watchdog_disabled) {
		watchdog_disabled = 0;
		for_each_online_cpu(cpu)
			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	}
}

static void watchdog_disable_all_cpus(void)
{
	unsigned int cpu;

	if (!watchdog_disabled) {
		watchdog_disabled = 1;
		for_each_online_cpu(cpu)
			kthread_park(per_cpu(softlockup_watchdog, cpu));
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	if (watchdog_disabled < 0)
		return -ENODEV;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_sample_period();
	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return ret;
}
#endif /* CONFIG_SYSCTL */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
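
/*
 * Registering this descriptor hands thread lifecycle to the smpboot
 * infrastructure: it creates one "watchdog/%u" kthread per online cpu,
 * calls .setup when a thread first runs, and uses .park/.unpark around
 * cpu hotplug. Parking is also how the sysctl handlers above stop and
 * restart the watchdog without tearing the threads down.
 */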

void __init lockup_detector_init(void)
{
	set_sample_period();
	if (smpboot_register_percpu_thread(&watchdog_threads)) {
		pr_err("Failed to create watchdog threads, disabled\n");
		watchdog_disabled = -ENODEV;
	}
}