kernel/watchdog.c:touch_softlockup_watchdog(): use raw_cpu_write()
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

58687acb
DZ
51/* boot commands */
52/*
53 * Should we panic when a soft-lockup or hard-lockup occurs:
54 */
23637d47 55#ifdef CONFIG_HARDLOCKUP_DETECTOR
fef2c9bc
DZ
56static int hardlockup_panic =
57 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
58687acb
DZ
58
59static int __init hardlockup_panic_setup(char *str)
60{
61 if (!strncmp(str, "panic", 5))
62 hardlockup_panic = 1;
fef2c9bc
DZ
63 else if (!strncmp(str, "nopanic", 7))
64 hardlockup_panic = 0;
5dc30558 65 else if (!strncmp(str, "0", 1))
3c00ea82 66 watchdog_user_enabled = 0;
58687acb
DZ
67 return 1;
68}
69__setup("nmi_watchdog=", hardlockup_panic_setup);
70#endif
71
72unsigned int __read_mostly softlockup_panic =
73 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
74
75static int __init softlockup_panic_setup(char *str)
76{
77 softlockup_panic = simple_strtoul(str, NULL, 0);
78
79 return 1;
80}
81__setup("softlockup_panic=", softlockup_panic_setup);
82
83static int __init nowatchdog_setup(char *str)
84{
3c00ea82 85 watchdog_user_enabled = 0;
58687acb
DZ
86 return 1;
87}
88__setup("nowatchdog", nowatchdog_setup);
89
90/* deprecated */
91static int __init nosoftlockup_setup(char *str)
92{
3c00ea82 93 watchdog_user_enabled = 0;
58687acb
DZ
94 return 1;
95}
96__setup("nosoftlockup", nosoftlockup_setup);
97/* */
98
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

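/*
 * Worked example (added commentary, not in the original source): with the
 * default watchdog_thresh of 10, a hard lockup is reported on the order of
 * 10 seconds after a CPU stops taking interrupts, while a soft lockup is
 * only reported once the watchdog thread has been starved for ~20 seconds,
 * halving the chance of a spurious soft-lockup warning under heavy load.
 */
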
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

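/*
 * Worked example (added commentary): local_clock() returns nanoseconds, so
 * the shift divides by 2^30 = 1,073,741,824 rather than 10^9. A clock of
 * 21,474,836,480 ns (= 20 * 2^30, about 21.5 real seconds) reads as 20
 * "seconds". The ~7% error is harmless because every comparison against
 * watchdog_touch_ts uses the same units on both sides.
 */
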
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

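/*
 * Worked example (added commentary): with the default watchdog_thresh = 10,
 * get_softlockup_thresh() returns 20 and
 *
 *	sample_period = 20 * (1,000,000,000 / 5) = 4,000,000,000 ns = 4 s
 *
 * so the per-cpu hrtimer fires every 4 seconds, i.e. two or three times
 * within the ~10 second hard-lockup window, as the comment above requires.
 */
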
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

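/*
 * Note (added commentary): raw_cpu_write() is the unchecked variant of
 * __this_cpu_write(). Under CONFIG_DEBUG_PREEMPT the checked accessor
 * complains when used from preemptible context, since the task may migrate
 * and write another CPU's variable. That race is harmless here -- zeroing
 * *some* CPU's watchdog_touch_ts at worst delays one softlockup check --
 * so the raw accessor silences a false-positive warning.
 */
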
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

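/*
 * Note (added commentary): hard-lockup detection works by comparing two
 * interrupt sources that run at different rates. The per-cpu hrtimer
 * increments hrtimer_interrupts every sample_period; the perf NMI fires
 * roughly once per watchdog_thresh seconds. If two consecutive NMIs see
 * the same hrtimer_interrupts value, timer interrupts are no longer being
 * serviced -- the signature of a CPU stuck with interrupts disabled.
 */
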
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

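/*
 * Worked example (added commentary): with watchdog_thresh = 10 the soft
 * threshold is 20. If the watchdog thread last updated touch_ts at 100
 * and get_timestamp() now returns 125, then time_after(125, 120) is true
 * and is_softlockup() returns 25 -- the duration printed in the
 * "stuck for 25s!" message emitted by watchdog_timer_fn().
 */
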
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

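/*
 * Note (added commentary): the callback above runs in NMI context, which
 * is what lets it catch lockups an ordinary interrupt never could -- the
 * cycle counter overflows and raises an NMI even while the stuck CPU has
 * interrupts disabled. Resetting event->hw.interrupts to 0 keeps perf's
 * throttling logic from ever rate-limiting the watchdog event away.
 */
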
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

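/*
 * Note (added commentary): soft-lockup detection is a two-party protocol.
 * The hrtimer above runs in interrupt context every sample_period and
 * wakes the per-cpu SCHED_FIFO watchdog thread, whose only job is to call
 * __touch_watchdog() and refresh watchdog_touch_ts. If a task hogs the
 * CPU so hard that even a maximum-priority RT thread cannot run, the
 * timestamp goes stale and the next timer tick reports the stall.
 */
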
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

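/*
 * Note (added commentary): watchdog_should_run() compares the live
 * hrtimer_interrupts counter with the snapshot saved on the thread's
 * previous pass. They differ exactly when the timer has ticked since the
 * thread last ran, so the smpboot loop can put the thread back to sleep
 * instead of spinning between wakeups.
 */
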
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

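/*
 * Note (added commentary): smpboot_register_percpu_thread() creates one
 * "watchdog/N" thread per CPU from this descriptor and manages them
 * across hotplug: .setup runs when a thread is brought up, .park/.unpark
 * bracket CPU offline/online, and .cleanup runs on unregister. The
 * smpboot core repeatedly calls .thread_should_run and, when it returns
 * true, .thread_fn -- which is why watchdog() itself contains no loop.
 */
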
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure that the perf event counter will pick up the new
	 * sampling period. Updating the sampling period directly would
	 * be much nicer but we do not have an API for that now, so
	 * let's use a big hammer.
	 * Hrtimer will adopt the new period on the next tick but this
	 * might be late already so we have to restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	preempt_disable();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	preempt_enable();
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * Watchdog threads shouldn't be enabled if they are
	 * disabled. The 'watchdog_running' check in the
	 * watchdog_*_all_cpus() functions takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */
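
/*
 * Usage example (added commentary; sysctl names as wired up in
 * kernel/sysctl.c of this era, both routed through proc_dowatchdog()):
 *
 *	# sysctl -w kernel.watchdog_thresh=20
 *	(hard threshold becomes ~20 s, soft ~40 s, sample_period 8 s)
 *
 *	# echo 0 > /proc/sys/kernel/nmi_watchdog
 *	(clears watchdog_user_enabled; the per-cpu threads are torn down)
 */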

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}