/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/seq_buf.h>

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE            4096

struct nmi_seq_buf {
        unsigned char   buffer[NMI_BUF_SIZE];
        struct seq_buf  seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
{
        const char *buf = s->buffer + start;

        printk("%.*s", (end - start) + 1, buf);
}

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 * (See the illustrative sketch after nmi_trigger_all_cpu_backtrace()
 * below for how an architecture typically hooks this up.)
 */
void nmi_trigger_all_cpu_backtrace(bool include_self,
                                   void (*raise)(cpumask_t *mask))
{
        struct nmi_seq_buf *s;
        int i, cpu, this_cpu = get_cpu();

        if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in
                 * progress (backtrace_flag == 1), don't print a duplicate
                 * set of CPU dumps.
                 */
                put_cpu();
                return;
        }

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
        if (!include_self)
                cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

        cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));

        /*
         * Set up per_cpu seq_buf buffers that the NMIs running on the other
         * CPUs will write to.
         */
        for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
                s = &per_cpu(nmi_print_seq, cpu);
                seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
        }

        if (!cpumask_empty(to_cpumask(backtrace_mask))) {
                pr_info("Sending NMI to %s CPUs:\n",
                        (include_self ? "all" : "other"));
                raise(to_cpumask(backtrace_mask));
        }

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
                touch_softlockup_watchdog();
        }

        /*
         * Now that all the NMIs have triggered, we can dump out their
         * back traces safely to the console.
         */
        for_each_cpu(cpu, &printtrace_mask) {
                int len, last_i = 0;

                s = &per_cpu(nmi_print_seq, cpu);
                len = seq_buf_used(&s->seq);
                if (!len)
                        continue;

                /* Print line by line. */
                for (i = 0; i < len; i++) {
                        if (s->buffer[i] == '\n') {
                                print_seq_line(s, last_i, i);
                                last_i = i + 1;
                        }
                }
                /* Check if there was a partial line. */
                if (last_i < len) {
                        print_seq_line(s, last_i, len - 1);
                        pr_cont("\n");
                }
        }

        clear_bit(0, &backtrace_flag);
        smp_mb__after_atomic();
        put_cpu();
}

/*
 * It is not safe to call printk() directly from NMI handlers.
 * It may be fine if the NMI detected a lockup and we have no choice
 * but to do so, but an NMI to all other CPUs to collect a backtrace
 * can also be requested via sysrq-l. We don't want that path to lock
 * up, which can happen if the NMI interrupts a printk() in progress.
 *
 * Instead, we redirect vprintk() to this nmi_vprintk(), which writes
 * the output into a per-CPU seq_buf buffer. Once the NMIs have all
 * finished, we can safely dump the contents of each seq_buf via
 * printk() from a non-NMI context.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        unsigned int len = seq_buf_used(&s->seq);

        seq_buf_vprintf(&s->seq, fmt, args);
        return seq_buf_used(&s->seq) - len;
}

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                printk_func_t printk_func_save = this_cpu_read(printk_func);

                /* Replace printk to write into the NMI seq */
                this_cpu_write(printk_func, nmi_vprintk);
                pr_warn("NMI backtrace for cpu %d\n", cpu);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();
                this_cpu_write(printk_func, printk_func_save);

                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return true;
        }

        return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
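
/*
 * Illustrative sketch (not part of the original file): the architecture's
 * NMI or backtrace-IPI handler calls nmi_cpu_backtrace() with the
 * interrupted registers and uses the return value to tell whether this
 * CPU was actually asked for a backtrace. The handler name below is an
 * assumption for the example.
 *
 *      static void nmi_handle_backtrace(struct pt_regs *regs)
 *      {
 *              if (nmi_cpu_backtrace(regs))
 *                      return;         // backtrace written for this CPU
 *              // otherwise this NMI was not a backtrace request
 *      }
 */
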
#endif