ftrace: replace raw_local_irq_save with local_irq_save
kernel/trace/trace_stack.c

The raw_ irq save/restore variants are invisible to lockdep's interrupt-state
tracking, so the irq-off regions in this file are switched to
local_irq_save()/local_irq_restore(), which keep lockdep informed.
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
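/*
 * Measure how deep the current task's stack is and, when a new maximum
 * is seen, record both the backtrace and the depth of each frame in
 * stack_dump_trace[]/stack_dump_index[] for later display.
 */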
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

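	/*
	 * The offset of this_size within the THREAD_SIZE-aligned stack
	 * area tells how far down the stack we are; the amount in use is
	 * THREAD_SIZE minus that offset.
	 */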
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

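	/*
	 * local_irq_save() (rather than the raw_ variant) keeps lockdep's
	 * interrupt-state tracking informed; the lock itself stays a raw
	 * spinlock since this runs from the function tracer and must not
	 * recurse into the traced locking code.
	 */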
	local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
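	/*
	 * Scan between our own local variable (near the deepest live
	 * word) and the top of the thread's stack area.
	 */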
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				/* Start the search from here */
				start = p + 1;
			}
		}

		i++;
	}

 out:
	__raw_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

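/*
 * The ftrace callback: runs on entry to every traced function. A
 * per-cpu counter keeps nested invocations (e.g. from an interrupt)
 * from re-entering check_stack().
 */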
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

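	/*
	 * ftrace_preempt_disable() disables preemption without tracing
	 * and remembers whether a reschedule was already pending, so the
	 * matching enable below will not call into schedule() from here.
	 */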
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

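/*
 * debugfs 'stack_max_size': reading reports the deepest stack usage
 * seen so far (in bytes); writing resets or seeds that value.
 */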
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);

	return count;
}

static struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
};

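/*
 * seq_file iterator for 'stack_trace': walks stack_dump_trace[] with
 * max_stack_lock held and interrupts off so the snapshot cannot change
 * mid-read.
 */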
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	long i;

	(*pos)++;

	if (v == SEQ_START_TOKEN)
		i = 0;
	else {
		i = *(long *)v;
		i++;
	}

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return NULL;

	m->private = (void *)i;

	return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = SEQ_START_TOKEN;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

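/*
 * Print one saved return address, resolved to a symbol name when
 * kallsyms is available, otherwise as a raw pointer.
 */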
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "%s\n", str);
#else
	return seq_printf(m, "%p\n", (void*)addr);
#endif
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth   Size      Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   max_stack_trace.nr_entries);
		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

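	/*
	 * An entry's size is the distance between its depth and the
	 * next entry's depth; the last entry accounts for its whole
	 * remaining depth.
	 */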
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
};

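/*
 * Hook everything up at boot: expose the two debugfs files and
 * register the stack-checking callback with ftrace.
 */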
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);