ftrace: make sysprof dependent on x86 for now
kernel/trace/trace_sysprof.c

/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*sysprof_trace;
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample period for now (the value is in nanoseconds):
 */
static const unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

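/*
 * On x86, each userspace stack frame starts with the saved frame
 * pointer followed by the return address; relying on this layout is
 * why the tracer is x86-only for now:
 */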
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

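/*
 * Copy one stack frame from user memory. Returns 1 on success, 0 if
 * the frame is unreadable. This runs from hrtimer (hardirq) context,
 * hence the _inatomic copy that must not sleep:
 */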
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		return 0;

	return 1;
}

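/*
 * Record one sample of whatever the timer interrupted on this CPU:
 * kernel-mode hits become a single trace entry, user-mode hits are
 * unwound by following the frame-pointer chain, at most
 * sample_max_depth frames deep:
 */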
static void timer_notify(struct pt_regs *regs, int cpu)
{
	const void __user *frame_pointer;
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;
	}

	trace_special(tr, data, 0, current->pid, regs->ip);

	frame_pointer = (void __user *)regs->bp;

	for (i = 0; i < sample_max_depth; i++) {
		if (!copy_stack_frame(frame_pointer, &frame))
			break;
		if ((unsigned long)frame_pointer < regs->sp)
			break;

		trace_special(tr, data, 1, frame.return_address,
			      (unsigned long)frame_pointer);
		frame_pointer = frame.next_fp;
	}

	trace_special(tr, data, 2, current->pid, i);

	if (i == sample_max_depth)
		trace_special(tr, data, -1, -1, -1);
}

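/*
 * Per-CPU hrtimer callback: take a sample, then re-arm the timer for
 * the next sample_period:
 */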
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

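/*
 * hrtimer_start() arms the timer on the CPU it is called on, so we
 * temporarily bind the current task to each online CPU in turn to set
 * up one local timer per CPU, then restore the original affinity:
 */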
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}

static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
}

static notrace void stop_stack_trace(struct trace_array *tr)
{
	stop_stack_timers();
	tracer_enabled = 0;
}

static notrace void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

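/*
 * The tracer plugin descriptor; registering it makes "sysprof"
 * selectable as the current tracer:
 */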
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);
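
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug and
 * this tracer is built in (file names per the ftrace debugfs
 * interface of this era):
 *
 *	echo sysprof > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *	cat /sys/kernel/debug/tracing/trace
 */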