/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include "trace.h"
static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static unsigned wakeup_prio = -1;

static DEFINE_SPINLOCK(wakeup_lock);
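/*
 * Only one wakeup latency measurement is tracked at a time. The
 * wakeup_* variables above describe the task currently being timed;
 * wakeup_lock serializes updates to them, since the wakeup and
 * sched_switch probes can fire concurrently on different CPUs.
 */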

static void notrace __wakeup_reset(struct trace_array *tr);

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

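/*
 * Second half of the measurement: wakeup_sched_switch() runs at every
 * context switch, and when the task being switched in is the one armed
 * in wakeup_task it computes the delta between the wakeup timestamp
 * (preempt_timestamp) and now, recording a new maximum if
 * report_latency() approves.
 */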
void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
        unsigned long latency = 0, t0 = 0, t1 = 0;
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        cycle_t T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see the tracer_enabled = 1
         * and the wakeup_task with an older task, that might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        /* The task we are waiting for is waking up */
        data = tr->data[wakeup_cpu];

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&tr->data[cpu]->disabled);
        if (likely(disabled != 1))
                goto out;

        spin_lock_irqsave(&wakeup_lock, flags);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        if (!report_latency(delta))
                goto out_unlock;

        latency = nsecs_to_usecs(delta);

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        update_max_tr(tr, wakeup_task, wakeup_cpu);

        if (tracing_thresh) {
                printk(KERN_INFO "(%16s-%-5d|#%d):"
                        " %lu us wakeup latency violates %lu us threshold.\n",
                                wakeup_task->comm, wakeup_task->pid,
                                raw_smp_processor_id(),
                                latency, nsecs_to_usecs(tracing_thresh));
        } else {
                printk(KERN_INFO "(%16s-%-5d|#%d):"
                        " new %lu us maximum wakeup latency.\n",
                                wakeup_task->comm, wakeup_task->pid,
                                raw_smp_processor_id(), latency);
        }

out_unlock:
        __wakeup_reset(tr);
        spin_unlock_irqrestore(&wakeup_lock, flags);
out:
        atomic_dec(&tr->data[cpu]->disabled);
}

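/*
 * Callers must hold wakeup_lock (checked by the assertion below);
 * wakeup_reset() further down is the locking wrapper for paths that
 * do not already hold it.
 */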
static void notrace __wakeup_reset(struct trace_array *tr)
{
        struct trace_array_cpu *data;
        int cpu;

        assert_spin_locked(&wakeup_lock);

        for_each_possible_cpu(cpu) {
                data = tr->data[cpu];
                tracing_reset(data);
        }

        wakeup_cpu = -1;
        wakeup_prio = -1;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void notrace wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        spin_lock_irqsave(&wakeup_lock, flags);
        __wakeup_reset(tr);
        spin_unlock_irqrestore(&wakeup_lock, flags);
}

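/*
 * First half of the measurement: called on task wakeup, this arms the
 * tracer on @p when @p is an RT task with higher priority (numerically
 * lower prio) than both the currently traced task and the task it
 * would preempt, and stamps preempt_timestamp with the wakeup time.
 */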
static void notrace
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
                   struct task_struct *curr)
{
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;

        if (likely(!rt_task(p)) ||
                        p->prio >= wakeup_prio ||
                        p->prio >= curr->prio)
                return;

        disabled = atomic_inc_return(&tr->data[cpu]->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(tr);

        wakeup_cpu = task_cpu(p);
        wakeup_prio = p->prio;

        wakeup_task = p;
        get_task_struct(wakeup_task);

        local_save_flags(flags);

        tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
        trace_function(tr, tr->data[wakeup_cpu],
                       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
        spin_unlock(&wakeup_lock);
out:
        atomic_dec(&tr->data[cpu]->disabled);
}

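/*
 * Entry points called from the scheduler's wakeup paths; they filter
 * out the common case of the tracer being disabled before taking the
 * slower wakeup_check_start() path.
 */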
notrace void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
{
        if (likely(!tracer_enabled))
                return;

        wakeup_check_start(wakeup_trace, wakee, curr);
}

notrace void
ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
{
        if (likely(!tracer_enabled))
                return;

        wakeup_check_start(wakeup_trace, wakee, curr);
}

static notrace void start_wakeup_tracer(struct trace_array *tr)
{
        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is a slow path anyway.
         */
        smp_wmb();

        tracer_enabled = 1;
}

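/*
 * Note: the smp_wmb() in start_wakeup_tracer() pairs with the smp_rmb()
 * in wakeup_sched_switch(); a CPU that observes tracer_enabled == 1
 * must not observe a stale wakeup_task.
 */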
static notrace void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static notrace void wakeup_tracer_init(struct trace_array *tr)
{
        wakeup_trace = tr;

        if (tr->ctrl)
                start_wakeup_tracer(tr);
}

static notrace void wakeup_tracer_reset(struct trace_array *tr)
{
        if (tr->ctrl) {
                stop_wakeup_tracer(tr);
                /* make sure we put back any tasks we are tracing */
                wakeup_reset(tr);
        }
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_wakeup_tracer(tr);
        else
                stop_wakeup_tracer(tr);
}

static void notrace wakeup_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        if (iter->tr->ctrl)
                stop_wakeup_tracer(iter->tr);
}

static void notrace wakeup_tracer_close(struct trace_iterator *iter)
{
        /* forget about any processes we were recording */
        if (iter->tr->ctrl)
                start_wakeup_tracer(iter->tr);
}

static struct tracer wakeup_tracer __read_mostly =
{
        .name        = "wakeup",
        .init        = wakeup_tracer_init,
        .reset       = wakeup_tracer_reset,
        .open        = wakeup_tracer_open,
        .close       = wakeup_tracer_close,
        .ctrl_update = wakeup_tracer_ctrl_update,
        .print_max   = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        return 0;
}
device_initcall(init_wakeup_tracer);
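
/*
 * Typical usage, via the ftrace debugfs interface (paths assume debugfs
 * is mounted at /sys/kernel/debug; some setups mount it elsewhere):
 *
 *     echo wakeup > /sys/kernel/debug/tracing/current_tracer
 *     cat /sys/kernel/debug/tracing/tracing_max_latency
 *     cat /sys/kernel/debug/tracing/trace
 */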