tracing: add sched_set_prio tracepoint
include/trace/events/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field( int, ret )
        ),

        TP_fast_assign(
                __entry->ret = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);
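
/*
 * Illustrative sketch (an assumption about the call sites, not something this
 * header defines): each TRACE_EVENT() above expands into a trace_<name>()
 * function that the instrumented code invokes, so kthread_stop() would be
 * expected to pair the two events roughly like this ('k' and 'ret' are
 * hypothetical locals):
 *
 *        trace_sched_kthread_stop(k);
 *        ... wait for the kthread to finish ...
 *        trace_sched_kthread_stop_ret(ret);
 */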

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(__perf_task(p)),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int, prio )
                __field( int, success )
                __field( int, target_cpu )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
                __entry->success = 1; /* rudiment, kill when possible */
                __entry->target_cpu = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));
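
/*
 * How the class is meant to be consumed (a sketch for illustration, not a
 * definition made here): DECLARE_EVENT_CLASS() describes the record layout
 * once, and each DEFINE_EVENT() stamps out a trace_<name>() call that reuses
 * it, so the scheduler side presumably ends up emitting:
 *
 *        trace_sched_waking(p);        <-- from the context doing the wakeup
 *        trace_sched_wakeup(p);        <-- once p->state becomes TASK_RUNNING
 *        trace_sched_wakeup_new(p);    <-- for a freshly forked task
 */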

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

        /*
         * Preemption ignores task state, therefore preempted tasks are always
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */
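
/*
 * Usage sketch (the usual trace-event pattern, stated here as an assumption
 * rather than defined by this file): the helper above only exists in the one
 * translation unit that instantiates the tracepoints, i.e. the file that does
 * roughly:
 *
 *        #define CREATE_TRACE_POINTS
 *        #include <trace/events/sched.h>
 *
 * Every other includer omits CREATE_TRACE_POINTS and only sees the trace_*()
 * wrappers generated by the TRACE_EVENT() machinery.
 */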

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(bool preempt,
                 struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(preempt, prev, next),

        TP_STRUCT__entry(
                __array( char, prev_comm, TASK_COMM_LEN )
                __field( pid_t, prev_pid )
                __field( int, prev_prio )
                __field( long, prev_state )
                __array( char, next_comm, TASK_COMM_LEN )
                __field( pid_t, next_pid )
                __field( int, next_prio )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid = prev->pid;
                __entry->prev_prio = prev->prio;
                __entry->prev_state = __trace_sched_switch_state(preempt, prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid = next->pid;
                __entry->next_prio = next->prio;
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->prev_state & (TASK_STATE_MAX-1) ?
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "K" }, { 256, "W" }, { 512, "P" },
                                { 1024, "N" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
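
/*
 * Call-site sketch (an assumption about the scheduler core, not part of this
 * header): this event would be emitted once per context switch, with the
 * runqueue lock held, roughly as:
 *
 *        trace_sched_switch(preempt, prev, next);
 *
 * prev_state is rendered with the usual one-letter state codes (S, D, T, ...)
 * and falls back to "R" for a running task; the trailing "+" flags a task
 * that was preempted while still runnable (see __trace_sched_switch_state()).
 */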

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int, prio )
                __field( int, orig_cpu )
                __field( int, dest_cpu )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
                __entry->orig_cpu = task_cpu(p);
                __entry->dest_cpu = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int, prio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int, prio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid = pid_nr(pid);
                __entry->prio = current->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array( char, parent_comm, TASK_COMM_LEN )
                __field( pid_t, parent_pid )
                __array( char, child_comm, TASK_COMM_LEN )
                __field( pid_t, child_pid )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string( filename, bprm->filename )
                __field( pid_t, pid )
                __field( pid_t, old_pid )
        ),

        TP_fast_assign(
                __assign_str(filename, bprm->filename);
                __entry->pid = p->pid;
                __entry->old_pid = old_pid;
        ),

        TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
                  __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(__perf_task(tsk), __perf_count(delay)),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( u64, delay )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
                __entry->delay = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                  __entry->comm, __entry->pid,
                  (unsigned long long)__entry->delay)
);


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
        TP_PROTO(struct task_struct *tsk, u64 delay),
        TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
        TP_PROTO(struct task_struct *tsk, u64 delay),
        TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
        TP_PROTO(struct task_struct *tsk, u64 delay),
        TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
        TP_PROTO(struct task_struct *tsk, u64 delay),
        TP_ARGS(tsk, delay));
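
/*
 * Consumption sketch (illustrative assumption, nothing here is defined by
 * this header): all sched_stat_* events above share the sched_stat_template
 * layout, so they report the same comm/pid/delay triple for wait, sleep,
 * iowait and blocked time. From userspace, enabling one of them might look
 * like:
 *
 *        echo 1 > /sys/kernel/debug/tracing/events/sched/sched_stat_wait/enable
 *        cat /sys/kernel/debug/tracing/trace_pipe
 */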

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, __perf_count(runtime), vruntime),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( u64, runtime )
                __field( u64, vruntime )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
                __entry->runtime = runtime;
                __entry->vruntime = vruntime;
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                  __entry->comm, __entry->pid,
                  (unsigned long long)__entry->runtime,
                  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
        TP_ARGS(tsk, runtime, vruntime));

DECLARE_EVENT_CLASS(sched_prio_template,

        TP_PROTO(struct task_struct *tsk, int newprio),

        TP_ARGS(tsk, newprio),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int, oldprio )
                __field( int, newprio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
                __entry->oldprio = tsk->prio;
                __entry->newprio = newprio;
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                  __entry->comm, __entry->pid,
                  __entry->oldprio, __entry->newprio)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
DEFINE_EVENT(sched_prio_template, sched_pi_setprio,
        TP_PROTO(struct task_struct *tsk, int newprio),
        TP_ARGS(tsk, newprio));

/*
 * Tracepoint for priority changes of a task.
 */
DEFINE_EVENT(sched_prio_template, sched_set_prio,
        TP_PROTO(struct task_struct *tsk, int newprio),
        TP_ARGS(tsk, newprio));
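
/*
 * Call-site sketch (an assumption based on this patch's intent; the callers
 * live outside this header): sched_set_prio is meant to fire wherever the
 * scheduler rewrites a task's priority, e.g. from set_user_nice() or
 * __setscheduler() in kernel/sched/core.c, roughly as:
 *
 *        trace_sched_set_prio(p, newprio);
 *
 * Because TP_fast_assign() samples oldprio from tsk->prio, the call has to
 * be made before the new priority is actually written to the task.
 */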

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
        TP_PROTO(struct task_struct *tsk),
        TP_ARGS(tsk),

        TP_STRUCT__entry(
                __array( char, comm, TASK_COMM_LEN )
                __field( pid_t, pid )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, pid )
                __field( pid_t, tgid )
                __field( pid_t, ngid )
                __field( int, src_cpu )
                __field( int, src_nid )
                __field( int, dst_cpu )
                __field( int, dst_nid )
        ),

        TP_fast_assign(
                __entry->pid = task_pid_nr(tsk);
                __entry->tgid = task_tgid_nr(tsk);
                __entry->ngid = task_numa_group_id(tsk);
                __entry->src_cpu = src_cpu;
                __entry->src_nid = cpu_to_node(src_cpu);
                __entry->dst_cpu = dst_cpu;
                __entry->dst_nid = cpu_to_node(dst_cpu);
        ),

        TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
                __entry->pid, __entry->tgid, __entry->ngid,
                __entry->src_cpu, __entry->src_nid,
                __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, src_pid )
                __field( pid_t, src_tgid )
                __field( pid_t, src_ngid )
                __field( int, src_cpu )
                __field( int, src_nid )
                __field( pid_t, dst_pid )
                __field( pid_t, dst_tgid )
                __field( pid_t, dst_ngid )
                __field( int, dst_cpu )
                __field( int, dst_nid )
        ),

        TP_fast_assign(
                __entry->src_pid = task_pid_nr(src_tsk);
                __entry->src_tgid = task_tgid_nr(src_tsk);
                __entry->src_ngid = task_numa_group_id(src_tsk);
                __entry->src_cpu = src_cpu;
                __entry->src_nid = cpu_to_node(src_cpu);
                __entry->dst_pid = task_pid_nr(dst_tsk);
                __entry->dst_tgid = task_tgid_nr(dst_tsk);
                __entry->dst_ngid = task_numa_group_id(dst_tsk);
                __entry->dst_cpu = dst_cpu;
                __entry->dst_nid = cpu_to_node(dst_cpu);
        ),

        TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
                __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
                __entry->src_cpu, __entry->src_nid,
                __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
                __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

        TP_PROTO(int cpu),

        TP_ARGS(cpu),

        TP_STRUCT__entry(
                __field( int, cpu )
        ),

        TP_fast_assign(
                __entry->cpu = cpu;
        ),

        TP_printk("cpu=%d", __entry->cpu)
);
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>