tracing: extend sched_pi_setprio
[deliverable/linux.git] include/trace/events/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

#define SCHEDULING_POLICY				\
	EM( SCHED_NORMAL,	"SCHED_NORMAL")		\
	EM( SCHED_FIFO,		"SCHED_FIFO")		\
	EM( SCHED_RR,		"SCHED_RR")		\
	EM( SCHED_BATCH,	"SCHED_BATCH")		\
	EM( SCHED_IDLE,		"SCHED_IDLE")		\
	EMe(SCHED_DEADLINE,	"SCHED_DEADLINE")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

SCHEDULING_POLICY

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}

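/*
 * Illustrative sketch (not part of the header): with the definitions
 * above, the two expansions of SCHEDULING_POLICY are roughly
 * equivalent to
 *
 *	TRACE_DEFINE_ENUM(SCHED_NORMAL);	// first pass: export enums
 *	...
 *	TRACE_DEFINE_ENUM(SCHED_DEADLINE);
 *
 * and, when passed to __print_symbolic(),
 *
 *	{ SCHED_NORMAL,   "SCHED_NORMAL" },	// second pass: value->string
 *	...
 *	{ SCHED_DEADLINE, "SCHED_DEADLINE" }
 *
 * EMe() differs from EM() only in omitting the trailing comma, so the
 * second expansion forms a valid initializer list.
 */
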
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
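
/*
 * Illustrative sketch (an assumption about the call site, not part of
 * this header): TRACE_EVENT() generates a trace_sched_kthread_stop()
 * function that the kthread code calls at the instrumented point,
 * together with the _ret variant defined below, e.g.
 *
 *	int kthread_stop(struct task_struct *k)
 *	{
 *		int ret;
 *
 *		trace_sched_kthread_stop(k);
 *		...
 *		trace_sched_kthread_stop_ret(ret);
 *		return ret;
 *	}
 */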

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

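/*
 * Illustrative sketch (assumed call sites, not part of this header):
 * DEFINE_EVENT() stamps out several tracepoints that share the
 * sched_wakeup_template record layout and print format. In the wakeup
 * path this roughly looks like
 *
 *	static int try_to_wake_up(struct task_struct *p, ...)
 *	{
 *		trace_sched_waking(p);	// always the waking context
 *		...
 *		trace_sched_wakeup(p);	// may fire on the remote CPU
 *	}
 */
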
TRACE_EVENT_MAP(sched_waking, sched_waking_prio,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,		comm,	TASK_COMM_LEN	)
		__field(	pid_t,		pid			)
		__field(	int,		target_cpu		)
		__field(	unsigned int,	policy			)
		__field(	int,		nice			)
		__field(	unsigned int,	rt_priority		)
		__field(	u64,		dl_runtime		)
		__field(	u64,		dl_deadline		)
		__field(	u64,		dl_period		)
		__array(	char,		top_waiter_comm, TASK_COMM_LEN	)
		__field(	pid_t,		top_waiter_pid		)
	),

	TP_fast_assign(
		struct task_struct *top_waiter = rt_mutex_get_top_task(p);

		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->target_cpu	= task_cpu(p);
		__entry->policy		= rt_mutex_get_effective_policy(
						p->policy, p->prio);
		__entry->nice		= task_nice(p);
		__entry->rt_priority	= rt_mutex_get_effective_rt_prio(
						p->prio);
		__entry->dl_runtime	= dl_prio(p->prio) ?
						p->dl.dl_runtime : 0;
		__entry->dl_deadline	= dl_prio(p->prio) ?
						p->dl.dl_deadline : 0;
		__entry->dl_period	= dl_prio(p->prio) ?
						p->dl.dl_period : 0;
		if (top_waiter) {
			memcpy(__entry->top_waiter_comm, top_waiter->comm,
			       TASK_COMM_LEN);
			__entry->top_waiter_pid = top_waiter->pid;
		} else {
			__entry->top_waiter_comm[0] = '\0';
			__entry->top_waiter_pid = -1;
		}
	),

	TP_printk("comm=%s, pid=%d, target_cpu=%03d, policy=%s, "
		  "nice=%d, rt_priority=%u, dl_runtime=%Lu, "
		  "dl_deadline=%Lu, dl_period=%Lu, "
		  "top_waiter_comm=%s, top_waiter_pid=%d",
		  __entry->comm, __entry->pid, __entry->target_cpu,
		  __print_symbolic(__entry->policy, SCHEDULING_POLICY),
		  __entry->nice, __entry->rt_priority, __entry->dl_runtime,
		  __entry->dl_deadline, __entry->dl_period,
		  __entry->top_waiter_comm, __entry->top_waiter_pid)
);

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */

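/*
 * Illustrative note (not normative): TASK_STATE_MAX is the first bit
 * above all real state flags, so __trace_sched_switch_state() can
 * borrow it to mark "this switch was a preemption" without clashing
 * with an actual state. Since TASK_RUNNING == 0, a preempted task
 * records prev_state == TASK_STATE_MAX; TP_printk() below then prints
 * the masked low bits as the state letters ("R" when zero) and appends
 * "+" when the borrowed preemption bit is set, e.g. "R+".
 */
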
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

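/*
 * Illustrative sketch (hypothetical values, not part of the header):
 * a rendered sched_switch record looks like
 *
 *	prev_comm=bash prev_pid=2143 prev_prio=120 prev_state=S ==>
 *	    next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * with prev_state rendered as "R+" when the switch was a preemption.
 */
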
/*
 * Tracepoint for task switches, performed by the scheduler, carrying
 * the full scheduling attributes of both tasks:
 */
TRACE_EVENT_MAP(sched_switch, sched_switch_prio,
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,		prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,		prev_pid			)
		__field(	long,		prev_state			)
		__field(	unsigned int,	prev_policy			)
		__field(	int,		prev_nice			)
		__field(	unsigned int,	prev_rt_priority		)
		__field(	u64,		prev_dl_runtime			)
		__field(	u64,		prev_dl_deadline		)
		__field(	u64,		prev_dl_period			)
		__array(	char,		prev_top_waiter_comm, TASK_COMM_LEN )
		__field(	pid_t,		prev_top_waiter_pid		)
		__array(	char,		next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,		next_pid			)
		__field(	unsigned int,	next_policy			)
		__field(	int,		next_nice			)
		__field(	unsigned int,	next_rt_priority		)
		__field(	u64,		next_dl_runtime			)
		__field(	u64,		next_dl_deadline		)
		__field(	u64,		next_dl_period			)
		__array(	char,		next_top_waiter_comm, TASK_COMM_LEN )
		__field(	pid_t,		next_top_waiter_pid		)
	),

	TP_fast_assign(
		struct task_struct *prev_top = rt_mutex_get_top_task(prev);
		struct task_struct *next_top = rt_mutex_get_top_task(next);

		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->prev_pid		= prev->pid;
		__entry->prev_state		= __trace_sched_switch_state(
							preempt, prev);
		__entry->prev_policy		= rt_mutex_get_effective_policy(
							prev->policy, prev->prio);
		__entry->prev_nice		= task_nice(prev);
		__entry->prev_rt_priority	= rt_mutex_get_effective_rt_prio(
							prev->prio);
		__entry->prev_dl_runtime	= dl_prio(prev->prio) ?
							prev->dl.dl_runtime : 0;
		__entry->prev_dl_deadline	= dl_prio(prev->prio) ?
							prev->dl.dl_deadline : 0;
		__entry->prev_dl_period		= dl_prio(prev->prio) ?
							prev->dl.dl_period : 0;
		if (prev_top) {
			memcpy(__entry->prev_top_waiter_comm, prev_top->comm,
			       TASK_COMM_LEN);
			__entry->prev_top_waiter_pid = prev_top->pid;
		} else {
			__entry->prev_top_waiter_comm[0] = '\0';
			__entry->prev_top_waiter_pid = -1;
		}

		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->next_pid		= next->pid;
		__entry->next_policy		= rt_mutex_get_effective_policy(
							next->policy, next->prio);
		__entry->next_nice		= task_nice(next);
		__entry->next_rt_priority	= rt_mutex_get_effective_rt_prio(
							next->prio);
		__entry->next_dl_runtime	= dl_prio(next->prio) ?
							next->dl.dl_runtime : 0;
		__entry->next_dl_deadline	= dl_prio(next->prio) ?
							next->dl.dl_deadline : 0;
		__entry->next_dl_period		= dl_prio(next->prio) ?
							next->dl.dl_period : 0;
		if (next_top) {
			memcpy(__entry->next_top_waiter_comm, next_top->comm,
			       TASK_COMM_LEN);
			__entry->next_top_waiter_pid = next_top->pid;
		} else {
			__entry->next_top_waiter_comm[0] = '\0';
			__entry->next_top_waiter_pid = -1;
		}
	),

	TP_printk("prev_comm=%s, prev_pid=%d, prev_policy=%s, prev_nice=%d, "
		  "prev_rt_priority=%u, prev_dl_runtime=%Lu, "
		  "prev_dl_deadline=%Lu, prev_dl_period=%Lu, "
		  "prev_state=%s%s, prev_top_waiter_comm=%s, "
		  "prev_top_waiter_pid=%d ==> next_comm=%s, next_pid=%d, "
		  "next_policy=%s, next_nice=%d, next_rt_priority=%u, "
		  "next_dl_runtime=%Lu, next_dl_deadline=%Lu, "
		  "next_dl_period=%Lu, next_top_waiter_comm=%s, "
		  "next_top_waiter_pid=%d",
		  __entry->prev_comm, __entry->prev_pid,
		  __print_symbolic(__entry->prev_policy, SCHEDULING_POLICY),
		  __entry->prev_nice, __entry->prev_rt_priority,
		  __entry->prev_dl_runtime, __entry->prev_dl_deadline,
		  __entry->prev_dl_period,
		  __entry->prev_state & (TASK_STATE_MAX-1) ?
		    __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		  __entry->prev_state & TASK_STATE_MAX ? "+" : "",
		  __entry->prev_top_waiter_comm, __entry->prev_top_waiter_pid,
		  __entry->next_comm, __entry->next_pid,
		  __print_symbolic(__entry->next_policy, SCHEDULING_POLICY),
		  __entry->next_nice, __entry->next_rt_priority,
		  __entry->next_dl_runtime, __entry->next_dl_deadline,
		  __entry->next_dl_period, __entry->next_top_waiter_comm,
		  __entry->next_top_waiter_pid)
);

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid	= pid_nr(pid);
		__entry->prio	= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

TRACE_EVENT_MAP(sched_process_fork, sched_process_fork_prio,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,		parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,		parent_pid			)
		__array(	char,		child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,		child_pid			)
		__field(	unsigned int,	child_policy			)
		__field(	int,		child_nice			)
		__field(	unsigned int,	child_rt_priority		)
		__field(	u64,		child_dl_runtime		)
		__field(	u64,		child_dl_deadline		)
		__field(	u64,		child_dl_period			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
		__entry->child_policy	= rt_mutex_get_effective_policy(
						child->policy, child->prio);
		__entry->child_nice	= task_nice(child);
		__entry->child_rt_priority = rt_mutex_get_effective_rt_prio(
						child->prio);
		__entry->child_dl_runtime = dl_prio(child->prio) ?
						child->dl.dl_runtime : 0;
		__entry->child_dl_deadline = dl_prio(child->prio) ?
						child->dl.dl_deadline : 0;
		__entry->child_dl_period = dl_prio(child->prio) ?
						child->dl.dl_period : 0;
	),

	TP_printk("comm=%s, pid=%d, child_comm=%s, child_pid=%d, "
		  "child_policy=%s, child_nice=%d, "
		  "child_rt_priority=%u, child_dl_runtime=%Lu, "
		  "child_dl_deadline=%Lu, child_dl_period=%Lu",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid,
		  __print_symbolic(__entry->child_policy, SCHEDULING_POLICY),
		  __entry->child_nice, __entry->child_rt_priority,
		  __entry->child_dl_runtime, __entry->child_dl_deadline,
		  __entry->child_dl_period)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

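/*
 * Note (an aside, not part of the header): __perf_task() and
 * __perf_count() only take effect when the tracepoint is consumed as a
 * perf event; they select which argument names the task to attribute
 * the event to and which value to add to the event count, so e.g.
 * sampling sched:sched_stat_wait weights each sample by the delay
 * rather than counting occurrences.
 */
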
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	runtime			)
		__field(	u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	oldprio			)
		__field(	int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);

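/*
 * Illustrative sketch (assumed call site, not part of this header):
 * the PI boost/deboost path fires this tracepoint just before the
 * priority is rewritten, so oldprio can still be read from the task:
 *
 *	void rt_mutex_setprio(struct task_struct *p, int prio)
 *	{
 *		...
 *		trace_sched_pi_setprio(p, prio);  // old prio still in p->prio
 *		p->prio = prio;
 *		...
 *	}
 */
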
/*
 * Extract the complete scheduling information from before and after
 * the priority change.
 */
TRACE_EVENT_MAP(sched_pi_setprio, sched_pi_update_prio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array(	char,		comm,	TASK_COMM_LEN	)
		__field(	pid_t,		pid			)
		__field(	unsigned int,	old_policy		)
		__field(	int,		old_nice		)
		__field(	unsigned int,	old_rt_priority		)
		__field(	u64,		old_dl_runtime		)
		__field(	u64,		old_dl_deadline		)
		__field(	u64,		old_dl_period		)
		__array(	char,		top_waiter_comm, TASK_COMM_LEN	)
		__field(	pid_t,		top_waiter_pid		)
		__field(	unsigned int,	new_policy		)
		__field(	int,		new_nice		)
		__field(	unsigned int,	new_rt_priority		)
		__field(	u64,		new_dl_runtime		)
		__field(	u64,		new_dl_deadline		)
		__field(	u64,		new_dl_period		)
	),

	TP_fast_assign(
		struct task_struct *top_waiter = rt_mutex_get_top_task(tsk);

		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->old_policy	= rt_mutex_get_effective_policy(
						tsk->policy, tsk->prio);
		__entry->old_nice	= task_nice(tsk);
		__entry->old_rt_priority = rt_mutex_get_effective_rt_prio(
						tsk->prio);
		__entry->old_dl_runtime	= dl_prio(tsk->prio) ?
						tsk->dl.dl_runtime : 0;
		__entry->old_dl_deadline = dl_prio(tsk->prio) ?
						tsk->dl.dl_deadline : 0;
		__entry->old_dl_period	= dl_prio(tsk->prio) ?
						tsk->dl.dl_period : 0;
		if (top_waiter) {
			memcpy(__entry->top_waiter_comm, top_waiter->comm,
			       TASK_COMM_LEN);
			__entry->top_waiter_pid = top_waiter->pid;
			/*
			 * The effective policy depends on the current policy
			 * of the target task.
			 */
			__entry->new_policy	= rt_mutex_get_effective_policy(
							tsk->policy, top_waiter->prio);
			__entry->new_nice	= task_nice(top_waiter);
			__entry->new_rt_priority = rt_mutex_get_effective_rt_prio(
							top_waiter->prio);
			__entry->new_dl_runtime	= dl_prio(top_waiter->prio) ?
							top_waiter->dl.dl_runtime : 0;
			__entry->new_dl_deadline = dl_prio(top_waiter->prio) ?
							top_waiter->dl.dl_deadline : 0;
			__entry->new_dl_period	= dl_prio(top_waiter->prio) ?
							top_waiter->dl.dl_period : 0;
		} else {
			__entry->top_waiter_comm[0] = '\0';
			__entry->top_waiter_pid	= -1;
			__entry->new_policy	= 0;
			__entry->new_nice	= 0;
			__entry->new_rt_priority = 0;
			__entry->new_dl_runtime	= 0;
			__entry->new_dl_deadline = 0;
			__entry->new_dl_period	= 0;
		}
	),

	TP_printk("comm=%s, pid=%d, old_policy=%s, old_nice=%d, "
		  "old_rt_priority=%u, old_dl_runtime=%Lu, "
		  "old_dl_deadline=%Lu, old_dl_period=%Lu, "
		  "top_waiter_comm=%s, top_waiter_pid=%d, new_policy=%s, "
		  "new_nice=%d, new_rt_priority=%u, "
		  "new_dl_runtime=%Lu, new_dl_deadline=%Lu, "
		  "new_dl_period=%Lu",
		  __entry->comm, __entry->pid,
		  __print_symbolic(__entry->old_policy, SCHEDULING_POLICY),
		  __entry->old_nice, __entry->old_rt_priority,
		  __entry->old_dl_runtime, __entry->old_dl_deadline,
		  __entry->old_dl_period,
		  __entry->top_waiter_comm, __entry->top_waiter_pid,
		  __entry->top_waiter_pid >= 0 ?
		    __print_symbolic(__entry->new_policy,
				     SCHEDULING_POLICY) : "",
		  __entry->new_nice, __entry->new_rt_priority,
		  __entry->new_dl_runtime, __entry->new_dl_deadline,
		  __entry->new_dl_period)
);

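/*
 * Illustrative sketch (hypothetical values, not part of the header):
 * when a SCHED_NORMAL task holding an rt_mutex is boosted by a
 * SCHED_FIFO waiter, a rendered sched_pi_update_prio record might read
 *
 *	comm=worker, pid=1234, old_policy=SCHED_NORMAL, old_nice=0,
 *	old_rt_priority=0, old_dl_runtime=0, old_dl_deadline=0,
 *	old_dl_period=0, top_waiter_comm=rt_task, top_waiter_pid=1200,
 *	new_policy=SCHED_FIFO, new_nice=0, new_rt_priority=50,
 *	new_dl_runtime=0, new_dl_deadline=0, new_dl_period=0
 *
 * i.e. the old_* fields describe the task before the change and the
 * new_* fields describe the attributes inherited from the top waiter.
 */
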
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field(	pid_t,	pid	)
		__field(	pid_t,	tgid	)
		__field(	pid_t,	ngid	)
		__field(	int,	src_cpu	)
		__field(	int,	src_nid	)
		__field(	int,	dst_cpu	)
		__field(	int,	dst_nid	)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

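/*
 * Usage sketch (an assumption about tooling, not part of the header):
 * once registered, these events can be counted from userspace to spot
 * NUMA bouncing, e.g.
 *
 *	# perf stat -a -e sched:sched_move_numa \
 *	       -e sched:sched_stick_numa -e sched:sched_swap_numa sleep 10
 */
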
TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field(	pid_t,	src_pid		)
		__field(	pid_t,	src_tgid	)
		__field(	pid_t,	src_ngid	)
		__field(	int,	src_cpu		)
		__field(	int,	src_nid		)
		__field(	pid_t,	dst_pid		)
		__field(	pid_t,	dst_tgid	)
		__field(	pid_t,	dst_ngid	)
		__field(	int,	dst_cpu		)
		__field(	int,	dst_nid		)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling CPU without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * Tracepoint for showing scheduling priority changes.
 */
TRACE_EVENT(sched_update_prio,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array(	char,		comm,	TASK_COMM_LEN	)
		__field(	pid_t,		pid			)
		__field(	unsigned int,	policy			)
		__field(	int,		nice			)
		__field(	unsigned int,	rt_priority		)
		__field(	u64,		dl_runtime		)
		__field(	u64,		dl_deadline		)
		__field(	u64,		dl_period		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->policy		= tsk->policy;
		__entry->nice		= task_nice(tsk);
		__entry->rt_priority	= tsk->rt_priority;
		__entry->dl_runtime	= tsk->dl.dl_runtime;
		__entry->dl_deadline	= tsk->dl.dl_deadline;
		__entry->dl_period	= tsk->dl.dl_period;
	),

	TP_printk("comm=%s, pid=%d, policy=%s, nice=%d, rt_priority=%u, "
		  "dl_runtime=%Lu, dl_deadline=%Lu, dl_period=%Lu",
		  __entry->comm, __entry->pid,
		  __print_symbolic(__entry->policy, SCHEDULING_POLICY),
		  __entry->nice, __entry->rt_priority,
		  __entry->dl_runtime, __entry->dl_deadline,
		  __entry->dl_period)
);
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>