kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debugging purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};

static int graph_trace_init(struct trace_array *tr)
{
        int ret = register_ftrace_graph(&trace_graph_return,
                                        &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

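/*
 * Number of decimal digits needed to print @nb; CPU numbers are
 * assumed to fit in three digits here.
 */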
static inline int log10_cpu(int nb)
{
        if (nb / 100)
                return 3;
        if (nb / 10)
                return 2;
        return 1;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int i;
        int ret;
        int log10_this = log10_cpu(cpu);
        int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Tricky - we space the CPU field according to the max
         * number of online CPUs. On a 2-cpu system it would take
         * a maximum of 1 digit - on a 128 cpu system it would
         * take up to 3 digits:
         */
        for (i = 0; i < log10_all - log10_this; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        ret = trace_seq_printf(s, "%d) ", cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

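/*
 * Print the "comm-pid" field, centered inside a column of
 * TRACE_GRAPH_PROCINFO_LENGTH characters.
 */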
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        int i;
        int ret;
        int len;
        char comm[8];
        int spaces = 0;
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];

        strncpy(comm, trace_find_cmdline(pid), 7);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!last_pids_cpu)
                return TRACE_TYPE_HANDLED;

        last_pid = per_cpu_ptr(last_pids_cpu, cpu);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
        /*
         * Context-switch trace line:

 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------

         */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

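/*
 * Peek at the next event in the ring buffer: if it is the return entry
 * for the same pid and function as @curr, then @curr is a leaf call.
 * In that case the return entry is returned (and the iterator advanced
 * past it) so the call can be printed on a single line.
 */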
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
                event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
                /* We need to consume the current entry to see the next one */
                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                         NULL);
        }

        if (!event)
                return NULL;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

/* Signal an overhead (excessive execution time) in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If the duration is not displayed, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* duration == -1: no duration available (nested entry, irq marker or comment) */
        if (duration == -1)
                return trace_seq_printf(s, " ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, " ");
}

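/*
 * Print an "==========>" / "<==========" marker when the traced address
 * lies inside the irq entry text section, i.e. on hardirq entry/exit.
 */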
static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

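/*
 * Print the duration field: microseconds with a nanosecond remainder
 * after the decimal point, padded to the width of the column.
 */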
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print msecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "| ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, 1000000000);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu | ",
                                (unsigned long)t, usecs_rem);
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "();\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

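/* Case of a nested (non-leaf) function on its call entry */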
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, pid_t pid, int cpu)
{
        int i;
        int ret;
        struct ftrace_graph_ent *call = &entry->graph_ent;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "() {\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

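/*
 * Print a function entry: first the fields common to every line
 * (pid change, irq marker, absolute time, cpu, proc), then either the
 * one-line leaf form or the nested "func() {" form.
 */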
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter)
{
        int ret;
        int cpu = iter->cpu;
        pid_t *last_entry = iter->private;
        struct trace_entry *ent = iter->ent;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Interrupt */
        ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                return print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}

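/*
 * Print a function return: the overhead and duration columns followed
 * by the closing brace at the matching depth, plus the optional overrun
 * count and irq-exit marker.
 */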
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        int i;
        int ret;
        int cpu = iter->cpu;
        pid_t *last_pid = iter->private;
        unsigned long long duration = trace->rettime - trace->calltime;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

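/* Print a TRACE_PRINT entry as a C-style comment at the current depth */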
static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
                    struct trace_entry *ent, struct trace_iterator *iter)
{
        int i;
        int ret;
        int cpu = iter->cpu;
        pid_t *last_pid = iter->private;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (trace->depth > 0)
                for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* %s", trace->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

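/*
 * Main output callback: dispatch each trace entry to the matching
 * printer according to its type.
 */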
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                struct ftrace_graph_ent_entry *field;
                trace_assign_type(field, entry);
                return print_graph_entry(field, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        case TRACE_PRINT: {
                struct print_entry *field;
                trace_assign_type(field, entry);
                return print_graph_comment(field, s, entry, iter);
        }
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

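/*
 * Print the two header lines shown at the top of the trace file.  With
 * the default flags (cpu, overhead, duration) the layout is roughly:
 *
 *  # CPU  DURATION            FUNCTION CALLS
 *  # |     |   |               |   |   |   |
 */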
static void print_graph_headers(struct seq_file *s)
{
        /* 1st line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, " TIME ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, " TASK/PID ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, " DURATION ");
        seq_printf(s, " FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, " | ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "| ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, " | | ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, " | | ");
        seq_printf(s, " | | | |\n");
}

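/*
 * Allocate per-cpu storage for the pid last seen on each cpu, used by
 * verif_pid() to emit a context-switch line when the pid changes.
 */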
static void graph_trace_open(struct trace_iterator *iter)
{
        /* pid on the last trace processed */
        pid_t *last_pid = alloc_percpu(pid_t);
        int cpu;

        if (!last_pid)
                pr_warning("function graph tracer: not enough memory\n");
        else
                for_each_possible_cpu(cpu) {
                        pid_t *pid = per_cpu_ptr(last_pid, cpu);
                        *pid = -1;
                }

        iter->private = last_pid;
}

static void graph_trace_close(struct trace_iterator *iter)
{
        percpu_free(iter->private);
}

static struct tracer graph_trace __read_mostly = {
        .name         = "function_graph",
        .open         = graph_trace_open,
        .close        = graph_trace_close,
        .init         = graph_trace_init,
        .reset        = graph_trace_reset,
        .print_line   = print_graph_function,
        .print_header = print_graph_headers,
        .flags        = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest     = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);