kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};
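
/*
 * These options are toggled at runtime through the trace_options file.
 * A usage sketch (assuming the usual debugfs/tracefs mount point):
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 */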

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the ftrace return stack of the
	 * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH) so that ftrace will ignore the
	 * record when it sees a negative index. The index is recovered on
	 * return from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, after which functions are recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets increased in this
	 * function. So it can be less than -1 only if it was filtered out
	 * via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
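
/*
 * Sketch of the calling contract (illustrative; the real hook is arch
 * code, e.g. prepare_ftrace_return() on x86): on function entry the
 * arch hook calls ftrace_push_return_trace() to save the real return
 * address on the shadow ret_stack and, when that succeeds, rewrites
 * the on-stack return address to point at the arch's
 * return_to_handler trampoline.
 */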

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
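
/*
 * Behavioural sketch of the return side (the trampoline itself is arch
 * assembly): return_to_handler saves the function's return-value
 * registers, calls ftrace_return_to_handler() to emit the exit event
 * and recover the original return address, then restores the registers
 * and jumps to that address.
 */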

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in an enabled function, or is one itself */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
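
/*
 * Illustrative PROC column output: for comm "bash" and pid 1755 the
 * centered, 14-character field comes out roughly as
 *
 *	"  bash-1755   "
 */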

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
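
/*
 * The leaf case above lets an entry/return pair with no children
 * collapse into a single output line. Illustrative rendering (names
 * and durations made up):
 *
 *	1)   0.535 us  |        kfree();	<- leaf, one line
 *	1)             |        sys_read() {	<- nested entry ...
 *	1)   3.126 us  |        }		<- ... and its return
 */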

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
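
/*
 * Worked example of the formatting above (value illustrative): a
 * duration of 3414 ns gives msecs_str "3" and nsecs_rem 414, printed
 * as "3.414 us" and then padded with spaces to keep the column width.
 */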

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
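
/*
 * Resulting DURATION column with the overhead markers from above
 * (values illustrative):
 *
 *	   2.345 us |		no marker: under 10 us
 *	+ 12.345 us |		'+': over 10 us
 *	! 123.456 us |		'!': over 100 us
 */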

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
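
/*
 * Putting entry and return together, the rendered trace looks roughly
 * like this (illustrative):
 *
 *	1)               |  sys_write() {
 *	1)   0.616 us    |    fget_light();
 *	1)   8.214 us    |  }
 *
 * With funcgraph-tail enabled, the final line also carries the
 * function name after the closing brace.
 */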

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}
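
/*
 * With the default option set (CPU, overhead, duration) the header
 * printed here looks roughly like:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */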

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);
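
/*
 * Usage sketch for the file created above (path assumes the usual
 * debugfs mount point):
 *
 *	echo 3 > /sys/kernel/debug/tracing/max_graph_depth	# limit nesting
 *	echo 0 > /sys/kernel/debug/tracing/max_graph_depth	# no limit
 */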

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);