/*
 * kernel/trace/trace_functions_graph.c
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer, unsigned long *retp)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is used. To support filtering out
         * specific functions, it makes the index negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
         * negative index it will ignore the record. The index is recovered
         * when returning from the filtered function by adding
         * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
         * normally again.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
#endif
        *depth = current->curr_ret_stack;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the placeholder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *                         to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int index = task->curr_ret_stack;
        int i;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        if (index < -1)
                index += FTRACE_NOTRACE_DEPTH;

        if (index < 0)
                return ret;

        for (i = 0; i <= index; i++)
                if (task->ret_stack[i].retp == retp)
                        return task->ret_stack[i].ret;

        return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int task_idx;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        task_idx = task->curr_ret_stack;

        if (!task->ret_stack || task_idx < *idx)
                return ret;

        task_idx -= *idx;
        (*idx)++;

        return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

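/*
 * Write a function entry event (TRACE_GRAPH_ENT) into the ring buffer,
 * honoring any event filters.
 */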
int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

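/*
 * Entry callback passed to register_ftrace_graph(). Applies the task,
 * depth, irq and notrace filters before recording the event.
 */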
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(tr))
                return 0;

        /* Trace it when it is nested in an enabled function, or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of the ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_thresh is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

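/*
 * Record a single function as an entry/return pair with identical
 * timestamps, i.e. a zero-duration call.
 */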
static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

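/*
 * Write a function return event (TRACE_GRAPH_RET) into the ring buffer,
 * honoring any event filters.
 */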
void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

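/*
 * Return callback passed to register_ftrace_graph(); the per-cpu
 * disabled counter guards against recursive recording.
 */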
void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

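/*
 * Peek at the event following @curr: if it is the matching
 * TRACE_GRAPH_RET for the same pid and function, the call is a leaf.
 * Return that event (and advance past it), otherwise return NULL.
 */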
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

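/*
 * Print the "==========>" / "<==========" markers that flag an entry
 * into, or a return from, the irqentry text section.
 */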
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

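/* Case of a nested (non-leaf) function on its call entry */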
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

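/*
 * Print the common line prefix: pid-change block, irq markers and,
 * depending on the flags, the absolute time, CPU, proc and latency
 * columns.
 */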
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

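/*
 * Print a function entry event, choosing between the leaf and nested
 * layouts, and remember a failed write so the entry can be replayed.
 */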
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

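/*
 * Print a function return event: the closing brace with its duration,
 * plus the function name when the entry was lost or funcgraph-tail
 * is set.
 */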
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

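/*
 * Print any other event type (printk messages etc.) as a C-style
 * comment at the current nesting depth.
 */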
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

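/* React to a runtime change of the tracer options. */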
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


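/*
 * max_graph_depth file: functions nested deeper than this are not
 * traced; 0 means no limit.
 */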
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);