/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t last_pid;
        int depth;
        int depth_irq;
        int ignore;
        unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry ent;
        struct ftrace_graph_ret_entry ret;
        int failed;
        int cpu;
};

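/* Columns of indentation added per level of call nesting in the output */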
#define TRACE_GRAPH_INDENT 2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};
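
/*
 * These options are toggled through the trace_options tracefs file (or
 * the options/ directory). A usage sketch, assuming tracefs is mounted
 * at /sys/kernel/tracing and function_graph is the current tracer:
 *
 *	# echo funcgraph-proc > trace_options
 *	# echo nofuncgraph-cpu > trace_options
 */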

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in the
 * DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer, unsigned long *retp)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is in use. To support filtering out
         * specific functions, it makes the index negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
         * negative index it will ignore the record. The index is recovered
         * when returning from the filtered function by adding
         * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
         * normally again.
         *
         * The curr_ret_stack is initialized to -1 and gets incremented
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
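        /*
         * Worked example (the exact constant lives in ftrace.h): if
         * FTRACE_NOTRACE_DEPTH were 65536, an index of 3 would become
         * 3 - 65536 = -65533 on entering a notrace'd function, and 3
         * again once 65536 is added back on return.
         */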
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
#endif
        *depth = current->curr_ret_stack;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 optimized for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
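/*
 * A typical unwinder loop uses it roughly like this (sketch only; real
 * unwind code is arch-specific):
 *
 *	int graph_idx = 0;
 *	...
 *	for each frame:
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 */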
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int index = task->curr_ret_stack;
        int i;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        if (index < -1)
                index += FTRACE_NOTRACE_DEPTH;

        if (index < 0)
                return ret;

        for (i = 0; i <= index; i++)
                if (task->ret_stack[i].retp == retp)
                        return task->ret_stack[i].ret;

        return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int task_idx;

        if (ret != (unsigned long)return_to_handler)
                return ret;

        task_idx = task->curr_ret_stack;

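        /*
         * Without a saved stack location to match against, assume the
         * unwinder encounters the return_to_handler addresses in order:
         * the *idx'th one seen corresponds to the *idx'th entry from the
         * top of the ret_stack.
         */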
        if (!task->ret_stack || task_idx < *idx)
                return ret;

        task_idx -= *idx;
        (*idx)++;

        return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

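/*
 * Entry callback registered with register_ftrace_graph(); returning 0
 * tells the core not to record the current function entry.
 */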
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(tr))
                return 0;

        /* Trace it if it is nested inside an enabled function or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of the ret stack negative to indicate that it
         * should ignore further functions. But it needs its own ret stack
         * entry to recover the original index in order to continue tracing
         * after returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_thresh is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
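        /*
         * Per-cpu recursion guard: only record when we are the first
         * (non-nested) user of this CPU's data (disabled went 0 -> 1).
         */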
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

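/*
 * Emit a paired entry/return event with zero duration for a single
 * function hit, so a graph-style line can be produced for it (used
 * e.g. by tracers that support a display-graph option).
 */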
static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH 14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

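/*
 * The duration argument is in nanoseconds; e.g. 1234567 ns comes out
 * as "1234.567 us", padded to keep the column aligned.
 */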
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an execution-time overhead to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1. Since this
                 * is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1. This is the
                 * return from a function, so we now want the comments
                 * to display at the same level as the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, we may need to print out the
         * missing entry, which would otherwise never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can safely be saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

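/*
 * Invoked when one of the tracer options above is flipped through
 * trace_options; propagate the irq-skipping and time-accounting
 * settings to the ftrace core.
 */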
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type = TRACE_GRAPH_ENT,
        .funcs = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type = TRACE_GRAPH_RET,
        .funcs = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name = "function_graph",
        .update_thresh = graph_trace_update_thresh,
        .open = graph_trace_open,
        .pipe_open = graph_trace_open,
        .close = graph_trace_close,
        .pipe_close = graph_trace_close,
        .init = graph_trace_init,
        .reset = graph_trace_reset,
        .print_line = print_graph_function,
        .print_header = print_graph_headers,
        .flags = &tracer_flags,
        .set_flag = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function_graph,
#endif
};


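/*
 * The "max_graph_depth" tracefs file limits how deep the tracer follows
 * nested calls; 0 means no limit. Usage sketch, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *	# echo function_graph > current_tracer
 *	# echo 3 > max_graph_depth
 */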
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open = tracing_open_generic,
        .write = graph_depth_write,
        .read = graph_depth_read,
        .llseek = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);