tracing: Have max_latency be defined for HWLAT_TRACER as well
kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

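/*
 * Per-cpu bookkeeping used while formatting the output: the last pid
 * printed, the current nesting depth, the depth at which we entered irq
 * code, whether the next event on this cpu should be skipped, and the
 * entry functions seen so far (used to match returns to their entries).
 */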
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

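/*
 * Maximum call depth to trace; 0 means no limit. Writable through the
 * max_graph_depth file created in init_graph_tracefs() below.
 */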
static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
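/*
 * Each option above can be toggled at run time through the tracefs
 * trace_options file, e.g. (illustrative; the mount point may vary):
 *
 *   # echo funcgraph-proc   > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 */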

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

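/* The trace_array being written to; published by set_graph_array(). */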
static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, the index is made negative by subtracting a
	 * large value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
	 * negative index it ignores the record. The index is recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, after which functions are recorded normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented in
	 * this function. So it can be less than -1 only if it was filtered
	 * out via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 optimized for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
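/*
 * Note: this is typically reached from the arch return trampoline
 * (e.g. return_to_handler on x86), which replaced the function's real
 * return address when the entry was pushed.
 */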
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	/* Trace it when it is nested in, or is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

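/*
 * Emit a matched entry/return pair with identical timestamps, so a single
 * address shows up as one zero-duration call in the graph output. Used
 * through trace_graph_function() by other tracers that reuse the graph
 * output format.
 */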
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

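/*
 * With tracing_thresh set (via the tracing_thresh tracefs file), entry
 * events are not written (see trace_graph_entry() above) and returns are
 * dropped here unless the function ran for at least the threshold.
 */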
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
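	/* Truncate the comm; the "comm-pid" field below is a fixed, centered column */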
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

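/*
 * A "leaf" is an entry event immediately followed by its own return
 * event; print_graph_entry_leaf() collapses the pair into a single
 * "func();" line instead of an open/close "func() { ... }" pair.
 */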
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

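/*
 * Interrupt entry/exit is flagged in the DURATION column, roughly
 * (illustrative output; exact spacing depends on the enabled flags):
 *
 *  1)   ==========> |
 *  1)               |  do_IRQ() {
 */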
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry function. Let's not trace it and clear the entry depth,
	 * since we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

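/*
 * max_graph_depth limits how deep the tracer follows call chains;
 * writing 0 removes the limit. A typical session (illustrative only;
 * tracefs may also be mounted at /sys/kernel/tracing):
 *
 *   # echo 2 > /sys/kernel/debug/tracing/max_graph_depth
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */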
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);