/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do { if (WARN_ON(cond)) ftrace_kill(); } while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do { if (WARN_ON_ONCE(cond)) ftrace_kill(); } while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 * between the disabling of tracing and the last call to a tracing function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
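/*
 * Illustrative arithmetic (an assumption, not taken from this file):
 * with 4K pages and a struct dyn_ftrace on the order of a few dozen
 * bytes, ENTRIES_PER_PAGE works out to a few hundred records per page,
 * so the NR_TO_INIT estimate of 10000 records costs only a handful of
 * pages at boot.
 */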
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ip = rec->ip;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
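/*
 * Note (added): stop_machine() runs __ftrace_modify_code() while every
 * other online CPU spins with interrupts disabled, so no CPU can be
 * executing an mcount call site while its instruction is rewritten.
 */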
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
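/*
 * Example usage (illustrative sketch, not part of the original file):
 * a built-in tracer that only wants scheduler functions could install
 * its filters before enabling tracing.  The function names below are
 * only examples; a full name and a front-wildcard are both accepted.
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 0);
 */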
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
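/*
 * Example usage (illustrative sketch, not part of the original file):
 * the inverse of the filter example above, keeping a noisy function
 * out of the trace.  The name is hypothetical.
 *
 *	ftrace_set_notrace("some_hot_function", strlen("some_hot_function"), 1);
 */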
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}
static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				goto out;
			}
		}
	}
 out:
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
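/*
 * Example usage from user space (illustrative sketch, not part of the
 * original file; the debugfs mount point is an assumption):
 *
 *	int fd = open("/sys/kernel/debug/tracing/set_ftrace_pid", O_WRONLY);
 *	dprintf(fd, "%d", getpid());	-- write "-1" instead to disable
 *	close(fd);
 */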
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
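/*
 * Example usage (illustrative sketch, not part of the original file):
 * a minimal tracer provides a notrace callback and an ftrace_ops that
 * points at it, then registers it.  The names below are hypothetical
 * and my_hit_count is assumed to be declared elsewhere.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_long_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 */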
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
2018 static int start_graph_tracing(void)
2020 struct ftrace_ret_stack
**ret_stack_list
;
2023 ret_stack_list
= kmalloc(FTRACE_RETSTACK_ALLOC_SIZE
*
2024 sizeof(struct ftrace_ret_stack
*),
2027 if (!ret_stack_list
)
2031 ret
= alloc_retstack_tasklist(ret_stack_list
);
2032 } while (ret
== -EAGAIN
);
2034 kfree(ret_stack_list
);
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
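/*
 * Example usage (illustrative sketch, not part of the original file):
 * the caller hands in one callback for function entry and one for
 * function return; the names below are hypothetical.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	-- nonzero means "trace this function"
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */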
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */