/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs stop calling
 * the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
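
/*
 * Hash of mcount call sites seen by ftrace_record_ip() but not yet
 * converted into nops by the ftraced daemon.
 */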
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
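
/*
 * Sizing note (illustrative figures, not from the original source):
 * with a 4096-byte PAGE_SIZE and a struct dyn_ftrace of roughly 16
 * bytes, one page holds on the order of 250 records. The exact count
 * depends on the architecture's page size and structure layout.
 */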

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a record on the free list must be marked free */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
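
/*
 * The mcount hook while recording is active: hash the call site ip
 * and, if it has not been seen before, store it so the ftraced daemon
 * can convert the site later. This runs on every traced function
 * entry, so it must guard against recursion and stay cheap.
 */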
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
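
/*
 * Summary of the filter decisions made in __ftrace_replace_code()
 * below when filtering is active (enable && ftrace_filtered):
 *
 *	FILTER set,   ENABLED set   -> leave the site alone
 *	FILTER set,   ENABLED clear -> patch in the call (enable)
 *	FILTER clear, ENABLED clear -> leave the site alone
 *	FILTER clear, ENABLED set   -> patch in the nop (disable)
 */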

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
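
/*
 * The command bits above are executed under stop_machine_run(), which
 * keeps every other CPU from running while the text is being patched,
 * so no CPU can execute an instruction that is half modified.
 */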
static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
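
/*
 * The ftraced daemon: wakes up once a second and, when new call sites
 * have been recorded since the last pass, folds them into the table by
 * patching each site to a nop under stop_machine.
 */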
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				/* print before resetting the total */
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_update_tot_cnt = 0;
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* skip failed records and, in filter mode, unfiltered ones */
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	/* parse the '*' wildcards to pick a match type */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else if (type == MATCH_END_ONLY) {
				type = MATCH_MIDDLE_ONLY;
				/* zap trailing '*' so strstr() sees the text */
				buff[i] = 0;
			} else {
				match = i;
				type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
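
/*
 * Wildcard examples for the parser above (illustrative):
 *
 *	"schedule"	- MATCH_FULL: the exact symbol only
 *	"sched*"	- MATCH_FRONT_ONLY: symbols starting with "sched"
 *	"*lock"		- MATCH_END_ONLY: symbols ending in "lock"
 *	"*spin*"	- MATCH_MIDDLE_ONLY: symbols containing "spin"
 */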

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only white space was written */
		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
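
/*
 * Usage sketch (illustrative): trace only try_to_wake_up(), dropping
 * any previously set filters:
 *
 *	ftrace_set_filter("try_to_wake_up", 14, 1);
 */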

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* flush a partially written filter string */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);

	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
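
/*
 * With debugfs mounted (typically at /sys/kernel/debug), the files
 * created above can be driven from user space, e.g.:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */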

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing any more
 * modifications and updates. It is used when something goes wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
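
/*
 * Minimal registration sketch (illustrative; my_trace_func and my_ops
 * are hypothetical names, not part of this file):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		... record ip/parent_ip without calling traced code ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */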

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}