ftrace: fix dyn ftrace filter
[deliverable/linux.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
f22f9a89 24#include <linux/kprobes.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5072c59f 27#include <linux/ctype.h>
3d083395
SR
28#include <linux/list.h>
29
395a59d0
AS
30#include <asm/ftrace.h>
31
3d083395 32#include "trace.h"
16444a8a 33
6912896e
SR
/*
 * Internal consistency checks: if one fires, dynamic tracing is no
 * longer trustworthy, so kill ftrace entirely rather than risk
 * patching the wrong text.
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
45
4eebcc81
SR
46/* ftrace_enabled is a method to turn ftrace on or off */
47int ftrace_enabled __read_mostly;
d61f82d0 48static int last_ftrace_enabled;
b0fc494f 49
60a7ecf4
SR
50/* Quick disabling of function tracer. */
51int function_trace_stop;
52
4eebcc81
SR
53/*
54 * ftrace_disabled is set when an anomaly is discovered.
55 * ftrace_disabled is much stronger than ftrace_enabled.
56 */
57static int ftrace_disabled __read_mostly;
58
3d083395 59static DEFINE_SPINLOCK(ftrace_lock);
b0fc494f
SR
60static DEFINE_MUTEX(ftrace_sysctl_lock);
61
16444a8a
ACM
62static struct ftrace_ops ftrace_list_end __read_mostly =
63{
64 .func = ftrace_stub,
65};
66
67static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
68ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 69ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
16444a8a 70
f2252935 71static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
72{
73 struct ftrace_ops *op = ftrace_list;
74
75 /* in case someone actually ports this to alpha! */
76 read_barrier_depends();
77
78 while (op != &ftrace_list_end) {
79 /* silly alpha */
80 read_barrier_depends();
81 op->func(ip, parent_ip);
82 op = op->next;
83 };
84}
85
86/**
3d083395 87 * clear_ftrace_function - reset the ftrace function
16444a8a 88 *
3d083395
SR
89 * This NULLs the ftrace function and in essence stops
90 * tracing. There may be lag
16444a8a 91 */
3d083395 92void clear_ftrace_function(void)
16444a8a 93{
3d083395 94 ftrace_trace_function = ftrace_stub;
60a7ecf4 95 __ftrace_trace_function = ftrace_stub;
3d083395
SR
96}
97
60a7ecf4
SR
98#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
99/*
100 * For those archs that do not test ftrace_trace_stop in their
101 * mcount call site, we need to do it from C.
102 */
103static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
104{
105 if (function_trace_stop)
106 return;
107
108 __ftrace_trace_function(ip, parent_ip);
109}
110#endif
111
e309b41d 112static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 113{
99ecdc43 114 /* should not be called from interrupt context */
3d083395 115 spin_lock(&ftrace_lock);
16444a8a 116
16444a8a
ACM
117 ops->next = ftrace_list;
118 /*
119 * We are entering ops into the ftrace_list but another
120 * CPU might be walking that list. We need to make sure
121 * the ops->next pointer is valid before another CPU sees
122 * the ops pointer included into the ftrace_list.
123 */
124 smp_wmb();
125 ftrace_list = ops;
3d083395 126
b0fc494f
SR
127 if (ftrace_enabled) {
128 /*
129 * For one func, simply call it directly.
130 * For more than one func, call the chain.
131 */
60a7ecf4 132#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
b0fc494f
SR
133 if (ops->next == &ftrace_list_end)
134 ftrace_trace_function = ops->func;
135 else
136 ftrace_trace_function = ftrace_list_func;
60a7ecf4
SR
137#else
138 if (ops->next == &ftrace_list_end)
139 __ftrace_trace_function = ops->func;
140 else
141 __ftrace_trace_function = ftrace_list_func;
142 ftrace_trace_function = ftrace_test_stop_func;
143#endif
b0fc494f 144 }
3d083395
SR
145
146 spin_unlock(&ftrace_lock);
16444a8a
ACM
147
148 return 0;
149}
150
e309b41d 151static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 152{
16444a8a
ACM
153 struct ftrace_ops **p;
154 int ret = 0;
155
99ecdc43 156 /* should not be called from interrupt context */
3d083395 157 spin_lock(&ftrace_lock);
16444a8a
ACM
158
159 /*
3d083395
SR
160 * If we are removing the last function, then simply point
161 * to the ftrace_stub.
16444a8a
ACM
162 */
163 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
164 ftrace_trace_function = ftrace_stub;
165 ftrace_list = &ftrace_list_end;
166 goto out;
167 }
168
169 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
170 if (*p == ops)
171 break;
172
173 if (*p != ops) {
174 ret = -1;
175 goto out;
176 }
177
178 *p = (*p)->next;
179
b0fc494f
SR
180 if (ftrace_enabled) {
181 /* If we only have one func left, then call that directly */
b3535c63 182 if (ftrace_list->next == &ftrace_list_end)
b0fc494f
SR
183 ftrace_trace_function = ftrace_list->func;
184 }
16444a8a
ACM
185
186 out:
3d083395
SR
187 spin_unlock(&ftrace_lock);
188
189 return ret;
190}
191
192#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 193#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 194# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
195#endif
196
71c67d58
SN
197/*
198 * Since MCOUNT_ADDR may point to mcount itself, we do not want
199 * to get it confused by reading a reference in the code as we
200 * are parsing on objcopy output of text. Use a variable for
201 * it instead.
202 */
203static unsigned long mcount_addr = MCOUNT_ADDR;
204
d61f82d0
SR
205enum {
206 FTRACE_ENABLE_CALLS = (1 << 0),
207 FTRACE_DISABLE_CALLS = (1 << 1),
208 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
209 FTRACE_ENABLE_MCOUNT = (1 << 3),
210 FTRACE_DISABLE_MCOUNT = (1 << 4),
211};
212
5072c59f
SR
213static int ftrace_filtered;
214
08f5ac90 215static LIST_HEAD(ftrace_new_addrs);
3d083395 216
41c52c0d 217static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 218
3c1720f0
SR
219struct ftrace_page {
220 struct ftrace_page *next;
aa5e5cea 221 unsigned long index;
3c1720f0 222 struct dyn_ftrace records[];
aa5e5cea 223};
3c1720f0
SR
224
225#define ENTRIES_PER_PAGE \
226 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
227
228/* estimate from running different kernels */
229#define NR_TO_INIT 10000
230
231static struct ftrace_page *ftrace_pages_start;
232static struct ftrace_page *ftrace_pages;
233
37ad5084
SR
234static struct dyn_ftrace *ftrace_free_records;
235
ecea656d
AS
236
#ifdef CONFIG_KPROBES

static int frozen_record_count;

/* Mark a record as owned by a kprobe so code patching leaves it alone. */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
266
e309b41d 267static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 268{
37ad5084
SR
269 rec->ip = (unsigned long)ftrace_free_records;
270 ftrace_free_records = rec;
271 rec->flags |= FTRACE_FL_FREE;
272}
273
fed1939c
SR
274void ftrace_release(void *start, unsigned long size)
275{
276 struct dyn_ftrace *rec;
277 struct ftrace_page *pg;
278 unsigned long s = (unsigned long)start;
279 unsigned long e = s + size;
280 int i;
281
00fd61ae 282 if (ftrace_disabled || !start)
fed1939c
SR
283 return;
284
99ecdc43 285 /* should not be called from interrupt context */
fed1939c
SR
286 spin_lock(&ftrace_lock);
287
288 for (pg = ftrace_pages_start; pg; pg = pg->next) {
289 for (i = 0; i < pg->index; i++) {
290 rec = &pg->records[i];
291
292 if ((rec->ip >= s) && (rec->ip < e))
293 ftrace_free_rec(rec);
294 }
295 }
296 spin_unlock(&ftrace_lock);
fed1939c
SR
297}
298
e309b41d 299static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 300{
37ad5084
SR
301 struct dyn_ftrace *rec;
302
303 /* First check for freed records */
304 if (ftrace_free_records) {
305 rec = ftrace_free_records;
306
37ad5084 307 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 308 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
309 ftrace_free_records = NULL;
310 return NULL;
311 }
312
313 ftrace_free_records = (void *)rec->ip;
314 memset(rec, 0, sizeof(*rec));
315 return rec;
316 }
317
3c1720f0 318 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
319 if (!ftrace_pages->next) {
320 /* allocate another page */
321 ftrace_pages->next =
322 (void *)get_zeroed_page(GFP_KERNEL);
323 if (!ftrace_pages->next)
324 return NULL;
325 }
3c1720f0
SR
326 ftrace_pages = ftrace_pages->next;
327 }
328
329 return &ftrace_pages->records[ftrace_pages->index++];
330}
331
08f5ac90 332static struct dyn_ftrace *
d61f82d0 333ftrace_record_ip(unsigned long ip)
3d083395 334{
08f5ac90 335 struct dyn_ftrace *rec;
3d083395 336
f3c7ac40 337 if (ftrace_disabled)
08f5ac90 338 return NULL;
3d083395 339
08f5ac90
SR
340 rec = ftrace_alloc_dyn_node(ip);
341 if (!rec)
342 return NULL;
3d083395 343
08f5ac90 344 rec->ip = ip;
3d083395 345
08f5ac90 346 list_add(&rec->list, &ftrace_new_addrs);
3d083395 347
08f5ac90 348 return rec;
3d083395
SR
349}
350
b17e8a37
SR
351static void print_ip_ins(const char *fmt, unsigned char *p)
352{
353 int i;
354
355 printk(KERN_CONT "%s", fmt);
356
357 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
358 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
359}
360
31e88909 361static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
362{
363 switch (failed) {
364 case -EFAULT:
365 FTRACE_WARN_ON_ONCE(1);
366 pr_info("ftrace faulted on modifying ");
367 print_ip_sym(ip);
368 break;
369 case -EINVAL:
370 FTRACE_WARN_ON_ONCE(1);
371 pr_info("ftrace failed to modify ");
372 print_ip_sym(ip);
b17e8a37 373 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
374 printk(KERN_CONT "\n");
375 break;
376 case -EPERM:
377 FTRACE_WARN_ON_ONCE(1);
378 pr_info("ftrace faulted on writing ");
379 print_ip_sym(ip);
380 break;
381 default:
382 FTRACE_WARN_ON_ONCE(1);
383 pr_info("ftrace faulted on unknown error ");
384 print_ip_sym(ip);
385 }
386}
387
caf8cdeb 388#define FTRACE_ADDR ((long)(ftrace_caller))
3c1720f0 389
0eb96701 390static int
31e88909 391__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 392{
41c52c0d 393 unsigned long ip, fl;
5072c59f
SR
394
395 ip = rec->ip;
396
982c350b
SR
397 /*
398 * If this record is not to be traced and
399 * it is not enabled then do nothing.
400 *
401 * If this record is not to be traced and
402 * it is enabled then disabled it.
403 *
404 */
405 if (rec->flags & FTRACE_FL_NOTRACE) {
406 if (rec->flags & FTRACE_FL_ENABLED)
407 rec->flags &= ~FTRACE_FL_ENABLED;
408 else
409 return 0;
410
411 } else if (ftrace_filtered && enable) {
5072c59f 412 /*
982c350b 413 * Filtering is on:
5072c59f 414 */
a4500b84 415
982c350b 416 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 417
982c350b
SR
418 /* Record is filtered and enabled, do nothing */
419 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 420 return 0;
5072c59f 421
982c350b
SR
422 /* Record is not filtered and is not enabled do nothing */
423 if (!fl)
424 return 0;
425
426 /* Record is not filtered but enabled, disable it */
427 if (fl == FTRACE_FL_ENABLED)
5072c59f 428 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
429 else
430 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 431 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 432 } else {
982c350b 433 /* Disable or not filtered */
5072c59f 434
41c52c0d 435 if (enable) {
982c350b 436 /* if record is enabled, do nothing */
5072c59f 437 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 438 return 0;
982c350b 439
5072c59f 440 rec->flags |= FTRACE_FL_ENABLED;
982c350b 441
5072c59f 442 } else {
982c350b
SR
443
444 /* if record is not enabled do nothing */
5072c59f 445 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 446 return 0;
982c350b 447
5072c59f
SR
448 rec->flags &= ~FTRACE_FL_ENABLED;
449 }
450 }
451
982c350b 452 if (rec->flags & FTRACE_FL_ENABLED)
31e88909
SR
453 return ftrace_make_call(rec, FTRACE_ADDR);
454 else
455 return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
5072c59f
SR
456}
457
e309b41d 458static void ftrace_replace_code(int enable)
3c1720f0 459{
0eb96701 460 int i, failed;
3c1720f0
SR
461 struct dyn_ftrace *rec;
462 struct ftrace_page *pg;
3c1720f0 463
3c1720f0
SR
464 for (pg = ftrace_pages_start; pg; pg = pg->next) {
465 for (i = 0; i < pg->index; i++) {
466 rec = &pg->records[i];
467
918c1154
SR
468 /*
469 * Skip over free records and records that have
470 * failed.
471 */
472 if (rec->flags & FTRACE_FL_FREE ||
473 rec->flags & FTRACE_FL_FAILED)
3c1720f0
SR
474 continue;
475
f22f9a89 476 /* ignore updates to this record's mcount site */
98a05ed4
AS
477 if (get_kprobe((void *)rec->ip)) {
478 freeze_record(rec);
f22f9a89 479 continue;
98a05ed4
AS
480 } else {
481 unfreeze_record(rec);
482 }
f22f9a89 483
31e88909 484 failed = __ftrace_replace_code(rec, enable);
0eb96701
AS
485 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
486 rec->flags |= FTRACE_FL_FAILED;
487 if ((system_state == SYSTEM_BOOTING) ||
34078a5e 488 !core_kernel_text(rec->ip)) {
0eb96701 489 ftrace_free_rec(rec);
b17e8a37 490 } else
31e88909 491 ftrace_bug(failed, rec->ip);
0eb96701 492 }
3c1720f0
SR
493 }
494 }
495}
496
492a7ea5 497static int
31e88909 498ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
499{
500 unsigned long ip;
593eb8a2 501 int ret;
3c1720f0
SR
502
503 ip = rec->ip;
504
31e88909 505 ret = ftrace_make_nop(mod, rec, mcount_addr);
593eb8a2 506 if (ret) {
31e88909 507 ftrace_bug(ret, ip);
3c1720f0 508 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 509 return 0;
37ad5084 510 }
492a7ea5 511 return 1;
3c1720f0
SR
512}
513
e309b41d 514static int __ftrace_modify_code(void *data)
3d083395 515{
d61f82d0
SR
516 int *command = data;
517
a3583244 518 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 519 ftrace_replace_code(1);
a3583244 520 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
521 ftrace_replace_code(0);
522
523 if (*command & FTRACE_UPDATE_TRACE_FUNC)
524 ftrace_update_ftrace_func(ftrace_trace_function);
525
d61f82d0 526 return 0;
3d083395
SR
527}
528
e309b41d 529static void ftrace_run_update_code(int command)
3d083395 530{
784e2d76 531 stop_machine(__ftrace_modify_code, &command, NULL);
3d083395
SR
532}
533
d61f82d0 534static ftrace_func_t saved_ftrace_func;
60a7ecf4 535static int ftrace_start_up;
cb7be3b2 536static DEFINE_MUTEX(ftrace_start_lock);
d61f82d0 537
e309b41d 538static void ftrace_startup(void)
3d083395 539{
d61f82d0
SR
540 int command = 0;
541
4eebcc81
SR
542 if (unlikely(ftrace_disabled))
543 return;
544
cb7be3b2 545 mutex_lock(&ftrace_start_lock);
60a7ecf4 546 ftrace_start_up++;
982c350b 547 command |= FTRACE_ENABLE_CALLS;
d61f82d0
SR
548
549 if (saved_ftrace_func != ftrace_trace_function) {
550 saved_ftrace_func = ftrace_trace_function;
551 command |= FTRACE_UPDATE_TRACE_FUNC;
552 }
553
554 if (!command || !ftrace_enabled)
3d083395 555 goto out;
3d083395 556
d61f82d0 557 ftrace_run_update_code(command);
3d083395 558 out:
cb7be3b2 559 mutex_unlock(&ftrace_start_lock);
3d083395
SR
560}
561
e309b41d 562static void ftrace_shutdown(void)
3d083395 563{
d61f82d0
SR
564 int command = 0;
565
4eebcc81
SR
566 if (unlikely(ftrace_disabled))
567 return;
568
cb7be3b2 569 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
570 ftrace_start_up--;
571 if (!ftrace_start_up)
d61f82d0 572 command |= FTRACE_DISABLE_CALLS;
3d083395 573
d61f82d0
SR
574 if (saved_ftrace_func != ftrace_trace_function) {
575 saved_ftrace_func = ftrace_trace_function;
576 command |= FTRACE_UPDATE_TRACE_FUNC;
577 }
3d083395 578
d61f82d0
SR
579 if (!command || !ftrace_enabled)
580 goto out;
581
582 ftrace_run_update_code(command);
3d083395 583 out:
cb7be3b2 584 mutex_unlock(&ftrace_start_lock);
3d083395
SR
585}
586
e309b41d 587static void ftrace_startup_sysctl(void)
b0fc494f 588{
d61f82d0
SR
589 int command = FTRACE_ENABLE_MCOUNT;
590
4eebcc81
SR
591 if (unlikely(ftrace_disabled))
592 return;
593
cb7be3b2 594 mutex_lock(&ftrace_start_lock);
d61f82d0
SR
595 /* Force update next time */
596 saved_ftrace_func = NULL;
60a7ecf4
SR
597 /* ftrace_start_up is true if we want ftrace running */
598 if (ftrace_start_up)
d61f82d0
SR
599 command |= FTRACE_ENABLE_CALLS;
600
601 ftrace_run_update_code(command);
cb7be3b2 602 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
603}
604
e309b41d 605static void ftrace_shutdown_sysctl(void)
b0fc494f 606{
d61f82d0
SR
607 int command = FTRACE_DISABLE_MCOUNT;
608
4eebcc81
SR
609 if (unlikely(ftrace_disabled))
610 return;
611
cb7be3b2 612 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
613 /* ftrace_start_up is true if ftrace is running */
614 if (ftrace_start_up)
d61f82d0
SR
615 command |= FTRACE_DISABLE_CALLS;
616
617 ftrace_run_update_code(command);
cb7be3b2 618 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
619}
620
3d083395
SR
621static cycle_t ftrace_update_time;
622static unsigned long ftrace_update_cnt;
623unsigned long ftrace_update_tot_cnt;
624
31e88909 625static int ftrace_update_code(struct module *mod)
3d083395 626{
08f5ac90 627 struct dyn_ftrace *p, *t;
f22f9a89 628 cycle_t start, stop;
3d083395 629
750ed1a4 630 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
631 ftrace_update_cnt = 0;
632
08f5ac90 633 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 634
08f5ac90
SR
635 /* If something went wrong, bail without enabling anything */
636 if (unlikely(ftrace_disabled))
637 return -1;
f22f9a89 638
08f5ac90 639 list_del_init(&p->list);
f22f9a89 640
08f5ac90 641 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 642 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
643 p->flags |= FTRACE_FL_CONVERTED;
644 ftrace_update_cnt++;
645 } else
646 ftrace_free_rec(p);
3d083395
SR
647 }
648
750ed1a4 649 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
650 ftrace_update_time = stop - start;
651 ftrace_update_tot_cnt += ftrace_update_cnt;
652
16444a8a
ACM
653 return 0;
654}
655
68bf21aa 656static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
657{
658 struct ftrace_page *pg;
659 int cnt;
660 int i;
3c1720f0
SR
661
662 /* allocate a few pages */
663 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
664 if (!ftrace_pages_start)
665 return -1;
666
667 /*
668 * Allocate a few more pages.
669 *
670 * TODO: have some parser search vmlinux before
671 * final linking to find all calls to ftrace.
672 * Then we can:
673 * a) know how many pages to allocate.
674 * and/or
675 * b) set up the table then.
676 *
677 * The dynamic code is still necessary for
678 * modules.
679 */
680
681 pg = ftrace_pages = ftrace_pages_start;
682
68bf21aa 683 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 684 pr_info("ftrace: allocating %ld entries in %d pages\n",
68bf21aa 685 num_to_init, cnt);
3c1720f0
SR
686
687 for (i = 0; i < cnt; i++) {
688 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
689
690 /* If we fail, we'll try later anyway */
691 if (!pg->next)
692 break;
693
694 pg = pg->next;
695 }
696
697 return 0;
698}
699
5072c59f
SR
700enum {
701 FTRACE_ITER_FILTER = (1 << 0),
702 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 703 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 704 FTRACE_ITER_FAILURES = (1 << 3),
5072c59f
SR
705};
706
707#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
708
709struct ftrace_iterator {
710 loff_t pos;
711 struct ftrace_page *pg;
712 unsigned idx;
713 unsigned flags;
714 unsigned char buffer[FTRACE_BUFF_MAX+1];
715 unsigned buffer_idx;
716 unsigned filtered;
717};
718
e309b41d 719static void *
5072c59f
SR
720t_next(struct seq_file *m, void *v, loff_t *pos)
721{
722 struct ftrace_iterator *iter = m->private;
723 struct dyn_ftrace *rec = NULL;
724
725 (*pos)++;
726
99ecdc43
SR
727 /* should not be called from interrupt context */
728 spin_lock(&ftrace_lock);
5072c59f
SR
729 retry:
730 if (iter->idx >= iter->pg->index) {
731 if (iter->pg->next) {
732 iter->pg = iter->pg->next;
733 iter->idx = 0;
734 goto retry;
735 }
736 } else {
737 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
738 if ((rec->flags & FTRACE_FL_FREE) ||
739
740 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
741 (rec->flags & FTRACE_FL_FAILED)) ||
742
743 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 744 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 745
0183fb1c
SR
746 ((iter->flags & FTRACE_ITER_FILTER) &&
747 !(rec->flags & FTRACE_FL_FILTER)) ||
748
41c52c0d
SR
749 ((iter->flags & FTRACE_ITER_NOTRACE) &&
750 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
751 rec = NULL;
752 goto retry;
753 }
754 }
99ecdc43 755 spin_unlock(&ftrace_lock);
5072c59f
SR
756
757 iter->pos = *pos;
758
759 return rec;
760}
761
762static void *t_start(struct seq_file *m, loff_t *pos)
763{
764 struct ftrace_iterator *iter = m->private;
765 void *p = NULL;
766 loff_t l = -1;
767
768 if (*pos != iter->pos) {
769 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
770 ;
771 } else {
772 l = *pos;
773 p = t_next(m, p, &l);
774 }
775
776 return p;
777}
778
779static void t_stop(struct seq_file *m, void *p)
780{
781}
782
783static int t_show(struct seq_file *m, void *v)
784{
785 struct dyn_ftrace *rec = v;
786 char str[KSYM_SYMBOL_LEN];
787
788 if (!rec)
789 return 0;
790
791 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
792
793 seq_printf(m, "%s\n", str);
794
795 return 0;
796}
797
798static struct seq_operations show_ftrace_seq_ops = {
799 .start = t_start,
800 .next = t_next,
801 .stop = t_stop,
802 .show = t_show,
803};
804
e309b41d 805static int
5072c59f
SR
806ftrace_avail_open(struct inode *inode, struct file *file)
807{
808 struct ftrace_iterator *iter;
809 int ret;
810
4eebcc81
SR
811 if (unlikely(ftrace_disabled))
812 return -ENODEV;
813
5072c59f
SR
814 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
815 if (!iter)
816 return -ENOMEM;
817
818 iter->pg = ftrace_pages_start;
819 iter->pos = -1;
820
821 ret = seq_open(file, &show_ftrace_seq_ops);
822 if (!ret) {
823 struct seq_file *m = file->private_data;
4bf39a94 824
5072c59f 825 m->private = iter;
4bf39a94 826 } else {
5072c59f 827 kfree(iter);
4bf39a94 828 }
5072c59f
SR
829
830 return ret;
831}
832
833int ftrace_avail_release(struct inode *inode, struct file *file)
834{
835 struct seq_file *m = (struct seq_file *)file->private_data;
836 struct ftrace_iterator *iter = m->private;
837
838 seq_release(inode, file);
839 kfree(iter);
4bf39a94 840
5072c59f
SR
841 return 0;
842}
843
eb9a7bf0
AS
844static int
845ftrace_failures_open(struct inode *inode, struct file *file)
846{
847 int ret;
848 struct seq_file *m;
849 struct ftrace_iterator *iter;
850
851 ret = ftrace_avail_open(inode, file);
852 if (!ret) {
853 m = (struct seq_file *)file->private_data;
854 iter = (struct ftrace_iterator *)m->private;
855 iter->flags = FTRACE_ITER_FAILURES;
856 }
857
858 return ret;
859}
860
861
41c52c0d 862static void ftrace_filter_reset(int enable)
5072c59f
SR
863{
864 struct ftrace_page *pg;
865 struct dyn_ftrace *rec;
41c52c0d 866 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
867 unsigned i;
868
99ecdc43
SR
869 /* should not be called from interrupt context */
870 spin_lock(&ftrace_lock);
41c52c0d
SR
871 if (enable)
872 ftrace_filtered = 0;
5072c59f
SR
873 pg = ftrace_pages_start;
874 while (pg) {
875 for (i = 0; i < pg->index; i++) {
876 rec = &pg->records[i];
877 if (rec->flags & FTRACE_FL_FAILED)
878 continue;
41c52c0d 879 rec->flags &= ~type;
5072c59f
SR
880 }
881 pg = pg->next;
882 }
99ecdc43 883 spin_unlock(&ftrace_lock);
5072c59f
SR
884}
885
e309b41d 886static int
41c52c0d 887ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
888{
889 struct ftrace_iterator *iter;
890 int ret = 0;
891
4eebcc81
SR
892 if (unlikely(ftrace_disabled))
893 return -ENODEV;
894
5072c59f
SR
895 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
896 if (!iter)
897 return -ENOMEM;
898
41c52c0d 899 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
900 if ((file->f_mode & FMODE_WRITE) &&
901 !(file->f_flags & O_APPEND))
41c52c0d 902 ftrace_filter_reset(enable);
5072c59f
SR
903
904 if (file->f_mode & FMODE_READ) {
905 iter->pg = ftrace_pages_start;
906 iter->pos = -1;
41c52c0d
SR
907 iter->flags = enable ? FTRACE_ITER_FILTER :
908 FTRACE_ITER_NOTRACE;
5072c59f
SR
909
910 ret = seq_open(file, &show_ftrace_seq_ops);
911 if (!ret) {
912 struct seq_file *m = file->private_data;
913 m->private = iter;
914 } else
915 kfree(iter);
916 } else
917 file->private_data = iter;
41c52c0d 918 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
919
920 return ret;
921}
922
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
934
e309b41d 935static ssize_t
41c52c0d 936ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
937 size_t cnt, loff_t *ppos)
938{
939 if (file->f_mode & FMODE_READ)
940 return seq_read(file, ubuf, cnt, ppos);
941 else
942 return -EPERM;
943}
944
e309b41d 945static loff_t
41c52c0d 946ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
947{
948 loff_t ret;
949
950 if (file->f_mode & FMODE_READ)
951 ret = seq_lseek(file, offset, origin);
952 else
953 file->f_pos = ret = 1;
954
955 return ret;
956}
957
958enum {
959 MATCH_FULL,
960 MATCH_FRONT_ONLY,
961 MATCH_MIDDLE_ONLY,
962 MATCH_END_ONLY,
963};
964
e309b41d 965static void
41c52c0d 966ftrace_match(unsigned char *buff, int len, int enable)
5072c59f
SR
967{
968 char str[KSYM_SYMBOL_LEN];
969 char *search = NULL;
970 struct ftrace_page *pg;
971 struct dyn_ftrace *rec;
972 int type = MATCH_FULL;
41c52c0d 973 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
974 unsigned i, match = 0, search_len = 0;
975
976 for (i = 0; i < len; i++) {
977 if (buff[i] == '*') {
978 if (!i) {
979 search = buff + i + 1;
980 type = MATCH_END_ONLY;
981 search_len = len - (i + 1);
982 } else {
983 if (type == MATCH_END_ONLY) {
984 type = MATCH_MIDDLE_ONLY;
985 } else {
986 match = i;
987 type = MATCH_FRONT_ONLY;
988 }
989 buff[i] = 0;
990 break;
991 }
992 }
993 }
994
99ecdc43
SR
995 /* should not be called from interrupt context */
996 spin_lock(&ftrace_lock);
41c52c0d
SR
997 if (enable)
998 ftrace_filtered = 1;
5072c59f
SR
999 pg = ftrace_pages_start;
1000 while (pg) {
1001 for (i = 0; i < pg->index; i++) {
1002 int matched = 0;
1003 char *ptr;
1004
1005 rec = &pg->records[i];
1006 if (rec->flags & FTRACE_FL_FAILED)
1007 continue;
1008 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1009 switch (type) {
1010 case MATCH_FULL:
1011 if (strcmp(str, buff) == 0)
1012 matched = 1;
1013 break;
1014 case MATCH_FRONT_ONLY:
1015 if (memcmp(str, buff, match) == 0)
1016 matched = 1;
1017 break;
1018 case MATCH_MIDDLE_ONLY:
1019 if (strstr(str, search))
1020 matched = 1;
1021 break;
1022 case MATCH_END_ONLY:
1023 ptr = strstr(str, search);
1024 if (ptr && (ptr[search_len] == 0))
1025 matched = 1;
1026 break;
1027 }
1028 if (matched)
41c52c0d 1029 rec->flags |= flag;
5072c59f
SR
1030 }
1031 pg = pg->next;
1032 }
99ecdc43 1033 spin_unlock(&ftrace_lock);
5072c59f
SR
1034}
1035
e309b41d 1036static ssize_t
41c52c0d
SR
1037ftrace_regex_write(struct file *file, const char __user *ubuf,
1038 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1039{
1040 struct ftrace_iterator *iter;
1041 char ch;
1042 size_t read = 0;
1043 ssize_t ret;
1044
1045 if (!cnt || cnt < 0)
1046 return 0;
1047
41c52c0d 1048 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1049
1050 if (file->f_mode & FMODE_READ) {
1051 struct seq_file *m = file->private_data;
1052 iter = m->private;
1053 } else
1054 iter = file->private_data;
1055
1056 if (!*ppos) {
1057 iter->flags &= ~FTRACE_ITER_CONT;
1058 iter->buffer_idx = 0;
1059 }
1060
1061 ret = get_user(ch, ubuf++);
1062 if (ret)
1063 goto out;
1064 read++;
1065 cnt--;
1066
1067 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1068 /* skip white space */
1069 while (cnt && isspace(ch)) {
1070 ret = get_user(ch, ubuf++);
1071 if (ret)
1072 goto out;
1073 read++;
1074 cnt--;
1075 }
1076
5072c59f
SR
1077 if (isspace(ch)) {
1078 file->f_pos += read;
1079 ret = read;
1080 goto out;
1081 }
1082
1083 iter->buffer_idx = 0;
1084 }
1085
1086 while (cnt && !isspace(ch)) {
1087 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1088 iter->buffer[iter->buffer_idx++] = ch;
1089 else {
1090 ret = -EINVAL;
1091 goto out;
1092 }
1093 ret = get_user(ch, ubuf++);
1094 if (ret)
1095 goto out;
1096 read++;
1097 cnt--;
1098 }
1099
1100 if (isspace(ch)) {
1101 iter->filtered++;
1102 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1103 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1104 iter->buffer_idx = 0;
1105 } else
1106 iter->flags |= FTRACE_ITER_CONT;
1107
1108
1109 file->f_pos += read;
1110
1111 ret = read;
1112 out:
41c52c0d 1113 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1114
1115 return ret;
1116}
1117
41c52c0d
SR
1118static ssize_t
1119ftrace_filter_write(struct file *file, const char __user *ubuf,
1120 size_t cnt, loff_t *ppos)
1121{
1122 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1123}
1124
1125static ssize_t
1126ftrace_notrace_write(struct file *file, const char __user *ubuf,
1127 size_t cnt, loff_t *ppos)
1128{
1129 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1130}
1131
1132static void
1133ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1134{
1135 if (unlikely(ftrace_disabled))
1136 return;
1137
1138 mutex_lock(&ftrace_regex_lock);
1139 if (reset)
1140 ftrace_filter_reset(enable);
1141 if (buf)
1142 ftrace_match(buf, len, enable);
1143 mutex_unlock(&ftrace_regex_lock);
1144}
1145
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1174
e309b41d 1175static int
41c52c0d 1176ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1177{
1178 struct seq_file *m = (struct seq_file *)file->private_data;
1179 struct ftrace_iterator *iter;
1180
41c52c0d 1181 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1182 if (file->f_mode & FMODE_READ) {
1183 iter = m->private;
1184
1185 seq_release(inode, file);
1186 } else
1187 iter = file->private_data;
1188
1189 if (iter->buffer_idx) {
1190 iter->filtered++;
1191 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1192 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1193 }
1194
1195 mutex_lock(&ftrace_sysctl_lock);
cb7be3b2 1196 mutex_lock(&ftrace_start_lock);
60a7ecf4 1197 if (iter->filtered && ftrace_start_up && ftrace_enabled)
5072c59f 1198 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
cb7be3b2 1199 mutex_unlock(&ftrace_start_lock);
5072c59f
SR
1200 mutex_unlock(&ftrace_sysctl_lock);
1201
1202 kfree(iter);
41c52c0d 1203 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1204 return 0;
1205}
1206
/* release for set_ftrace_filter: enable == 1 targets the filter list */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
1212
/* release for set_ftrace_notrace: enable == 0 targets the notrace list */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1218
5072c59f
SR
1219static struct file_operations ftrace_avail_fops = {
1220 .open = ftrace_avail_open,
1221 .read = seq_read,
1222 .llseek = seq_lseek,
1223 .release = ftrace_avail_release,
1224};
1225
eb9a7bf0
AS
1226static struct file_operations ftrace_failures_fops = {
1227 .open = ftrace_failures_open,
1228 .read = seq_read,
1229 .llseek = seq_lseek,
1230 .release = ftrace_avail_release,
1231};
1232
5072c59f
SR
1233static struct file_operations ftrace_filter_fops = {
1234 .open = ftrace_filter_open,
41c52c0d 1235 .read = ftrace_regex_read,
5072c59f 1236 .write = ftrace_filter_write,
41c52c0d 1237 .llseek = ftrace_regex_lseek,
5072c59f
SR
1238 .release = ftrace_filter_release,
1239};
1240
41c52c0d
SR
1241static struct file_operations ftrace_notrace_fops = {
1242 .open = ftrace_notrace_open,
1243 .read = ftrace_regex_read,
1244 .write = ftrace_notrace_write,
1245 .llseek = ftrace_regex_lseek,
1246 .release = ftrace_notrace_release,
1247};
1248
5072c59f
SR
1249static __init int ftrace_init_debugfs(void)
1250{
1251 struct dentry *d_tracer;
1252 struct dentry *entry;
1253
1254 d_tracer = tracing_init_dentry();
1255
1256 entry = debugfs_create_file("available_filter_functions", 0444,
1257 d_tracer, NULL, &ftrace_avail_fops);
1258 if (!entry)
1259 pr_warning("Could not create debugfs "
1260 "'available_filter_functions' entry\n");
1261
eb9a7bf0
AS
1262 entry = debugfs_create_file("failures", 0444,
1263 d_tracer, NULL, &ftrace_failures_fops);
1264 if (!entry)
1265 pr_warning("Could not create debugfs 'failures' entry\n");
1266
5072c59f
SR
1267 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1268 NULL, &ftrace_filter_fops);
1269 if (!entry)
1270 pr_warning("Could not create debugfs "
1271 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1272
1273 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1274 NULL, &ftrace_notrace_fops);
1275 if (!entry)
1276 pr_warning("Could not create debugfs "
1277 "'set_ftrace_notrace' entry\n");
ad90c0e3 1278
5072c59f
SR
1279 return 0;
1280}
1281
1282fs_initcall(ftrace_init_debugfs);
1283
31e88909
SR
1284static int ftrace_convert_nops(struct module *mod,
1285 unsigned long *start,
68bf21aa
SR
1286 unsigned long *end)
1287{
1288 unsigned long *p;
1289 unsigned long addr;
1290 unsigned long flags;
1291
08f5ac90 1292 mutex_lock(&ftrace_start_lock);
68bf21aa
SR
1293 p = start;
1294 while (p < end) {
1295 addr = ftrace_call_adjust(*p++);
20e5227e
SR
1296 /*
1297 * Some architecture linkers will pad between
1298 * the different mcount_loc sections of different
1299 * object files to satisfy alignments.
1300 * Skip any NULL pointers.
1301 */
1302 if (!addr)
1303 continue;
68bf21aa 1304 ftrace_record_ip(addr);
68bf21aa
SR
1305 }
1306
08f5ac90 1307 /* disable interrupts to prevent kstop machine */
68bf21aa 1308 local_irq_save(flags);
31e88909 1309 ftrace_update_code(mod);
68bf21aa 1310 local_irq_restore(flags);
08f5ac90 1311 mutex_unlock(&ftrace_start_lock);
68bf21aa
SR
1312
1313 return 0;
1314}
1315
31e88909
SR
1316void ftrace_init_module(struct module *mod,
1317 unsigned long *start, unsigned long *end)
90d595fe 1318{
00fd61ae 1319 if (ftrace_disabled || start == end)
fed1939c 1320 return;
31e88909 1321 ftrace_convert_nops(mod, start, end);
90d595fe
SR
1322}
1323
68bf21aa
SR
1324extern unsigned long __start_mcount_loc[];
1325extern unsigned long __stop_mcount_loc[];
1326
1327void __init ftrace_init(void)
1328{
1329 unsigned long count, addr, flags;
1330 int ret;
1331
1332 /* Keep the ftrace pointer to the stub */
1333 addr = (unsigned long)ftrace_stub;
1334
1335 local_irq_save(flags);
1336 ftrace_dyn_arch_init(&addr);
1337 local_irq_restore(flags);
1338
1339 /* ftrace_dyn_arch_init places the return code in addr */
1340 if (addr)
1341 goto failed;
1342
1343 count = __stop_mcount_loc - __start_mcount_loc;
1344
1345 ret = ftrace_dyn_table_alloc(count);
1346 if (ret)
1347 goto failed;
1348
1349 last_ftrace_enabled = ftrace_enabled = 1;
1350
31e88909
SR
1351 ret = ftrace_convert_nops(NULL,
1352 __start_mcount_loc,
68bf21aa
SR
1353 __stop_mcount_loc);
1354
1355 return;
1356 failed:
1357 ftrace_disabled = 1;
1358}
68bf21aa 1359
3d083395 1360#else
0b6e4d56
FW
1361
1362static int __init ftrace_nodyn_init(void)
1363{
1364 ftrace_enabled = 1;
1365 return 0;
1366}
1367device_initcall(ftrace_nodyn_init);
1368
c7aafc54
IM
1369# define ftrace_startup() do { } while (0)
1370# define ftrace_shutdown() do { } while (0)
1371# define ftrace_startup_sysctl() do { } while (0)
1372# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
1373#endif /* CONFIG_DYNAMIC_FTRACE */
1374
a2bb6a3d 1375/**
81adbdc0 1376 * ftrace_kill - kill ftrace
a2bb6a3d
SR
1377 *
1378 * This function should be used by panic code. It stops ftrace
1379 * but in a not so nice way. If you need to simply kill ftrace
1380 * from a non-atomic section, use ftrace_kill.
1381 */
81adbdc0 1382void ftrace_kill(void)
a2bb6a3d
SR
1383{
1384 ftrace_disabled = 1;
1385 ftrace_enabled = 0;
a2bb6a3d
SR
1386 clear_ftrace_function();
1387}
1388
16444a8a 1389/**
3d083395
SR
1390 * register_ftrace_function - register a function for profiling
1391 * @ops - ops structure that holds the function for profiling.
16444a8a 1392 *
3d083395
SR
1393 * Register a function to be called by all functions in the
1394 * kernel.
1395 *
1396 * Note: @ops->func and all the functions it calls must be labeled
1397 * with "notrace", otherwise it will go into a
1398 * recursive loop.
16444a8a 1399 */
3d083395 1400int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 1401{
b0fc494f
SR
1402 int ret;
1403
4eebcc81
SR
1404 if (unlikely(ftrace_disabled))
1405 return -1;
1406
b0fc494f 1407 mutex_lock(&ftrace_sysctl_lock);
b0fc494f 1408 ret = __register_ftrace_function(ops);
d61f82d0 1409 ftrace_startup();
b0fc494f
SR
1410 mutex_unlock(&ftrace_sysctl_lock);
1411
1412 return ret;
3d083395
SR
1413}
1414
1415/**
1416 * unregister_ftrace_function - unresgister a function for profiling.
1417 * @ops - ops structure that holds the function to unregister
1418 *
1419 * Unregister a function that was added to be called by ftrace profiling.
1420 */
1421int unregister_ftrace_function(struct ftrace_ops *ops)
1422{
1423 int ret;
1424
b0fc494f 1425 mutex_lock(&ftrace_sysctl_lock);
3d083395 1426 ret = __unregister_ftrace_function(ops);
d61f82d0 1427 ftrace_shutdown();
b0fc494f
SR
1428 mutex_unlock(&ftrace_sysctl_lock);
1429
1430 return ret;
1431}
1432
e309b41d 1433int
b0fc494f 1434ftrace_enable_sysctl(struct ctl_table *table, int write,
5072c59f 1435 struct file *file, void __user *buffer, size_t *lenp,
b0fc494f
SR
1436 loff_t *ppos)
1437{
1438 int ret;
1439
4eebcc81
SR
1440 if (unlikely(ftrace_disabled))
1441 return -ENODEV;
1442
b0fc494f
SR
1443 mutex_lock(&ftrace_sysctl_lock);
1444
5072c59f 1445 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
b0fc494f
SR
1446
1447 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1448 goto out;
1449
1450 last_ftrace_enabled = ftrace_enabled;
1451
1452 if (ftrace_enabled) {
1453
1454 ftrace_startup_sysctl();
1455
1456 /* we are starting ftrace again */
1457 if (ftrace_list != &ftrace_list_end) {
1458 if (ftrace_list->next == &ftrace_list_end)
1459 ftrace_trace_function = ftrace_list->func;
1460 else
1461 ftrace_trace_function = ftrace_list_func;
1462 }
1463
1464 } else {
1465 /* stopping ftrace calls (just send to ftrace_stub) */
1466 ftrace_trace_function = ftrace_stub;
1467
1468 ftrace_shutdown_sysctl();
1469 }
1470
1471 out:
1472 mutex_unlock(&ftrace_sysctl_lock);
3d083395 1473 return ret;
16444a8a 1474}
f17845e5 1475
#ifdef CONFIG_FUNCTION_RET_TRACER
/* Current function-return callback; the stub means tracing is off */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

/*
 * Install @func as the return-trace callback.
 * NOTE(review): plain assignment with no visible locking here —
 * presumably callers serialize registration; confirm at call sites.
 */
void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

/* Reset the return-trace callback to the stub (tracing off) */
void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif
1489
1490
1491
This page took 0.152225 seconds and 5 git commands to generate.