ftrace: disable ftrace on anomalies in trace start and stop
[deliverable/linux.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
f22f9a89 24#include <linux/kprobes.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5072c59f 27#include <linux/ctype.h>
3d083395
SR
28#include <linux/list.h>
29
395a59d0
AS
30#include <asm/ftrace.h>
31
3d083395 32#include "trace.h"
16444a8a 33
6912896e
SR
/*
 * Wrappers around WARN_ON()/WARN_ON_ONCE() that additionally shut
 * ftrace down via ftrace_kill() when the condition triggers, so a
 * detected anomaly cannot keep patching live kernel text.
 */
#define FTRACE_WARN_ON(cond) \
	do { \
		if (WARN_ON(cond)) \
			ftrace_kill(); \
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond) \
	do { \
		if (WARN_ON_ONCE(cond)) \
			ftrace_kill(); \
	} while (0)
45
4eebcc81
SR
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;	/* previous sysctl value, to detect toggles */

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);	/* protects ftrace_list and record pages */
static DEFINE_MUTEX(ftrace_sysctl_lock);	/* serializes sysctl enable/disable */

/* List terminator; its stub func is always safe to call. */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

/* Head of the registered ftrace_ops list. */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* The function actually called from the mcount trampoline. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* Real handler when ftrace_test_stop_func() is interposed (see below). */
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
16444a8a 70
/*
 * Walk every registered ftrace_ops and invoke each handler for this
 * call site.  Installed as the trace function whenever more than one
 * ops is registered.
 */
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
85
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * Points both trace function pointers back at ftrace_stub, which in
 * essence stops tracing.  There may be lag before all CPUs observe
 * the new pointers.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
97
60a7ecf4
SR
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.  The real handler lives
 * in __ftrace_trace_function.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
111
/*
 * Link @ops onto the head of ftrace_list and, if tracing is enabled,
 * update the live trace function pointer accordingly.  Returns 0.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		/* arch cannot test function_trace_stop: interpose the C check */
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
150
e309b41d 151static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 152{
16444a8a
ACM
153 struct ftrace_ops **p;
154 int ret = 0;
155
99ecdc43 156 /* should not be called from interrupt context */
3d083395 157 spin_lock(&ftrace_lock);
16444a8a
ACM
158
159 /*
3d083395
SR
160 * If we are removing the last function, then simply point
161 * to the ftrace_stub.
16444a8a
ACM
162 */
163 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
164 ftrace_trace_function = ftrace_stub;
165 ftrace_list = &ftrace_list_end;
166 goto out;
167 }
168
169 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
170 if (*p == ops)
171 break;
172
173 if (*p != ops) {
174 ret = -1;
175 goto out;
176 }
177
178 *p = (*p)->next;
179
b0fc494f
SR
180 if (ftrace_enabled) {
181 /* If we only have one func left, then call that directly */
b3535c63 182 if (ftrace_list->next == &ftrace_list_end)
b0fc494f
SR
183 ftrace_trace_function = ftrace_list->func;
184 }
16444a8a
ACM
185
186 out:
3d083395
SR
187 spin_unlock(&ftrace_lock);
188
189 return ret;
190}
191
192#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

/* Command bits passed to __ftrace_modify_code() via stop_machine(). */
enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};
212
5072c59f
SR
static int ftrace_filtered;	/* nonzero while set_ftrace_filter is in use */

/* Newly recorded mcount call sites awaiting conversion to nops. */
static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);	/* serializes filter/notrace updates */

/* One page worth of dyn_ftrace records, chained into a list. */
struct ftrace_page {
	struct ftrace_page *next;
	unsigned long index;	/* number of records used in this page */
	struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;	/* page currently being filled */

/* Free list of recycled records, threaded through rec->ip. */
static struct dyn_ftrace *ftrace_free_records;
235
ecea656d
AS
236
#ifdef CONFIG_KPROBES

static int frozen_record_count;

/* Mark @rec frozen so kprobed mcount sites are never patched. */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

/* Clear the frozen state set by freeze_record(). */
static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

/* Nonzero when @rec is currently frozen. */
static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec) ({ 0; })
# define unfreeze_record(rec) ({ 0; })
# define record_frozen(rec) ({ 0; })
#endif /* CONFIG_KPROBES */
266
/*
 * Return @rec to the free list.  The ip field is reused as the free
 * list link; FTRACE_FL_FREE marks the record as recycled.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
273
fed1939c
SR
/*
 * Free every dyn_ftrace record whose call site lies inside the text
 * range [@start, @start + @size) — used when module text goes away.
 */
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
298
/*
 * Allocate a dyn_ftrace record, preferring the free list and falling
 * back to the record pages (growing them on demand).  Returns NULL on
 * allocation failure or a corrupted free list.
 * NOTE(review): @ip is currently unused here — the caller fills
 * rec->ip itself.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a free-list entry must carry FTRACE_FL_FREE; bail if not */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
331
/*
 * Record a newly discovered mcount call site @ip and queue it on
 * ftrace_new_addrs for later conversion to a nop by
 * ftrace_update_code().  Returns the new record, or NULL when ftrace
 * is disabled or allocation fails.
 */
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
350
b17e8a37
SR
/* Dump MCOUNT_INSN_SIZE bytes at @p as colon-separated hex, prefixed by @fmt. */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
360
/*
 * Report a code-modification failure.  @failed is the error returned
 * by ftrace_modify_code(); each branch also arms FTRACE_WARN_ON_ONCE()
 * which disables ftrace entirely, since a failed text patch means the
 * kernel image can no longer be trusted for tracing.
 */
static void ftrace_bug(int failed, unsigned long ip,
		       unsigned char *expected,
		       unsigned char *replace)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		/* the bytes at ip did not match what we expected */
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" expected: ", expected);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		print_ip_ins(" replace: ", replace);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
391
/* Address patched into call sites when a function is being traced. */
#define FTRACE_ADDR ((long)(ftrace_caller))

/*
 * Decide from the record's FILTER/NOTRACE/ENABLED flags and the
 * requested direction (@enable) whether this call site must be
 * patched, then patch it.  Returns 0 when no change was needed,
 * otherwise the result of ftrace_modify_code().
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			/* already enabled: nothing to patch */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* already disabled: nothing to patch */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
474
/*
 * Walk every recorded call site and enable (@enable != 0) or disable
 * tracing for it.  Sites that previously faulted are skipped; kprobed
 * sites are frozen and left untouched.  On failure the record is
 * marked FAILED, or freed outright when its text is no longer core
 * kernel text (or we are still booting).
 */
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	/* only one of old/new is fixed; the other depends on the site */
	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip, old, new);
			}
		}
	}
}
515
/*
 * Convert the mcount call at @rec->ip into a nop.  Returns 1 on
 * success; on failure reports via ftrace_bug(), marks the record
 * FAILED and returns 0.
 */
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		ftrace_bug(ret, ip, call, nop);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
536
e309b41d 537static int __ftrace_modify_code(void *data)
3d083395 538{
d61f82d0
SR
539 int *command = data;
540
a3583244 541 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 542 ftrace_replace_code(1);
a3583244 543 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
544 ftrace_replace_code(0);
545
546 if (*command & FTRACE_UPDATE_TRACE_FUNC)
547 ftrace_update_ftrace_func(ftrace_trace_function);
548
d61f82d0 549 return 0;
3d083395
SR
550}
551
/* Run __ftrace_modify_code() with the machine stopped so text can be patched. */
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;	/* last trace function installed */
static int ftrace_start_up;		/* nesting count of ftrace_startup() users */
static DEFINE_MUTEX(ftrace_start_lock);	/* protects ftrace_start_up and updates */
d61f82d0 560
/*
 * Increase the start_up count and, for the first user (or when the
 * trace function changed), patch the call sites in.
 */
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	if (ftrace_start_up == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
585
/*
 * Decrease the start_up count and, when the last user goes away (or
 * the trace function changed), patch the call sites back out.
 */
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
610
/*
 * Re-enable tracing from the sysctl interface.  Forces the trace
 * function to be re-installed on this update by clearing the cache.
 */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
628
/* Disable tracing from the sysctl interface. */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
644
3d083395
SR
static cycle_t ftrace_update_time;	/* time spent in the last update pass */
static unsigned long ftrace_update_cnt;	/* records converted in the last pass */
unsigned long ftrace_update_tot_cnt;	/* total records converted since boot */

/*
 * Convert every queued new mcount site into a nop, timing the pass.
 * Returns 0 on success or -1 if ftrace was disabled part-way through.
 */
static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
679
/*
 * Boot-time allocation of the initial dyn_ftrace record pages, sized
 * for @num_to_init entries.  Returns 0 on success, -1 only when the
 * very first page cannot be allocated (later failures are tolerated
 * because pages can also be grown on demand).
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
723
5072c59f
SR
/* Iterator mode flags for the debugfs files below. */
enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),	/* a write left a partial token buffered */
	FTRACE_ITER_NOTRACE = (1 << 2),
	FTRACE_ITER_FAILURES = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Per-open state: seq_file walk position plus the write parse buffer. */
struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;	/* count of expressions applied */
};
742
/*
 * seq_file ->next: advance to the next record matching the iterator's
 * mode flags, skipping free records and honoring the FAILURES/FILTER/
 * NOTRACE selection.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
785
/*
 * seq_file ->start: when the requested *pos differs from the cached
 * iterator position, re-walk from the beginning to that offset;
 * otherwise just step once from where we left off.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
802
/* seq_file ->stop: nothing to clean up; lock is dropped inside t_next(). */
static void t_stop(struct seq_file *m, void *p)
{
}
806
/* seq_file ->show: print the symbol name of the record's call site. */
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
821
/* seq_file operations shared by the avail/failures/filter/notrace files. */
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
828
/*
 * Open available_filter_functions: allocate a ftrace_iterator and
 * attach it to the seq_file.  Fails with -ENODEV once ftrace has been
 * disabled by an anomaly.
 */
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
856
857int ftrace_avail_release(struct inode *inode, struct file *file)
858{
859 struct seq_file *m = (struct seq_file *)file->private_data;
860 struct ftrace_iterator *iter = m->private;
861
862 seq_release(inode, file);
863 kfree(iter);
4bf39a94 864
5072c59f
SR
865 return 0;
866}
867
eb9a7bf0
AS
868static int
869ftrace_failures_open(struct inode *inode, struct file *file)
870{
871 int ret;
872 struct seq_file *m;
873 struct ftrace_iterator *iter;
874
875 ret = ftrace_avail_open(inode, file);
876 if (!ret) {
877 m = (struct seq_file *)file->private_data;
878 iter = (struct ftrace_iterator *)m->private;
879 iter->flags = FTRACE_ITER_FAILURES;
880 }
881
882 return ret;
883}
884
885
/*
 * Clear the FILTER (@enable=1) or NOTRACE (@enable=0) flag from every
 * record that has not failed; also drops the global ftrace_filtered
 * state when resetting filters.
 */
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
909
/*
 * Common open for set_ftrace_filter (@enable=1) and set_ftrace_notrace
 * (@enable=0).  A write-mode open without O_APPEND resets the
 * corresponding flags first; a read-mode open attaches a seq_file.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
946
41c52c0d
SR
/* Open set_ftrace_filter. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

/* Open set_ftrace_notrace. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
958
e309b41d 959static ssize_t
41c52c0d 960ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
961 size_t cnt, loff_t *ppos)
962{
963 if (file->f_mode & FMODE_READ)
964 return seq_read(file, ubuf, cnt, ppos);
965 else
966 return -EPERM;
967}
968
/*
 * Seek handler shared by the filter/notrace files; write-only opens
 * have no seq_file, so their position is simply pinned to 1.
 */
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
981
/* Wildcard match types parsed from a filter expression. */
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
988
/*
 * Parse one glob expression in @buff (supporting a single '*') and set
 * the FILTER (@enable=1) or NOTRACE (@enable=0) flag on every record
 * whose symbol matches.  Note: @buff is truncated in place at the '*'.
 */
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	/* classify the pattern by where the (single) '*' appears */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match on the suffix */
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					/* '*text*': match anywhere */
					type = MATCH_MIDDLE_ONLY;
				} else {
					/* trailing '*': match on the prefix */
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
1059
/*
 * Common write for the filter/notrace files.  Accumulates characters
 * into iter->buffer until whitespace ends a token, then applies the
 * completed expression via ftrace_match().  A token split across two
 * writes is carried over using FTRACE_ITER_CONT.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		/* new write sequence: discard any stale partial token */
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			/* input was all whitespace */
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		/* token complete: apply it */
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
1141
/* Write handler for set_ftrace_filter. */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

/* Write handler for set_ftrace_notrace. */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
1155
/*
 * Common helper for ftrace_set_filter()/ftrace_set_notrace():
 * optionally reset the flag set, then apply @buf as an expression.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
1169
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1198
/*
 * Release handler shared by set_ftrace_filter and set_ftrace_notrace.
 * Flushes any pattern still sitting in the write buffer, re-patches the
 * call sites if filtering changed while tracing is live, and frees the
 * iterator.  @enable: 1 == filter list, 0 == notrace list.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	/* Read opens went through seq_file; private_data is the seq_file. */
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* A write may have ended without trailing whitespace: flush it. */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	/* Lock order here is ftrace_sysctl_lock, then ftrace_start_lock. */
	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	/* Only re-enable call sites if something matched and tracing is up. */
	if (iter->filtered && ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
1230
41c52c0d
SR
/* Release for set_ftrace_filter (enable == 1). */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
1236
/* Release for set_ftrace_notrace (enable == 0). */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1242
5072c59f
SR
/* Read-only seq_file view of the functions available for filtering. */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1249
eb9a7bf0
AS
/* Read-only "failures" file; reuses the avail file's release handler. */
static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1256
5072c59f
SR
/* set_ftrace_filter: seq_file reads, writes feed the regex parser. */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};
1264
41c52c0d
SR
/* set_ftrace_notrace: same plumbing as the filter file, enable == 0. */
static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
1272
5072c59f
SR
1273static __init int ftrace_init_debugfs(void)
1274{
1275 struct dentry *d_tracer;
1276 struct dentry *entry;
1277
1278 d_tracer = tracing_init_dentry();
1279
1280 entry = debugfs_create_file("available_filter_functions", 0444,
1281 d_tracer, NULL, &ftrace_avail_fops);
1282 if (!entry)
1283 pr_warning("Could not create debugfs "
1284 "'available_filter_functions' entry\n");
1285
eb9a7bf0
AS
1286 entry = debugfs_create_file("failures", 0444,
1287 d_tracer, NULL, &ftrace_failures_fops);
1288 if (!entry)
1289 pr_warning("Could not create debugfs 'failures' entry\n");
1290
5072c59f
SR
1291 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1292 NULL, &ftrace_filter_fops);
1293 if (!entry)
1294 pr_warning("Could not create debugfs "
1295 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1296
1297 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1298 NULL, &ftrace_notrace_fops);
1299 if (!entry)
1300 pr_warning("Could not create debugfs "
1301 "'set_ftrace_notrace' entry\n");
ad90c0e3 1302
5072c59f
SR
1303 return 0;
1304}
1305
1306fs_initcall(ftrace_init_debugfs);
1307
68bf21aa
SR
1308static int ftrace_convert_nops(unsigned long *start,
1309 unsigned long *end)
1310{
1311 unsigned long *p;
1312 unsigned long addr;
1313 unsigned long flags;
1314
08f5ac90 1315 mutex_lock(&ftrace_start_lock);
68bf21aa
SR
1316 p = start;
1317 while (p < end) {
1318 addr = ftrace_call_adjust(*p++);
1319 ftrace_record_ip(addr);
68bf21aa
SR
1320 }
1321
08f5ac90 1322 /* disable interrupts to prevent kstop machine */
68bf21aa 1323 local_irq_save(flags);
08f5ac90 1324 ftrace_update_code();
68bf21aa 1325 local_irq_restore(flags);
08f5ac90 1326 mutex_unlock(&ftrace_start_lock);
68bf21aa
SR
1327
1328 return 0;
1329}
1330
90d595fe
SR
1331void ftrace_init_module(unsigned long *start, unsigned long *end)
1332{
00fd61ae 1333 if (ftrace_disabled || start == end)
fed1939c 1334 return;
90d595fe
SR
1335 ftrace_convert_nops(start, end);
1336}
1337
68bf21aa
SR
1338extern unsigned long __start_mcount_loc[];
1339extern unsigned long __stop_mcount_loc[];
1340
/*
 * Boot-time initialization of dynamic ftrace: let the architecture set
 * itself up, allocate the dyn_ftrace table, and convert all built-in
 * mcount call sites to nops.  Any failure disables ftrace entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* NOTE(review): ret is assigned but never checked here;
	 * ftrace_convert_nops() currently always returns 0, but a real
	 * failure would go unnoticed — confirm intent. */
	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
68bf21aa 1372
3d083395 1373#else
0b6e4d56
FW
1374
/* Without CONFIG_DYNAMIC_FTRACE there is nothing to patch at boot;
 * just flip the enable flag once the system is up. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);
1381
c7aafc54
IM
/* Stubs for !CONFIG_DYNAMIC_FTRACE: no call-site patching exists, so
 * the start/stop hooks compile away to nothing. */
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
1386#endif /* CONFIG_DYNAMIC_FTRACE */
1387
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it sets the disabled/enabled flags and
 * clears the trace function without taking any locks or cleaning up
 * the patched call sites, so it is safe from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
1401
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 * with "notrace", otherwise it will go into a
 * recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	/* NOTE(review): returns -1 here, while ftrace_enable_sysctl()
	 * uses -ENODEV for the same condition — confirm no caller
	 * interprets the specific error code. */
	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	/* Start patching call sites now that a callback is installed. */
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1427
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 * Unlike registration, this proceeds even when ftrace_disabled is set,
 * so a dying tracer can still be detached.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	/* Drop the call-site patching if this was the last user. */
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1445
/*
 * /proc/sys handler for the ftrace_enabled sysctl.  Runs the integer
 * proc handler first, then reacts only to an actual value change on a
 * write: re-installs the trace function when turning on, or points it
 * at ftrace_stub when turning off.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* Reads and no-op writes need no state change. */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* Single callback: call it directly, skip the list walk. */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
f17845e5 1488
15e6cb36
FW
#ifdef CONFIG_FUNCTION_RET_TRACER
/* Callback invoked on function return when the return tracer is active;
 * defaults to ftrace_stub (a nop). */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

/* Install @func as the function-return callback.
 * NOTE(review): plain assignment with no locking or barrier —
 * presumably registration happens before return tracing fires; confirm. */
void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

/* Restore the nop stub as the return callback. */
void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif
1502
1503
1504
This page took 0.182561 seconds and 5 git commands to generate.