ftrace: remove daemon
[deliverable/linux.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
f22f9a89 24#include <linux/kprobes.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5072c59f 27#include <linux/ctype.h>
2d8b820b 28#include <linux/hash.h>
3d083395
SR
29#include <linux/list.h>
30
395a59d0
AS
31#include <asm/ftrace.h>
32
3d083395 33#include "trace.h"
16444a8a 34
6912896e
SR
35#define FTRACE_WARN_ON(cond) \
36 do { \
37 if (WARN_ON(cond)) \
38 ftrace_kill(); \
39 } while (0)
40
41#define FTRACE_WARN_ON_ONCE(cond) \
42 do { \
43 if (WARN_ON_ONCE(cond)) \
44 ftrace_kill(); \
45 } while (0)
46
4eebcc81
SR
47/* ftrace_enabled is a method to turn ftrace on or off */
48int ftrace_enabled __read_mostly;
d61f82d0 49static int last_ftrace_enabled;
b0fc494f 50
4eebcc81
SR
51/*
52 * ftrace_disabled is set when an anomaly is discovered.
53 * ftrace_disabled is much stronger than ftrace_enabled.
54 */
55static int ftrace_disabled __read_mostly;
56
3d083395 57static DEFINE_SPINLOCK(ftrace_lock);
b0fc494f
SR
58static DEFINE_MUTEX(ftrace_sysctl_lock);
59
16444a8a
ACM
60static struct ftrace_ops ftrace_list_end __read_mostly =
61{
62 .func = ftrace_stub,
63};
64
65static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
66ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
67
f2252935 68static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
69{
70 struct ftrace_ops *op = ftrace_list;
71
72 /* in case someone actually ports this to alpha! */
73 read_barrier_depends();
74
75 while (op != &ftrace_list_end) {
76 /* silly alpha */
77 read_barrier_depends();
78 op->func(ip, parent_ip);
79 op = op->next;
80 };
81}
82
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before every CPU stops calling
 * the previous handler, as no synchronization is performed here.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
93
/*
 * Add @ops to the head of ftrace_list and update the trace function.
 * Always returns 0.  Caller must not hold ftrace_lock.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
124
/*
 * Unlink @ops from ftrace_list.  Returns 0 on success, -1 if @ops was
 * not found on the list.  Caller must not hold ftrace_lock.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	/* walk the next-pointers to find the slot pointing at ops */
	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		/* ops was never registered */
		ret = -1;
		goto out;
	}

	/* unlink it */
	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
166
167#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 168#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 169# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
170#endif
171
71c67d58
SN
172/*
173 * Since MCOUNT_ADDR may point to mcount itself, we do not want
174 * to get it confused by reading a reference in the code as we
175 * are parsing on objcopy output of text. Use a variable for
176 * it instead.
177 */
178static unsigned long mcount_addr = MCOUNT_ADDR;
179
d61f82d0
SR
180enum {
181 FTRACE_ENABLE_CALLS = (1 << 0),
182 FTRACE_DISABLE_CALLS = (1 << 1),
183 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
184 FTRACE_ENABLE_MCOUNT = (1 << 3),
185 FTRACE_DISABLE_MCOUNT = (1 << 4),
186};
187
5072c59f 188static int ftrace_filtered;
ecea656d
AS
189static int tracing_on;
190static int frozen_record_count;
5072c59f 191
3d083395
SR
192static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
193
194static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
195
41c52c0d 196static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 197
3c1720f0
SR
198struct ftrace_page {
199 struct ftrace_page *next;
aa5e5cea 200 unsigned long index;
3c1720f0 201 struct dyn_ftrace records[];
aa5e5cea 202};
3c1720f0
SR
203
204#define ENTRIES_PER_PAGE \
205 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
206
207/* estimate from running different kernels */
208#define NR_TO_INIT 10000
209
210static struct ftrace_page *ftrace_pages_start;
211static struct ftrace_page *ftrace_pages;
212
3d083395
SR
213static int ftrace_record_suspend;
214
37ad5084
SR
215static struct dyn_ftrace *ftrace_free_records;
216
ecea656d
AS
217
#ifdef CONFIG_KPROBES
/* Mark rec frozen: its mcount site carries a kprobe and must be left alone */
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

/* Clear the frozen state set by freeze_record() */
static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

/* Non-zero if rec is currently frozen */
static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
/* Without kprobes nothing is ever frozen */
# define freeze_record(rec) ({ 0; })
# define unfreeze_record(rec) ({ 0; })
# define record_frozen(rec) ({ 0; })
#endif /* CONFIG_KPROBES */
244
/*
 * Decide whether the tracer should skip @ip.  Returns 1 when the record
 * for @ip is frozen (kprobe'd) and would not be traced in its current
 * state; 0 otherwise.  Fast-exits when nothing is frozen.
 */
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				/* a failed or unconverted site is never traced */
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					/* skip unless filtered in and not notrace'd */
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}
281
e309b41d 282static inline int
9ff9cdb2 283ftrace_ip_in_hash(unsigned long ip, unsigned long key)
3d083395
SR
284{
285 struct dyn_ftrace *p;
286 struct hlist_node *t;
287 int found = 0;
288
ffdaa358 289 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
3d083395
SR
290 if (p->ip == ip) {
291 found = 1;
292 break;
293 }
294 }
295
296 return found;
297}
298
/* Insert node at the head of hash bucket @key (RCU-safe publication) */
static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
304
/* called from kstop_machine, so no RCU grace period is needed */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}
310
/*
 * Return rec to the free list.  The record's ip field doubles as the
 * free-list link, and FTRACE_FL_FREE marks it as recyclable.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
317
/*
 * Release every dyn_ftrace record whose ip lies in [start, start+size),
 * e.g. when a module is unloaded.  Also drops matching entries from the
 * record hash via ftrace_release_hash().
 */
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}
344
/*
 * Allocate a dyn_ftrace record, preferring the free list over the page
 * pool.  Returns NULL when both are exhausted.  The @ip argument is
 * currently unused; the caller fills in the record.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a free-list entry without FL_FREE means corruption */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		/* rec->ip holds the free-list link (see ftrace_free_rec) */
		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
372
/*
 * Record a newly-seen mcount call site @ip into the ftrace hash so it can
 * be converted later.  Runs from the mcount hook itself, so it must guard
 * against recursion via a per-CPU counter and must not schedule.
 */
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	FTRACE_WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	/* This ip may have hit the hash before the lock */
	/* NOTE(review): this duplicates the check above with no lock taken
	 * in between — looks like a leftover from a removed hash lock;
	 * verify against history. */
	if (ftrace_ip_in_hash(ip, key))
		goto out;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out;

	node->ip = ip;

	ftrace_add_hash(node, key);

 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
429
caf8cdeb 430#define FTRACE_ADDR ((long)(ftrace_caller))
3c1720f0 431
/*
 * Patch one record's mcount site.  @old/@new hold the nop-side of the
 * replacement prepared by the caller; the call-side is built here.
 * Returns 0 when no modification was needed, otherwise the result of
 * ftrace_modify_code() (0 on success, negative errno on failure).
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			/* already enabled: nothing to patch */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* already disabled: nothing to patch */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
512
/*
 * Walk every recorded mcount site and enable (@enable != 0) or disable
 * tracing for it.  Sites carrying a kprobe are frozen and skipped.
 * Failed patches are flagged, and records outside core kernel text
 * (or during boot) are freed outright.
 */
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	/* the nop side of the patch is common to every record */
	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
553
e309b41d 554static void ftrace_shutdown_replenish(void)
3c1720f0
SR
555{
556 if (ftrace_pages->next)
557 return;
558
559 /* allocate another page */
560 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
561}
3d083395 562
05736a42
SR
563static void print_ip_ins(const char *fmt, unsigned char *p)
564{
565 int i;
566
567 printk(KERN_CONT "%s", fmt);
568
569 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
570 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
571}
572
/*
 * Convert rec's mcount call into a nop.  Returns 1 on success; on
 * failure logs a diagnostic keyed to the errno from ftrace_modify_code(),
 * marks the record FAILED and returns 0.
 */
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			/* the bytes at ip did not match the expected call */
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
618
cb7be3b2 619static int ftrace_update_code(void *ignore);
ad90c0e3 620
/*
 * stop_machine() callback: apply the FTRACE_* command bits while every
 * CPU is halted, so code patching is safe.  Always returns 0.
 */
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	/* point the mcount hook at the recorder or at the stub */
	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
652
/* Run __ftrace_modify_code with all CPUs stopped */
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
657
d61f82d0 658static ftrace_func_t saved_ftrace_func;
cb7be3b2
SR
659static int ftrace_start;
660static DEFINE_MUTEX(ftrace_start_lock);
d61f82d0 661
/*
 * Bump the tracer user count; on the first user (or when the trace
 * function changed) patch the kernel under ftrace_start_lock.
 */
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	if (ftrace_start == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
686
/*
 * Drop one tracer user; when the count reaches zero, disable all call
 * sites.  Mirrors ftrace_startup().
 */
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
711
/* sysctl enabled ftrace: re-arm mcount and re-enable calls if running */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
729
/* sysctl disabled ftrace: disarm mcount and disable calls if running */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
745
3d083395
SR
746static cycle_t ftrace_update_time;
747static unsigned long ftrace_update_cnt;
748unsigned long ftrace_update_tot_cnt;
749
/*
 * Convert every unconverted record in the hash to a nop.  Runs inside
 * stop_machine() (see __ftrace_modify_code), hence "No locks needed".
 * Recording is suspended and ftrace_enabled is saved/restored around
 * the walk.  Always returns 0.
 */
static int ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				/* conversion failed: drop records we cannot
				 * retry (boot-time or non-core text) */
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		/* put the kprobe'd records back on their bucket */
		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}
830
/*
 * Boot-time allocation of the dyn_ftrace record pages, sized for roughly
 * @num_to_init entries.  Returns 0 on success, -1 when even the first
 * page cannot be allocated (later page failures are tolerated).
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
874
5072c59f
SR
875enum {
876 FTRACE_ITER_FILTER = (1 << 0),
877 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 878 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 879 FTRACE_ITER_FAILURES = (1 << 3),
5072c59f
SR
880};
881
882#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
883
884struct ftrace_iterator {
885 loff_t pos;
886 struct ftrace_page *pg;
887 unsigned idx;
888 unsigned flags;
889 unsigned char buffer[FTRACE_BUFF_MAX+1];
890 unsigned buffer_idx;
891 unsigned filtered;
892};
893
/*
 * seq_file next callback: advance to the next record that matches the
 * iterator's flags, crossing record pages as needed.  Returns NULL at
 * the end of the table.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* skip freed records; show failed records only in FAILURES
		 * mode; in NOTRACE mode show only notrace'd records.
		 * NOTE(review): no FTRACE_ITER_FILTER test appears here —
		 * verify whether filter listing is handled elsewhere. */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
933
934static void *t_start(struct seq_file *m, loff_t *pos)
935{
936 struct ftrace_iterator *iter = m->private;
937 void *p = NULL;
938 loff_t l = -1;
939
940 if (*pos != iter->pos) {
941 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
942 ;
943 } else {
944 l = *pos;
945 p = t_next(m, p, &l);
946 }
947
948 return p;
949}
950
/* seq_file stop callback: nothing to release */
static void t_stop(struct seq_file *m, void *p)
{
}
954
/* seq_file show callback: print the symbol name for one record */
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
969
970static struct seq_operations show_ftrace_seq_ops = {
971 .start = t_start,
972 .next = t_next,
973 .stop = t_stop,
974 .show = t_show,
975};
976
/*
 * Open "available_filter_functions": allocate an iterator over the
 * record pages and attach it to a seq_file.  Returns 0 or a negative
 * errno; the iterator is freed on seq_open failure.
 */
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
1004
1005int ftrace_avail_release(struct inode *inode, struct file *file)
1006{
1007 struct seq_file *m = (struct seq_file *)file->private_data;
1008 struct ftrace_iterator *iter = m->private;
1009
1010 seq_release(inode, file);
1011 kfree(iter);
4bf39a94 1012
5072c59f
SR
1013 return 0;
1014}
1015
eb9a7bf0
AS
1016static int
1017ftrace_failures_open(struct inode *inode, struct file *file)
1018{
1019 int ret;
1020 struct seq_file *m;
1021 struct ftrace_iterator *iter;
1022
1023 ret = ftrace_avail_open(inode, file);
1024 if (!ret) {
1025 m = (struct seq_file *)file->private_data;
1026 iter = (struct ftrace_iterator *)m->private;
1027 iter->flags = FTRACE_ITER_FAILURES;
1028 }
1029
1030 return ret;
1031}
1032
1033
41c52c0d 1034static void ftrace_filter_reset(int enable)
5072c59f
SR
1035{
1036 struct ftrace_page *pg;
1037 struct dyn_ftrace *rec;
41c52c0d 1038 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
1039 unsigned i;
1040
99ecdc43
SR
1041 /* should not be called from interrupt context */
1042 spin_lock(&ftrace_lock);
41c52c0d
SR
1043 if (enable)
1044 ftrace_filtered = 0;
5072c59f
SR
1045 pg = ftrace_pages_start;
1046 while (pg) {
1047 for (i = 0; i < pg->index; i++) {
1048 rec = &pg->records[i];
1049 if (rec->flags & FTRACE_FL_FAILED)
1050 continue;
41c52c0d 1051 rec->flags &= ~type;
5072c59f
SR
1052 }
1053 pg = pg->next;
1054 }
99ecdc43 1055 spin_unlock(&ftrace_lock);
5072c59f
SR
1056}
1057
/*
 * Common open for the filter (@enable != 0) and notrace files.  A
 * write-without-append open resets the corresponding flags first; a
 * readable open additionally sets up seq_file iteration.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		/* write-only: the iterator just buffers the input */
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
1094
/* Open "set_ftrace_filter" (enable == 1 selects the FILTER flag) */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
1100
/* Open "set_ftrace_notrace" (enable == 0 selects the NOTRACE flag) */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1106
e309b41d 1107static ssize_t
41c52c0d 1108ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
1109 size_t cnt, loff_t *ppos)
1110{
1111 if (file->f_mode & FMODE_READ)
1112 return seq_read(file, ubuf, cnt, ppos);
1113 else
1114 return -EPERM;
1115}
1116
e309b41d 1117static loff_t
41c52c0d 1118ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1119{
1120 loff_t ret;
1121
1122 if (file->f_mode & FMODE_READ)
1123 ret = seq_lseek(file, offset, origin);
1124 else
1125 file->f_pos = ret = 1;
1126
1127 return ret;
1128}
1129
1130enum {
1131 MATCH_FULL,
1132 MATCH_FRONT_ONLY,
1133 MATCH_MIDDLE_ONLY,
1134 MATCH_END_ONLY,
1135};
1136
/*
 * Parse a glob in @buff (a single '*' as prefix, suffix or on both ends)
 * and set the FILTER (@enable != 0) or NOTRACE flag on every matching
 * record.  Mutates @buff by truncating at the wildcard.
 */
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	/* classify the pattern: "*tail", "head*" or "*mid*" */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				/* suffix match: search must end the symbol */
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
1207
e309b41d 1208static ssize_t
41c52c0d
SR
1209ftrace_regex_write(struct file *file, const char __user *ubuf,
1210 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1211{
1212 struct ftrace_iterator *iter;
1213 char ch;
1214 size_t read = 0;
1215 ssize_t ret;
1216
1217 if (!cnt || cnt < 0)
1218 return 0;
1219
41c52c0d 1220 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1221
1222 if (file->f_mode & FMODE_READ) {
1223 struct seq_file *m = file->private_data;
1224 iter = m->private;
1225 } else
1226 iter = file->private_data;
1227
1228 if (!*ppos) {
1229 iter->flags &= ~FTRACE_ITER_CONT;
1230 iter->buffer_idx = 0;
1231 }
1232
1233 ret = get_user(ch, ubuf++);
1234 if (ret)
1235 goto out;
1236 read++;
1237 cnt--;
1238
1239 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1240 /* skip white space */
1241 while (cnt && isspace(ch)) {
1242 ret = get_user(ch, ubuf++);
1243 if (ret)
1244 goto out;
1245 read++;
1246 cnt--;
1247 }
1248
5072c59f
SR
1249 if (isspace(ch)) {
1250 file->f_pos += read;
1251 ret = read;
1252 goto out;
1253 }
1254
1255 iter->buffer_idx = 0;
1256 }
1257
1258 while (cnt && !isspace(ch)) {
1259 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1260 iter->buffer[iter->buffer_idx++] = ch;
1261 else {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 ret = get_user(ch, ubuf++);
1266 if (ret)
1267 goto out;
1268 read++;
1269 cnt--;
1270 }
1271
1272 if (isspace(ch)) {
1273 iter->filtered++;
1274 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1275 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1276 iter->buffer_idx = 0;
1277 } else
1278 iter->flags |= FTRACE_ITER_CONT;
1279
1280
1281 file->f_pos += read;
1282
1283 ret = read;
1284 out:
41c52c0d 1285 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1286
1287 return ret;
1288}
1289
/* Write handler for set_ftrace_filter (enable=1). */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
1296
/* Write handler for set_ftrace_notrace (enable=0). */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
1303
/*
 * Common worker for ftrace_set_filter/ftrace_set_notrace: optionally
 * reset the selected list, then apply @buf as a match pattern.
 * Serialized against the debugfs write path by ftrace_regex_lock.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
1317
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
4eebcc81 1331
41c52c0d
SR
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1346
/*
 * Release handler shared by the filter and notrace files.  Flushes any
 * partial token left in the iterator, then — if something was filtered
 * while tracing is active — re-runs the code update so the new filter
 * set takes effect.
 *
 * Lock order here is ftrace_regex_lock -> ftrace_sysctl_lock ->
 * ftrace_start_lock.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* apply a token that was not terminated by whitespace */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
1378
41c52c0d
SR
/* Release handler for set_ftrace_filter (enable=1). */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
1384
/* Release handler for set_ftrace_notrace (enable=0). */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1390
5072c59f
SR
/* debugfs ops for available_filter_functions (read-only seq_file). */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1397
eb9a7bf0
AS
/* debugfs ops for failures (records that failed to convert). */
static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1404
5072c59f
SR
/* debugfs ops for set_ftrace_filter (read/write via regex helpers). */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};
1412
41c52c0d
SR
/* debugfs ops for set_ftrace_notrace (read/write via regex helpers). */
static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
1420
5072c59f
SR
/*
 * Create the ftrace control files under the tracing debugfs directory.
 * Failure to create an entry is only warned about — tracing still works
 * without the debugfs interface.
 */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

/* run after the filesystems (and debugfs) are up */
fs_initcall(ftrace_init_debugfs);
1455
68bf21aa
SR
/*
 * Record every mcount call site in [start, end) and then convert them
 * to nops via ftrace_update_code().  Always returns 0.
 */
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		/* arch may need to adjust the recorded address */
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		/* keep the record page pool topped up */
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}
1480
90d595fe
SR
/*
 * Convert the mcount call sites of a freshly loaded module.
 * No-op when ftrace is disabled or the module has no call sites.
 */
void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
1487
68bf21aa
SR
/* linker-provided bounds of the mcount location table */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

/*
 * Boot-time initialization of dynamic ftrace: let the arch hook run,
 * size the record table from the mcount section, and convert all
 * call sites to nops.  On any failure ftrace is disabled entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* ret is not checked: ftrace_convert_nops() always returns 0 */
	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
68bf21aa 1522
3d083395 1523#else
c7aafc54
IM
1524# define ftrace_startup() do { } while (0)
1525# define ftrace_shutdown() do { } while (0)
1526# define ftrace_startup_sysctl() do { } while (0)
1527# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
1528#endif /* CONFIG_DYNAMIC_FTRACE */
1529
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it only sets the disable flags and
 * points the trace function back at the stub, without any of the
 * usual teardown.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
1543
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 *
 * Returns the result of __register_ftrace_function(), or -1 when
 * ftrace has been disabled by an anomaly.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1569
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1587
/*
 * sysctl handler for /proc/sys/kernel/ftrace_enabled.  After
 * proc_dointvec() updates ftrace_enabled, switch tracing on or off
 * when the value actually changed on a write.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* nothing to do on reads, errors, or an unchanged value */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* single callback can be called directly */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
This page took 0.147259 seconds and 5 git commands to generate.