ftrace: add nop tracer
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all callers see the change.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
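
/*
 * Editor's sketch (added for clarity, not part of the original file):
 * what registration means for the hot path. With a single ops
 * registered, every traced function calls its handler directly; with
 * two or more, every traced function pays for the full list walk:
 *
 *	one ops:	ftrace_trace_function == my_ops.func
 *	two ops:	ftrace_trace_function == ftrace_list_func
 *				-> op1->func(ip, parent_ip)
 *				-> op2->func(ip, parent_ip)
 *
 * "my_ops", "op1" and "op2" are hypothetical names used only here.
 */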

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves and
 * not recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
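
/*
 * Worked example (added for clarity; exact sizes are an assumption and
 * vary by architecture and config): on a 64-bit kernel with 4096-byte
 * pages, a 16-byte ftrace_page header and a 32-byte dyn_ftrace record
 * (hlist_node + ip + flags) give
 *
 *	ENTRIES_PER_PAGE = (4096 - 16) / 32 = 127
 *
 * records per page.
 */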

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

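/*
 * Freed records are kept on a singly linked free list: ftrace_free_rec()
 * below reuses the record's ip field as the "next" pointer and marks the
 * record with FTRACE_FL_FREE. ftrace_alloc_dyn_node() pops from this
 * list before carving new records out of ftrace_pages.
 */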
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
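
#if 0
/*
 * Editor's sketch (illustrative only, not compiled): the shape of the
 * per-cpu recursion guard used by ftrace_record_ip() above. The first
 * entry on a CPU does the real work; any nested entry, caused by a
 * function that the hook itself calls, bails out early. The names
 * "my_hook_depth" and "my_hook" are hypothetical, and preemption is
 * assumed disabled around the raw_smp_processor_id() use, as above.
 */
static DEFINE_PER_CPU(int, my_hook_depth);

static void my_hook(void)
{
	int cpu = raw_smp_processor_id();

	per_cpu(my_hook_depth, cpu)++;
	if (per_cpu(my_hook_depth, cpu) != 1)
		goto out;	/* recursed, do nothing */

	/* ... real work goes here ... */
 out:
	per_cpu(my_hook_depth, cpu)--;
}
#endif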

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
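
/*
 * Summary table (added for clarity; derived from the checks above) for
 * the ftrace_filtered && enable case:
 *
 *	FILTER NOTRACE ENABLED	action
 *	  0	 0	 0	nothing
 *	  0	 0	 1	disable the call site
 *	  0	 1	 0	nothing
 *	  0	 1	 1	disable the call site
 *	  1	 0	 0	enable the call site
 *	  1	 0	 1	nothing (already enabled)
 *	  1	 1	 0	nothing (notrace wins)
 *	  1	 1	 1	disable the call site
 */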

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
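
/*
 * Note (added for clarity): stop_machine() runs __ftrace_modify_code()
 * with all other CPUs spinning with interrupts disabled, so kernel text
 * can be patched without another CPU concurrently executing the
 * instructions being rewritten.
 */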

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUs are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e., patch the mcount call with a NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
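
/*
 * Pattern forms accepted above (examples added for clarity):
 *
 *	"schedule"	MATCH_FULL		exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_lock"	MATCH_END_ONLY		suffix match
 *	"*idle*"	MATCH_MIDDLE_ONLY	substring match
 *
 * Only a leading and/or trailing '*' is understood; this is a simple
 * glob on symbol names, not a regex. Note that the buffer is modified
 * in place (the '*' is replaced by a NUL while parsing).
 */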

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
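
#if 0
/*
 * Editor's sketch (illustrative only, not compiled): in-kernel use of
 * the two calls above. The pattern strings and function name are
 * hypothetical. The buffers must be writable, since ftrace_match()
 * truncates them in place while parsing wildcards.
 */
static unsigned char my_filter[] = "sched_*";
static unsigned char my_notrace[] = "*idle*";

static void __init my_setup_filters(void)
{
	/* reset existing filters, then trace only sched_* functions */
	ftrace_set_filter(my_filter, sizeof(my_filter) - 1, 1);

	/* and never trace anything matching *idle* */
	ftrace_set_notrace(my_notrace, sizeof(my_notrace) - 1, 1);
}
#endif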

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);
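
/*
 * Example usage from user space (added for clarity; assumes debugfs is
 * mounted and <debugfs> is its mount point, e.g. /sys/kernel/debug):
 *
 *	cat <debugfs>/tracing/available_filter_functions
 *	echo 'sys_*'  > <debugfs>/tracing/set_ftrace_filter
 *	echo '*idle*' > <debugfs>/tracing/set_ftrace_notrace
 *	echo disable  > <debugfs>/tracing/ftraced_enabled
 */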

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing any
 * more modifications or updates. It is used when something went
 * wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
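
#if 0
/*
 * Editor's sketch (illustrative only, not compiled): a minimal caller
 * of the API above. All names here are hypothetical. Note the callback
 * is marked notrace, per the warning in the kerneldoc, so that tracing
 * it cannot recurse.
 */
static atomic_t my_call_count = ATOMIC_INIT(0);

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs on every traced function call: keep this path cheap */
	atomic_inc(&my_call_count);
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}
#endif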

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}