ftrace: add README
kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *   Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static int tracing_disabled = 1;

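/*
 * Convert nanoseconds to microseconds, rounding to the nearest
 * microsecond (hence the +500 before the divide).
 */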
static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t tracer_counter;
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled;
static unsigned long trace_nr_entries = 16384UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

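/* Parse the "trace_entries=" boot option that sizes the trace buffer */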
static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_NEED_RESCHED = 0x02,
	TRACE_FLAG_HARDIRQ = 0x04,
	TRACE_FLAG_SOFTIRQ = 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

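/*
 * Sanity-check a per-CPU buffer's page list: every neighbour of each
 * node must point back at it in both directions.
 */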
void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);
	}
}

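/* Return the address of the buffer's first page, or NULL if it is empty */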
void *head_page(struct trace_array_cpu *data)
{
	struct page *page;

	check_pages(data);
	if (list_empty(&data->trace_pages))
		return NULL;

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
}

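/*
 * Swap the trace pages of two per-CPU buffers: copy the bookkeeping
 * fields from trace_current_idx onwards, then exchange the page lists
 * through a temporary list head.
 */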
notrace static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	tr1->trace_current = NULL;
	memcpy(&tr1->trace_current_idx, &tr2->trace_current_idx,
	       sizeof(struct trace_array_cpu) -
	       offsetof(struct trace_array_cpu, trace_current_idx));

	check_pages(tr1);
	check_pages(tr2);
	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
	check_pages(tr1);
	check_pages(tr2);
}

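/*
 * update_max_tr - swap every per-CPU buffer of @tr into max_tr and
 * reset the (now stale) live buffers, preserving the new max-latency
 * trace for later retrieval.
 */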
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		flip_trace(max_tr.data[i], data);
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);

	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

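/*
 * register_tracer - add a tracer to the list of available tracers.
 * Fails on a missing or duplicate name; with CONFIG_FTRACE_STARTUP_TEST
 * the tracer's selftest must pass before it is linked into trace_types.
 */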
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

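/* Reset a per-CPU buffer so the next entry is written at the head page */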
void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_current = head_page(data);
	data->trace_current_idx = 0;
}

#ifdef CONFIG_FTRACE
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

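/*
 * Cache of recently seen task comms: map_pid_to_cmdline maps a pid to
 * a slot in saved_cmdlines, and map_cmdline_to_pid maps the slot back
 * so a recycled slot can invalidate the pid it used to belong to.
 */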
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

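/*
 * Reserve the next entry slot in a per-CPU buffer, advancing round-robin
 * to the next page (and skipping the list head) when the current page
 * is full.
 */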
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;
	struct list_head *next;
	struct page *page;

	data->trace_idx++;
	idx = data->trace_current_idx;
	idx_next = idx + 1;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_current + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		page = virt_to_page(data->trace_current);
		/*
		 * Round-robin, but skip the head (which is not a real page):
		 */
		next = page->lru.next;
		if (unlikely(next == &data->trace_pages))
			next = next->next;
		BUG_ON(next == &data->trace_pages);

		page = list_entry(next, struct page, lru);
		data->trace_current = page_address(page);
		idx_next = 0;
	}

	data->trace_current_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

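/*
 * Return the entry at the iterator's current read position for @cpu,
 * or NULL if that CPU's buffer has been exhausted.
 */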
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx)
		return NULL;

	if (!iter->next_page[cpu]) {
		/*
		 * Initialize. If the count of elements in
		 * this buffer is greater than the max entries
		 * we had an overrun, which means we looped around.
		 * We can simply use the current pointer as our
		 * starting point.
		 */
		if (data->trace_idx >= tr->entries) {
			page = virt_to_page(data->trace_current);
			iter->next_page[cpu] = &page->lru;
			iter->next_page_idx[cpu] = data->trace_current_idx;
		} else {
			iter->next_page[cpu] = data->trace_pages.next;
			iter->next_page_idx[cpu] = 0;
		}
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	return &array[iter->next_page_idx[cpu]];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->idx++;
		iter->next_idx[next_cpu]++;
		iter->next_page_idx[next_cpu]++;
		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
			struct trace_array_cpu *data = iter->tr->data[next_cpu];

			iter->next_page_idx[next_cpu] = 0;
			iter->next_page[next_cpu] =
				iter->next_page[next_cpu]->next;
			if (iter->next_page[next_cpu] == &data->trace_pages)
				iter->next_page[next_cpu] =
					data->trace_pages.next;
		}
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

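/*
 * seq_file ->next op: step the iterator forward to position *pos,
 * merging the per-CPU buffers in entry-index order; cannot seek
 * backwards.
 */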
static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}

static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}

static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	default:
		seq_printf(m, "Unknown type %d\n", entry->type);
	}
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		seq_printf(m, "\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx)
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

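/*
 * Common open path for the trace files: allocate the iterator, point
 * it at max_tr when the current tracer reports a max latency, and
 * disable tracing while the buffer is being dumped.
 */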
static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};

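/* Contents of the debugfs "README" file: a short usage walk-through */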
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
	.open = tracing_open_generic,
	.read = tracing_readme_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

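/*
 * Add one page to every per-CPU buffer (and to the max buffer when
 * CONFIG_TRACER_MAX_TRACE is set); on any failure, all freshly
 * allocated pages are released again.
 */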
static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	void *array;
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}
		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we've successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}

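/*
 * Boot-time initialization: allocate the first page of every buffer,
 * grow the buffers to trace_nr_entries entries, register the debugfs
 * files and the dummy tracer, then enable tracing.
 */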
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}
#endif
	}
	return ret;
}

fs_initcall(tracer_alloc_buffers);