ftrace: lockdep notrace annotations
[deliverable/linux.git] / kernel / trace / trace.c
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h>
16#include <linux/seq_file.h>
17#include <linux/debugfs.h>
18#include <linux/pagemap.h>
19#include <linux/hardirq.h>
20#include <linux/linkage.h>
21#include <linux/uaccess.h>
22#include <linux/ftrace.h>
23#include <linux/module.h>
24#include <linux/percpu.h>
25#include <linux/ctype.h>
26#include <linux/init.h>
27#include <linux/gfp.h>
28#include <linux/fs.h>
29
30#include "trace.h"
31
32unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
33unsigned long __read_mostly tracing_thresh;
34
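/* Convert nanoseconds to microseconds, rounding to the nearest microsecond. */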
35static long notrace
36ns2usecs(cycle_t nsec)
37{
38 nsec += 500;
39 do_div(nsec, 1000);
40 return nsec;
41}
42
43static atomic_t tracer_counter;
44static struct trace_array global_trace;
45
46static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
47
48static struct trace_array max_tr;
49
50static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
51
52static int tracer_enabled;
53static unsigned long trace_nr_entries = 16384UL;
54
55static struct tracer *trace_types __read_mostly;
56static struct tracer *current_trace __read_mostly;
57static int max_tracer_type_len;
58
59static DEFINE_MUTEX(trace_types_lock);
60
61#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
62
63static int __init set_nr_entries(char *str)
64{
65 if (!str)
66 return 0;
67 trace_nr_entries = simple_strtoul(str, &str, 0);
68 return 1;
69}
70__setup("trace_entries=", set_nr_entries);
71
72enum trace_type {
73 __TRACE_FIRST_TYPE = 0,
74
75 TRACE_FN,
76 TRACE_CTX,
77
78 __TRACE_LAST_TYPE
79};
80
81enum trace_flag_type {
82 TRACE_FLAG_IRQS_OFF = 0x01,
83 TRACE_FLAG_NEED_RESCHED = 0x02,
84 TRACE_FLAG_HARDIRQ = 0x04,
85 TRACE_FLAG_SOFTIRQ = 0x08,
86};
87
88enum trace_iterator_flags {
89 TRACE_ITER_PRINT_PARENT = 0x01,
90 TRACE_ITER_SYM_OFFSET = 0x02,
91 TRACE_ITER_SYM_ADDR = 0x04,
92 TRACE_ITER_VERBOSE = 0x08,
93};
94
95#define TRACE_ITER_SYM_MASK \
96 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
97
98/* These must match the bit positions above */
99static const char *trace_options[] = {
100 "print-parent",
101 "sym-offset",
102 "sym-addr",
103 "verbose",
104 NULL
105};
106
107static unsigned trace_flags;
108
109static DEFINE_SPINLOCK(ftrace_max_lock);
110
111/*
112 * Copy the new maximum trace into the separate maximum-trace
113 * structure. (this way the maximum trace is permanently saved,
114 * for later retrieval via /debugfs/tracing/latency_trace)
115 */
116static void notrace
117__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
118{
119 struct trace_array_cpu *data = tr->data[cpu];
120
121 max_tr.cpu = cpu;
122 max_tr.time_start = data->preempt_timestamp;
123
124 data = max_tr.data[cpu];
125 data->saved_latency = tracing_max_latency;
126
127 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
128 data->pid = tsk->pid;
129 data->uid = tsk->uid;
130 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
131 data->policy = tsk->policy;
132 data->rt_priority = tsk->rt_priority;
133
134 /* record this task's comm */
135 tracing_record_cmdline(current);
136}
137
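/**
 * update_max_tr - snapshot all CPU buffers into the max trace
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: the cpu of the buffer that recorded the new maximum
 *
 * Swaps the buffer pages of every CPU in @tr with those of max_tr, so
 * the trace that produced the new maximum latency is preserved for
 * later reading.  Interrupts must be disabled by the caller.
 */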
138notrace void
139update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
140{
141 struct trace_array_cpu *data;
142 void *save_trace;
143 struct list_head save_pages;
144 int i;
145
146 WARN_ON_ONCE(!irqs_disabled());
147 spin_lock(&ftrace_max_lock);
148 /* clear out all the previous traces */
149 for_each_possible_cpu(i) {
150 data = tr->data[i];
151 save_trace = max_tr.data[i]->trace;
152 save_pages = max_tr.data[i]->trace_pages;
153 memcpy(max_tr.data[i], data, sizeof(*data));
154 data->trace = save_trace;
155 data->trace_pages = save_pages;
156 }
157
158 __update_max_tr(tr, tsk, cpu);
159 spin_unlock(&ftrace_max_lock);
160}
161
162/**
163 * update_max_tr_single - only copy one trace over, and reset the rest
164 * @tr: tracer
165 * @tsk: task with the latency
166 * @cpu: the cpu of the buffer to copy.
167 */
168notrace void
169update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
170{
171 struct trace_array_cpu *data = tr->data[cpu];
172 void *save_trace;
173 struct list_head save_pages;
174 int i;
175
176 WARN_ON_ONCE(!irqs_disabled());
177 spin_lock(&ftrace_max_lock);
178 for_each_possible_cpu(i)
179 tracing_reset(max_tr.data[i]);
180
181 save_trace = max_tr.data[cpu]->trace;
182 save_pages = max_tr.data[cpu]->trace_pages;
183 memcpy(max_tr.data[cpu], data, sizeof(*data));
184 data->trace = save_trace;
185 data->trace_pages = save_pages;
186
187 __update_max_tr(tr, tsk, cpu);
188 spin_unlock(&ftrace_max_lock);
189}
190
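/**
 * register_tracer - register a tracer with the tracing framework
 * @type: the plugin for the tracer
 *
 * Adds @type to the list of available tracers.  Fails if the tracer
 * has no name or a tracer with the same name is already registered.
 */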
191int register_tracer(struct tracer *type)
192{
193 struct tracer *t;
194 int len;
195 int ret = 0;
196
197 if (!type->name) {
198 pr_info("Tracer must have a name\n");
199 return -1;
200 }
201
202 mutex_lock(&trace_types_lock);
203 for (t = trace_types; t; t = t->next) {
204 if (strcmp(type->name, t->name) == 0) {
205 /* already found */
206 pr_info("Trace %s already registered\n",
207 type->name);
208 ret = -1;
209 goto out;
210 }
211 }
212
213 type->next = trace_types;
214 trace_types = type;
215 len = strlen(type->name);
216 if (len > max_tracer_type_len)
217 max_tracer_type_len = len;
218 out:
219 mutex_unlock(&trace_types_lock);
220
221 return ret;
222}
223
224void unregister_tracer(struct tracer *type)
225{
226 struct tracer **t;
227 int len;
228
229 mutex_lock(&trace_types_lock);
230 for (t = &trace_types; *t; t = &(*t)->next) {
231 if (*t == type)
232 goto found;
233 }
234 pr_info("Trace %s not registered\n", type->name);
235 goto out;
236
237 found:
238 *t = (*t)->next;
239 if (strlen(type->name) != max_tracer_type_len)
240 goto out;
241
242 max_tracer_type_len = 0;
243 for (t = &trace_types; *t; t = &(*t)->next) {
244 len = strlen((*t)->name);
245 if (len > max_tracer_type_len)
246 max_tracer_type_len = len;
247 }
248 out:
249 mutex_unlock(&trace_types_lock);
250}
251
252void notrace tracing_reset(struct trace_array_cpu *data)
253{
254 data->trace_idx = 0;
255 data->trace_current = data->trace;
256 data->trace_current_idx = 0;
257}
258
259#ifdef CONFIG_FTRACE
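/*
 * Entry hook called for every traced function: records the instruction
 * pointer and its caller into the current CPU's buffer, unless tracing
 * is disabled or this CPU is already inside the tracer (the per-CPU
 * "disabled" counter prevents recursion).
 */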
260static void notrace
261function_trace_call(unsigned long ip, unsigned long parent_ip)
262{
263 struct trace_array *tr = &global_trace;
264 struct trace_array_cpu *data;
265 unsigned long flags;
266 long disabled;
267 int cpu;
268
269 if (unlikely(!tracer_enabled))
270 return;
271
272 raw_local_irq_save(flags);
273 cpu = raw_smp_processor_id();
274 data = tr->data[cpu];
275 disabled = atomic_inc_return(&data->disabled);
276
277 if (likely(disabled == 1))
278 ftrace(tr, data, ip, parent_ip, flags);
279
280 atomic_dec(&data->disabled);
281 raw_local_irq_restore(flags);
282}
283
284static struct ftrace_ops trace_ops __read_mostly =
285{
286 .func = function_trace_call,
287};
288#endif
289
290notrace void tracing_start_function_trace(void)
291{
292 register_ftrace_function(&trace_ops);
293}
294
295notrace void tracing_stop_function_trace(void)
296{
297 unregister_ftrace_function(&trace_ops);
298}
299
300#define SAVED_CMDLINES 128
301static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
302static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
303static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
304static int cmdline_idx;
305static DEFINE_SPINLOCK(trace_cmdline_lock);
306atomic_t trace_record_cmdline_disabled;
307
308static void trace_init_cmdlines(void)
309{
310 memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
311 memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
312 cmdline_idx = 0;
313}
314
315notrace void trace_stop_cmdline_recording(void);
316
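/*
 * Remember the comm of @tsk in the saved_cmdlines table, keyed by pid,
 * so later output can map a pid back to a command name without holding
 * a reference to the task.
 */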
317static void notrace trace_save_cmdline(struct task_struct *tsk)
318{
319 unsigned map;
320 unsigned idx;
321
322 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
323 return;
324
325 /*
326 * It's not the end of the world if we don't get
327 * the lock, but we also don't want to spin
328 * nor do we want to disable interrupts,
329 * so if we miss here, then better luck next time.
330 */
331 if (!spin_trylock(&trace_cmdline_lock))
332 return;
333
334 idx = map_pid_to_cmdline[tsk->pid];
335 if (idx >= SAVED_CMDLINES) {
336 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
337
338 map = map_cmdline_to_pid[idx];
339 if (map <= PID_MAX_DEFAULT)
340 map_pid_to_cmdline[map] = (unsigned)-1;
341
342 map_pid_to_cmdline[tsk->pid] = idx;
343
344 cmdline_idx = idx;
345 }
346
347 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
348
349 spin_unlock(&trace_cmdline_lock);
350}
351
352static notrace char *trace_find_cmdline(int pid)
353{
354 char *cmdline = "<...>";
355 unsigned map;
356
357 if (!pid)
358 return "<idle>";
359
360 if (pid > PID_MAX_DEFAULT)
361 goto out;
362
363 map = map_pid_to_cmdline[pid];
364 if (map >= SAVED_CMDLINES)
365 goto out;
366
367 cmdline = saved_cmdlines[map];
368
369 out:
370 return cmdline;
371}
372
373notrace void tracing_record_cmdline(struct task_struct *tsk)
374{
375 if (atomic_read(&trace_record_cmdline_disabled))
376 return;
377
378 trace_save_cmdline(tsk);
379}
380
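/*
 * Reserve the next entry slot in the per-CPU buffer, advancing to the
 * next page in the page list (and wrapping back to the first page)
 * when the current page is full.
 */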
381static inline notrace struct trace_entry *
382tracing_get_trace_entry(struct trace_array *tr,
383 struct trace_array_cpu *data)
384{
385 unsigned long idx, idx_next;
386 struct trace_entry *entry;
387 struct page *page;
388 struct list_head *next;
389
390 data->trace_idx++;
391 idx = data->trace_current_idx;
392 idx_next = idx + 1;
393
394 entry = data->trace_current + idx * TRACE_ENTRY_SIZE;
395
396 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
397 page = virt_to_page(data->trace_current);
398 if (unlikely(&page->lru == data->trace_pages.prev))
399 next = data->trace_pages.next;
400 else
401 next = page->lru.next;
402 page = list_entry(next, struct page, lru);
403 data->trace_current = page_address(page);
404 idx_next = 0;
405 }
406
407 data->trace_current_idx = idx_next;
408
409 return entry;
410}
411
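/*
 * Fill in the fields common to every trace entry: global sequence
 * index, pid, timestamp, preempt count and irq/softirq/resched flags.
 */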
412static inline notrace void
413tracing_generic_entry_update(struct trace_entry *entry,
414 unsigned long flags)
415{
416 struct task_struct *tsk = current;
417 unsigned long pc;
418
419 pc = preempt_count();
420
421 entry->idx = atomic_inc_return(&tracer_counter);
422 entry->preempt_count = pc & 0xff;
423 entry->pid = tsk->pid;
424 entry->t = now(raw_smp_processor_id());
425 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
426 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
427 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
428 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
429}
430
431notrace void
432ftrace(struct trace_array *tr, struct trace_array_cpu *data,
433 unsigned long ip, unsigned long parent_ip,
434 unsigned long flags)
435{
436 struct trace_entry *entry;
437
438 entry = tracing_get_trace_entry(tr, data);
439 tracing_generic_entry_update(entry, flags);
440 entry->type = TRACE_FN;
441 entry->fn.ip = ip;
442 entry->fn.parent_ip = parent_ip;
443}
444
445notrace void
446tracing_sched_switch_trace(struct trace_array *tr,
447 struct trace_array_cpu *data,
448 struct task_struct *prev, struct task_struct *next,
449 unsigned long flags)
450{
451 struct trace_entry *entry;
452
453 entry = tracing_get_trace_entry(tr, data);
454 tracing_generic_entry_update(entry, flags);
455 entry->type = TRACE_CTX;
456 entry->ctx.prev_pid = prev->pid;
457 entry->ctx.prev_prio = prev->prio;
458 entry->ctx.prev_state = prev->state;
459 entry->ctx.next_pid = next->pid;
460 entry->ctx.next_prio = next->prio;
461}
462
463enum trace_file_type {
464 TRACE_FILE_LAT_FMT = 1,
465};
466
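/*
 * Return the entry that iterator @iter would read next from @data's
 * buffer, or NULL once the iterator has consumed everything recorded
 * on this CPU.
 */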
467static struct trace_entry *
468trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
469 struct trace_iterator *iter, int cpu)
470{
471 struct page *page;
472 struct trace_entry *array;
473
474 if (iter->next_idx[cpu] >= tr->entries ||
475 iter->next_idx[cpu] >= data->trace_idx)
476 return NULL;
477
478 if (!iter->next_page[cpu]) {
479 /*
480 * Initialize. If the count of elements in
481 * this buffer is greater than the max entries
482 * we had an underrun, which means we looped around.
483 * We can simply use the current pointer as our
484 * starting point.
485 */
486 if (data->trace_idx >= tr->entries) {
487 page = virt_to_page(data->trace_current);
488 iter->next_page[cpu] = &page->lru;
489 iter->next_page_idx[cpu] = data->trace_current_idx;
490 } else {
491 iter->next_page[cpu] = data->trace_pages.next;
492 iter->next_page_idx[cpu] = 0;
493 }
494 }
495
496 page = list_entry(iter->next_page[cpu], struct page, lru);
497 array = page_address(page);
498
499 return &array[iter->next_page_idx[cpu]];
500}
501
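/*
 * Find the oldest pending entry across all CPU buffers (the one with
 * the lowest sequence index) and optionally report which CPU it came
 * from.
 */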
502static notrace struct trace_entry *
503find_next_entry(struct trace_iterator *iter, int *ent_cpu)
504{
505 struct trace_array *tr = iter->tr;
506 struct trace_entry *ent, *next = NULL;
507 int next_cpu = -1;
508 int cpu;
509
510 for_each_possible_cpu(cpu) {
511 if (!tr->data[cpu]->trace)
512 continue;
513 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
514 if (ent &&
515 (!next || (long)(next->idx - ent->idx) > 0)) {
516 next = ent;
517 next_cpu = cpu;
518 }
519 }
520
521 if (ent_cpu)
522 *ent_cpu = next_cpu;
523
524 return next;
525}
526
527static void *find_next_entry_inc(struct trace_iterator *iter)
528{
529 struct trace_entry *next;
530 int next_cpu = -1;
531
532 next = find_next_entry(iter, &next_cpu);
533
534 if (next) {
535 iter->idx++;
536 iter->next_idx[next_cpu]++;
537 iter->next_page_idx[next_cpu]++;
538 if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
539 struct trace_array_cpu *data = iter->tr->data[next_cpu];
540
541 iter->next_page_idx[next_cpu] = 0;
542 iter->next_page[next_cpu] =
543 iter->next_page[next_cpu]->next;
544 if (iter->next_page[next_cpu] == &data->trace_pages)
545 iter->next_page[next_cpu] =
546 data->trace_pages.next;
547 }
548 }
549 iter->ent = next;
550 iter->cpu = next_cpu;
551
552 return next ? iter : NULL;
553}
554
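/*
 * seq_file iterator callbacks: s_start/s_next walk the merged per-CPU
 * buffers in sequence order, s_show formats one entry and s_stop drops
 * any locks taken by the current tracer.
 */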
555static void notrace *
556s_next(struct seq_file *m, void *v, loff_t *pos)
557{
558 struct trace_iterator *iter = m->private;
559 void *ent;
560 void *last_ent = iter->ent;
561 int i = (int)*pos;
562
563 (*pos)++;
564
565 /* can't go backwards */
566 if (iter->idx > i)
567 return NULL;
568
569 if (iter->idx < 0)
570 ent = find_next_entry_inc(iter);
571 else
572 ent = iter;
573
574 while (ent && iter->idx < i)
575 ent = find_next_entry_inc(iter);
576
577 iter->pos = *pos;
578
579 if (last_ent && !ent)
580 seq_puts(m, "\n\nvim:ft=help\n");
581
582 return ent;
583}
584
585static void *s_start(struct seq_file *m, loff_t *pos)
586{
587 struct trace_iterator *iter = m->private;
588 void *p = NULL;
589 loff_t l = 0;
590 int i;
591
592 mutex_lock(&trace_types_lock);
593
594 if (!current_trace || current_trace != iter->trace)
595 return NULL;
596
597 atomic_inc(&trace_record_cmdline_disabled);
598
599 /* let the tracer grab locks here if needed */
600 if (current_trace->start)
601 current_trace->start(iter);
602
603 if (*pos != iter->pos) {
604 iter->ent = NULL;
605 iter->cpu = 0;
606 iter->idx = -1;
607
608 for_each_possible_cpu(i) {
609 iter->next_idx[i] = 0;
610 iter->next_page[i] = NULL;
611 }
612
613 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
614 ;
615
616 } else {
617 l = *pos - 1;
618 p = s_next(m, p, &l);
619 }
620
621 return p;
622}
623
624static void s_stop(struct seq_file *m, void *p)
625{
626 struct trace_iterator *iter = m->private;
627
628 atomic_dec(&trace_record_cmdline_disabled);
629
630 /* let the tracer release locks here if needed */
631 if (current_trace && current_trace == iter->trace && iter->trace->stop)
632 iter->trace->stop(iter);
633
634 mutex_unlock(&trace_types_lock);
635}
636
637static void
638seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
639{
640#ifdef CONFIG_KALLSYMS
641 char str[KSYM_SYMBOL_LEN];
642
643 kallsyms_lookup(address, NULL, NULL, NULL, str);
644
645 seq_printf(m, fmt, str);
646#endif
647}
648
649static void
650seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
651{
652#ifdef CONFIG_KALLSYMS
653 char str[KSYM_SYMBOL_LEN];
654
655 sprint_symbol(str, address);
656 seq_printf(m, fmt, str);
657#endif
658}
659
660#ifndef CONFIG_64BIT
661# define IP_FMT "%08lx"
662#else
663# define IP_FMT "%016lx"
664#endif
665
666static void notrace
667seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
668{
669 if (!ip) {
670 seq_printf(m, "0");
671 return;
672 }
673
674 if (sym_flags & TRACE_ITER_SYM_OFFSET)
675 seq_print_sym_offset(m, "%s", ip);
676 else
677 seq_print_sym_short(m, "%s", ip);
678
679 if (sym_flags & TRACE_ITER_SYM_ADDR)
680 seq_printf(m, " <" IP_FMT ">", ip);
681}
682
683static void notrace print_lat_help_header(struct seq_file *m)
684{
685 seq_puts(m, "# _------=> CPU# \n");
686 seq_puts(m, "# / _-----=> irqs-off \n");
687 seq_puts(m, "# | / _----=> need-resched \n");
688 seq_puts(m, "# || / _---=> hardirq/softirq \n");
689 seq_puts(m, "# ||| / _--=> preempt-depth \n");
690 seq_puts(m, "# |||| / \n");
691 seq_puts(m, "# ||||| delay \n");
692 seq_puts(m, "# cmd pid ||||| time | caller \n");
693 seq_puts(m, "# \\ / ||||| \\ | / \n");
694}
695
696static void notrace print_func_help_header(struct seq_file *m)
697{
698 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
699 seq_puts(m, "# | | | | |\n");
700}
701
702
703static void notrace
704print_trace_header(struct seq_file *m, struct trace_iterator *iter)
705{
706 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
707 struct trace_array *tr = iter->tr;
708 struct trace_array_cpu *data = tr->data[tr->cpu];
709 struct tracer *type = current_trace;
710 unsigned long total = 0;
711 unsigned long entries = 0;
712 int cpu;
713 const char *name = "preemption";
714
715 if (type)
716 name = type->name;
717
718 for_each_possible_cpu(cpu) {
719 if (tr->data[cpu]->trace) {
720 total += tr->data[cpu]->trace_idx;
721 if (tr->data[cpu]->trace_idx > tr->entries)
722 entries += tr->entries;
723 else
724 entries += tr->data[cpu]->trace_idx;
725 }
726 }
727
728 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
729 name, UTS_RELEASE);
730 seq_puts(m, "-----------------------------------"
731 "---------------------------------\n");
732 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
733 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
734 data->saved_latency,
735 entries,
736 total,
737 tr->cpu,
738#if defined(CONFIG_PREEMPT_NONE)
739 "server",
740#elif defined(CONFIG_PREEMPT_VOLUNTARY)
741 "desktop",
742#elif defined(CONFIG_PREEMPT_DESKTOP)
743 "preempt",
744#else
745 "unknown",
746#endif
747 /* These are reserved for later use */
748 0, 0, 0, 0);
749#ifdef CONFIG_SMP
750 seq_printf(m, " #P:%d)\n", num_online_cpus());
751#else
752 seq_puts(m, ")\n");
753#endif
754 seq_puts(m, " -----------------\n");
755 seq_printf(m, " | task: %.16s-%d "
756 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
757 data->comm, data->pid, data->uid, data->nice,
758 data->policy, data->rt_priority);
759 seq_puts(m, " -----------------\n");
760
761 if (data->critical_start) {
762 seq_puts(m, " => started at: ");
763 seq_print_ip_sym(m, data->critical_start, sym_flags);
764 seq_puts(m, "\n => ended at: ");
765 seq_print_ip_sym(m, data->critical_end, sym_flags);
766 seq_puts(m, "\n");
767 }
768
769 seq_puts(m, "\n");
770}
771
772unsigned long nsecs_to_usecs(unsigned long nsecs)
773{
774 return nsecs / 1000;
775}
776
777static void notrace
778lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
779{
780 int hardirq, softirq;
781 char *comm;
782
783 comm = trace_find_cmdline(entry->pid);
784
785 seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
786 seq_printf(m, "%d", cpu);
787 seq_printf(m, "%c%c",
788 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
789 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
790
791 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
792 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
793 if (hardirq && softirq)
794 seq_putc(m, 'H');
795 else {
796 if (hardirq)
797 seq_putc(m, 'h');
798 else {
799 if (softirq)
800 seq_putc(m, 's');
801 else
802 seq_putc(m, '.');
803 }
804 }
805
806 if (entry->preempt_count)
807 seq_printf(m, "%x", entry->preempt_count);
808 else
809 seq_puts(m, ".");
810}
811
812unsigned long preempt_mark_thresh = 100;
813
814static void notrace
815lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
816 unsigned long rel_usecs)
817{
818 seq_printf(m, " %4lldus", abs_usecs);
819 if (rel_usecs > preempt_mark_thresh)
820 seq_puts(m, "!: ");
821 else if (rel_usecs > 1)
822 seq_puts(m, "+: ");
823 else
824 seq_puts(m, " : ");
825}
826
827static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
828
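/*
 * Print one entry in the latency-trace format, including the relative
 * and absolute timestamps and the irq/preempt state columns.
 */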
829static void notrace
830print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
831 unsigned int trace_idx, int cpu)
832{
833 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
834 struct trace_entry *next_entry = find_next_entry(iter, NULL);
835 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
836 struct trace_entry *entry = iter->ent;
837 unsigned long abs_usecs;
838 unsigned long rel_usecs;
839 char *comm;
840 int S;
841
842 if (!next_entry)
843 next_entry = entry;
844 rel_usecs = ns2usecs(next_entry->t - entry->t);
845 abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
846
847 if (verbose) {
848 comm = trace_find_cmdline(entry->pid);
849 seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
850 " %ld.%03ldms (+%ld.%03ldms): ",
851 comm,
852 entry->pid, cpu, entry->flags,
853 entry->preempt_count, trace_idx,
854 ns2usecs(entry->t),
855 abs_usecs/1000,
856 abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
857 } else {
858 lat_print_generic(m, entry, cpu);
859 lat_print_timestamp(m, abs_usecs, rel_usecs);
860 }
861 switch (entry->type) {
862 case TRACE_FN:
863 seq_print_ip_sym(m, entry->fn.ip, sym_flags);
864 seq_puts(m, " (");
865 seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
866 seq_puts(m, ")\n");
867 break;
868 case TRACE_CTX:
869 S = entry->ctx.prev_state < sizeof(state_to_char) ?
870 state_to_char[entry->ctx.prev_state] : 'X';
871 comm = trace_find_cmdline(entry->ctx.next_pid);
872 seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
873 entry->ctx.prev_pid,
874 entry->ctx.prev_prio,
875 S,
876 entry->ctx.next_pid,
877 entry->ctx.next_prio,
878 comm);
879 break;
880 }
881}
882
883static void notrace
884print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
885{
886 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
887 struct trace_entry *entry = iter->ent;
888 unsigned long usec_rem;
889 unsigned long long t;
890 unsigned long secs;
891 char *comm;
892 int S;
893
894 comm = trace_find_cmdline(iter->ent->pid);
895
896 t = ns2usecs(entry->t);
897 usec_rem = do_div(t, 1000000ULL);
898 secs = (unsigned long)t;
899
900 seq_printf(m, "%16s-%-5d ", comm, entry->pid);
901 seq_printf(m, "[%02d] ", iter->cpu);
902 seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);
903
904 switch (entry->type) {
905 case TRACE_FN:
906 seq_print_ip_sym(m, entry->fn.ip, sym_flags);
907 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
908 entry->fn.parent_ip) {
909 seq_printf(m, " <-");
910 seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
911 }
912 break;
913 case TRACE_CTX:
914 S = entry->ctx.prev_state < sizeof(state_to_char) ?
915 state_to_char[entry->ctx.prev_state] : 'X';
916 seq_printf(m, " %d:%d:%c ==> %d:%d\n",
917 entry->ctx.prev_pid,
918 entry->ctx.prev_prio,
919 S,
920 entry->ctx.next_pid,
921 entry->ctx.next_prio);
922 break;
923 }
924 seq_printf(m, "\n");
925}
926
927static int trace_empty(struct trace_iterator *iter)
928{
929 struct trace_array_cpu *data;
930 int cpu;
931
932 for_each_possible_cpu(cpu) {
933 data = iter->tr->data[cpu];
934
935 if (data->trace &&
936 data->trace_idx)
937 return 0;
938 }
939 return 1;
940}
941
942static int s_show(struct seq_file *m, void *v)
943{
944 struct trace_iterator *iter = v;
945
946 if (iter->ent == NULL) {
947 if (iter->tr) {
948 seq_printf(m, "# tracer: %s\n", iter->trace->name);
949 seq_puts(m, "#\n");
950 }
951 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
952 /* print nothing if the buffers are empty */
953 if (trace_empty(iter))
954 return 0;
955 print_trace_header(m, iter);
956 if (!(trace_flags & TRACE_ITER_VERBOSE))
957 print_lat_help_header(m);
958 } else {
959 if (!(trace_flags & TRACE_ITER_VERBOSE))
960 print_func_help_header(m);
961 }
962 } else {
963 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
964 print_lat_fmt(m, iter, iter->idx, iter->cpu);
965 else
966 print_trace_fmt(m, iter);
967 }
968
969 return 0;
970}
971
972static struct seq_operations tracer_seq_ops = {
973 .start = s_start,
974 .next = s_next,
975 .stop = s_stop,
976 .show = s_show,
977};
978
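/*
 * Common open routine for the trace and latency_trace files: allocate
 * an iterator, select max_tr when the current tracer keeps a
 * max-latency snapshot, and stop tracing while the file is being read.
 */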
979static struct trace_iterator notrace *
980__tracing_open(struct inode *inode, struct file *file, int *ret)
981{
982 struct trace_iterator *iter;
983
984 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
985 if (!iter) {
986 *ret = -ENOMEM;
987 goto out;
988 }
989
990 mutex_lock(&trace_types_lock);
991 if (current_trace && current_trace->print_max)
992 iter->tr = &max_tr;
993 else
994 iter->tr = inode->i_private;
995 iter->trace = current_trace;
996 iter->pos = -1;
997
998 /* TODO stop tracer */
999 *ret = seq_open(file, &tracer_seq_ops);
1000 if (!*ret) {
1001 struct seq_file *m = file->private_data;
1002 m->private = iter;
1003
1004 /* stop the trace while dumping */
1005 if (iter->tr->ctrl)
1006 tracer_enabled = 0;
1007
1008 if (iter->trace && iter->trace->open)
1009 iter->trace->open(iter);
1010 } else {
1011 kfree(iter);
1012 iter = NULL;
1013 }
1014 mutex_unlock(&trace_types_lock);
1015
1016 out:
1017 return iter;
1018}
1019
1020int tracing_open_generic(struct inode *inode, struct file *filp)
1021{
1022 filp->private_data = inode->i_private;
1023 return 0;
1024}
1025
1026int tracing_release(struct inode *inode, struct file *file)
1027{
1028 struct seq_file *m = (struct seq_file *)file->private_data;
1029 struct trace_iterator *iter = m->private;
1030
1031 mutex_lock(&trace_types_lock);
1032 if (iter->trace && iter->trace->close)
1033 iter->trace->close(iter);
1034
1035 /* reenable tracing if it was previously enabled */
1036 if (iter->tr->ctrl)
1037 tracer_enabled = 1;
1038 mutex_unlock(&trace_types_lock);
1039
1040 seq_release(inode, file);
1041 kfree(iter);
1042 return 0;
1043}
1044
1045static int tracing_open(struct inode *inode, struct file *file)
1046{
1047 int ret;
1048
1049 __tracing_open(inode, file, &ret);
1050
1051 return ret;
1052}
1053
1054static int tracing_lt_open(struct inode *inode, struct file *file)
1055{
1056 struct trace_iterator *iter;
1057 int ret;
1058
1059 iter = __tracing_open(inode, file, &ret);
1060
1061 if (!ret)
1062 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1063
1064 return ret;
1065}
1066
1067
1068static void notrace *
1069t_next(struct seq_file *m, void *v, loff_t *pos)
1070{
1071 struct tracer *t = m->private;
1072
1073 (*pos)++;
1074
1075 if (t)
1076 t = t->next;
1077
1078 m->private = t;
1079
1080 return t;
1081}
1082
1083static void *t_start(struct seq_file *m, loff_t *pos)
1084{
1085 struct tracer *t = m->private;
1086 loff_t l = 0;
1087
1088 mutex_lock(&trace_types_lock);
1089 for (; t && l < *pos; t = t_next(m, t, &l))
1090 ;
1091
1092 return t;
1093}
1094
1095static void t_stop(struct seq_file *m, void *p)
1096{
1097 mutex_unlock(&trace_types_lock);
1098}
1099
1100static int t_show(struct seq_file *m, void *v)
1101{
1102 struct tracer *t = v;
1103
1104 if (!t)
1105 return 0;
1106
1107 seq_printf(m, "%s", t->name);
1108 if (t->next)
1109 seq_putc(m, ' ');
1110 else
1111 seq_putc(m, '\n');
1112
1113 return 0;
1114}
1115
1116static struct seq_operations show_traces_seq_ops = {
1117 .start = t_start,
1118 .next = t_next,
1119 .stop = t_stop,
1120 .show = t_show,
1121};
1122
1123static int show_traces_open(struct inode *inode, struct file *file)
1124{
1125 int ret;
1126
1127 ret = seq_open(file, &show_traces_seq_ops);
1128 if (!ret) {
1129 struct seq_file *m = file->private_data;
1130 m->private = trace_types;
1131 }
1132
1133 return ret;
1134}
1135
1136static struct file_operations tracing_fops = {
1137 .open = tracing_open,
1138 .read = seq_read,
1139 .llseek = seq_lseek,
1140 .release = tracing_release,
1141};
1142
1143static struct file_operations tracing_lt_fops = {
1144 .open = tracing_lt_open,
1145 .read = seq_read,
1146 .llseek = seq_lseek,
1147 .release = tracing_release,
1148};
1149
1150static struct file_operations show_traces_fops = {
1151 .open = show_traces_open,
1152 .read = seq_read,
1153 .release = seq_release,
1154};
1155
1156static ssize_t
1157tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
1158 size_t cnt, loff_t *ppos)
1159{
1160 char *buf;
1161 int r = 0;
1162 int len = 0;
1163 int i;
1164
1165 /* calculate max size */
1166 for (i = 0; trace_options[i]; i++) {
1167 len += strlen(trace_options[i]);
1168 len += 3; /* "no" and space */
1169 }
1170
1171 /* +2 for \n and \0 */
1172 buf = kmalloc(len + 2, GFP_KERNEL);
1173 if (!buf)
1174 return -ENOMEM;
1175
1176 for (i = 0; trace_options[i]; i++) {
1177 if (trace_flags & (1 << i))
1178 r += sprintf(buf + r, "%s ", trace_options[i]);
1179 else
1180 r += sprintf(buf + r, "no%s ", trace_options[i]);
1181 }
1182
1183 r += sprintf(buf + r, "\n");
1184 WARN_ON(r >= len + 2);
1185
1186 r = simple_read_from_buffer(ubuf, cnt, ppos,
1187 buf, r);
1188
1189 kfree(buf);
1190
1191 return r;
1192}
1193
1194static ssize_t
1195tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
1196 size_t cnt, loff_t *ppos)
1197{
1198 char buf[64];
1199 char *cmp = buf;
1200 int neg = 0;
1201 int i;
1202
1203 if (cnt > 63)
1204 cnt = 63;
1205
1206 if (copy_from_user(&buf, ubuf, cnt))
1207 return -EFAULT;
1208
1209 buf[cnt] = 0;
1210
1211 if (strncmp(buf, "no", 2) == 0) {
1212 neg = 1;
1213 cmp += 2;
1214 }
1215
1216 for (i = 0; trace_options[i]; i++) {
1217 int len = strlen(trace_options[i]);
1218
1219 if (strncmp(cmp, trace_options[i], len) == 0) {
1220 if (neg)
1221 trace_flags &= ~(1 << i);
1222 else
1223 trace_flags |= (1 << i);
1224 break;
1225 }
1226 }
1227
1228 filp->f_pos += cnt;
1229
1230 return cnt;
1231}
1232
1233static struct file_operations tracing_iter_fops = {
1234 .open = tracing_open_generic,
1235 .read = tracing_iter_ctrl_read,
1236 .write = tracing_iter_ctrl_write,
1237};
1238
1239static ssize_t
1240tracing_ctrl_read(struct file *filp, char __user *ubuf,
1241 size_t cnt, loff_t *ppos)
1242{
1243 struct trace_array *tr = filp->private_data;
1244 char buf[64];
1245 int r;
1246
1247 r = sprintf(buf, "%ld\n", tr->ctrl);
1248 return simple_read_from_buffer(ubuf, cnt, ppos,
1249 buf, r);
1250}
1251
1252static ssize_t
1253tracing_ctrl_write(struct file *filp, const char __user *ubuf,
1254 size_t cnt, loff_t *ppos)
1255{
1256 struct trace_array *tr = filp->private_data;
1257 long val;
1258 char buf[64];
1259
1260 if (cnt > 63)
1261 cnt = 63;
1262
1263 if (copy_from_user(&buf, ubuf, cnt))
1264 return -EFAULT;
1265
1266 buf[cnt] = 0;
1267
1268 val = simple_strtoul(buf, NULL, 10);
1269
1270 val = !!val;
1271
1272 mutex_lock(&trace_types_lock);
1273 if (tr->ctrl ^ val) {
1274 if (val)
1275 tracer_enabled = 1;
1276 else
1277 tracer_enabled = 0;
1278
1279 tr->ctrl = val;
1280
1281 if (current_trace && current_trace->ctrl_update)
1282 current_trace->ctrl_update(tr);
1283 }
1284 mutex_unlock(&trace_types_lock);
1285
1286 filp->f_pos += cnt;
1287
1288 return cnt;
1289}
1290
1291static ssize_t
1292tracing_set_trace_read(struct file *filp, char __user *ubuf,
1293 size_t cnt, loff_t *ppos)
1294{
1295 char buf[max_tracer_type_len+2];
1296 int r;
1297
1298 mutex_lock(&trace_types_lock);
1299 if (current_trace)
1300 r = sprintf(buf, "%s\n", current_trace->name);
1301 else
1302 r = sprintf(buf, "\n");
1303 mutex_unlock(&trace_types_lock);
1304
1305 return simple_read_from_buffer(ubuf, cnt, ppos,
1306 buf, r);
1307}
1308
1309static ssize_t
1310tracing_set_trace_write(struct file *filp, const char __user *ubuf,
1311 size_t cnt, loff_t *ppos)
1312{
1313 struct trace_array *tr = &global_trace;
1314 struct tracer *t;
1315 char buf[max_tracer_type_len+1];
1316 int i;
1317
1318 if (cnt > max_tracer_type_len)
1319 cnt = max_tracer_type_len;
1320
1321 if (copy_from_user(&buf, ubuf, cnt))
1322 return -EFAULT;
1323
1324 buf[cnt] = 0;
1325
1326 /* strip ending whitespace. */
1327 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
1328 buf[i] = 0;
1329
1330 mutex_lock(&trace_types_lock);
1331 for (t = trace_types; t; t = t->next) {
1332 if (strcmp(t->name, buf) == 0)
1333 break;
1334 }
1335 if (!t || t == current_trace)
1336 goto out;
1337
1338 if (current_trace && current_trace->reset)
1339 current_trace->reset(tr);
1340
1341 current_trace = t;
1342 if (t->init)
1343 t->init(tr);
1344
1345 out:
1346 mutex_unlock(&trace_types_lock);
1347
1348 filp->f_pos += cnt;
1349
1350 return cnt;
1351}
1352
1353static ssize_t
1354tracing_max_lat_read(struct file *filp, char __user *ubuf,
1355 size_t cnt, loff_t *ppos)
1356{
1357 unsigned long *ptr = filp->private_data;
1358 char buf[64];
1359 int r;
1360
1361 r = snprintf(buf, 64, "%ld\n",
1362 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
1363 if (r > 64)
1364 r = 64;
1365 return simple_read_from_buffer(ubuf, cnt, ppos,
1366 buf, r);
1367}
1368
1369static ssize_t
1370tracing_max_lat_write(struct file *filp, const char __user *ubuf,
1371 size_t cnt, loff_t *ppos)
1372{
1373 long *ptr = filp->private_data;
1374 long val;
1375 char buf[64];
1376
1377 if (cnt > 63)
1378 cnt = 63;
1379
1380 if (copy_from_user(&buf, ubuf, cnt))
1381 return -EFAULT;
1382
1383 buf[cnt] = 0;
1384
1385 val = simple_strtoul(buf, NULL, 10);
1386
1387 *ptr = val * 1000;
1388
1389 return cnt;
1390}
1391
1392static struct file_operations tracing_max_lat_fops = {
1393 .open = tracing_open_generic,
1394 .read = tracing_max_lat_read,
1395 .write = tracing_max_lat_write,
1396};
1397
1398static struct file_operations tracing_ctrl_fops = {
1399 .open = tracing_open_generic,
1400 .read = tracing_ctrl_read,
1401 .write = tracing_ctrl_write,
1402};
1403
1404static struct file_operations set_tracer_fops = {
1405 .open = tracing_open_generic,
1406 .read = tracing_set_trace_read,
1407 .write = tracing_set_trace_write,
1408};
1409
1410#ifdef CONFIG_DYNAMIC_FTRACE
1411
1412static ssize_t
1413tracing_read_long(struct file *filp, char __user *ubuf,
1414 size_t cnt, loff_t *ppos)
1415{
1416 unsigned long *p = filp->private_data;
1417 char buf[64];
1418 int r;
1419
1420 r = sprintf(buf, "%ld\n", *p);
1421 return simple_read_from_buffer(ubuf, cnt, ppos,
1422 buf, r);
1423}
1424
1425static struct file_operations tracing_read_long_fops = {
1426 .open = tracing_open_generic,
1427 .read = tracing_read_long,
1428};
1429#endif
1430
1431static struct dentry *d_tracer;
1432
1433struct dentry *tracing_init_dentry(void)
1434{
1435 static int once;
1436
1437 if (d_tracer)
1438 return d_tracer;
1439
1440 d_tracer = debugfs_create_dir("tracing", NULL);
1441
1442 if (!d_tracer && !once) {
1443 once = 1;
1444 pr_warning("Could not create debugfs directory 'tracing'\n");
1445 return NULL;
1446 }
1447
1448 return d_tracer;
1449}
1450
1451static __init void tracer_init_debugfs(void)
1452{
1453 struct dentry *d_tracer;
1454 struct dentry *entry;
1455
1456 d_tracer = tracing_init_dentry();
1457
1458 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
1459 &global_trace, &tracing_ctrl_fops);
1460 if (!entry)
1461 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
1462
1463 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
1464 NULL, &tracing_iter_fops);
1465 if (!entry)
1466 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
1467
1468 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
1469 &global_trace, &tracing_lt_fops);
1470 if (!entry)
1471 pr_warning("Could not create debugfs 'latency_trace' entry\n");
1472
1473 entry = debugfs_create_file("trace", 0444, d_tracer,
1474 &global_trace, &tracing_fops);
1475 if (!entry)
1476 pr_warning("Could not create debugfs 'trace' entry\n");
1477
1478 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
1479 &global_trace, &show_traces_fops);
1480 if (!entry)
1481 pr_warning("Could not create debugfs 'available_tracers' entry\n");
1482
1483 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
1484 &global_trace, &set_tracer_fops);
1485 if (!entry)
1486 pr_warning("Could not create debugfs 'current_tracer' entry\n");
1487
1488 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
1489 &tracing_max_latency,
1490 &tracing_max_lat_fops);
1491 if (!entry)
1492 pr_warning("Could not create debugfs "
1493 "'tracing_max_latency' entry\n");
1494
1495 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
1496 &tracing_thresh, &tracing_max_lat_fops);
1497 if (!entry)
1498 pr_warning("Could not create debugfs "
1499 "'tracing_thresh' entry\n");
1500
1501#ifdef CONFIG_DYNAMIC_FTRACE
1502 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
1503 &ftrace_update_tot_cnt,
1504 &tracing_read_long_fops);
1505 if (!entry)
1506 pr_warning("Could not create debugfs "
1507 "'dyn_ftrace_total_info' entry\n");
1508#endif
1509}
1510
1511/* dummy tracer to disable tracing */
1512static struct tracer no_tracer __read_mostly =
1513{
1514 .name = "none",
1515};
1516
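/*
 * Add one more page to every CPU buffer (and to the max trace buffers
 * when CONFIG_TRACER_MAX_TRACE is set).  On failure, all pages
 * allocated in this call are freed and -ENOMEM is returned.
 */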
1517static int trace_alloc_page(void)
1518{
1519 struct trace_array_cpu *data;
1520 void *array;
1521 struct page *page, *tmp;
1522 LIST_HEAD(pages);
1523 int i;
1524
1525 /* first allocate a page for each CPU */
1526 for_each_possible_cpu(i) {
1527 array = (void *)__get_free_page(GFP_KERNEL);
1528 if (array == NULL) {
1529 printk(KERN_ERR "tracer: failed to allocate page"
1530 " for trace buffer!\n");
1531 goto free_pages;
1532 }
1533
1534 page = virt_to_page(array);
1535 list_add(&page->lru, &pages);
1536
1537/* Only allocate if we are actually using the max trace */
1538#ifdef CONFIG_TRACER_MAX_TRACE
1539 array = (void *)__get_free_page(GFP_KERNEL);
1540 if (array == NULL) {
1541 printk(KERN_ERR "tracer: failed to allocate page"
1542 " for trace buffer!\n");
1543 goto free_pages;
1544 }
1545 page = virt_to_page(array);
1546 list_add(&page->lru, &pages);
1547#endif
1548 }
1549
1550 /* Now that we successfully allocated a page per CPU, add them */
1551 for_each_possible_cpu(i) {
1552 data = global_trace.data[i];
1553 page = list_entry(pages.next, struct page, lru);
1554 list_del(&page->lru);
1555 list_add_tail(&page->lru, &data->trace_pages);
1556 ClearPageLRU(page);
1557
1558#ifdef CONFIG_TRACER_MAX_TRACE
1559 data = max_tr.data[i];
1560 page = list_entry(pages.next, struct page, lru);
1561 list_del(&page->lru);
1562 list_add_tail(&page->lru, &data->trace_pages);
1563 SetPageLRU(page);
1564#endif
1565 }
1566 global_trace.entries += ENTRIES_PER_PAGE;
1567
1568 return 0;
1569
1570 free_pages:
1571 list_for_each_entry_safe(page, tmp, &pages, lru) {
1572 list_del(&page->lru);
1573 __free_page(page);
1574 }
1575 return -ENOMEM;
1576}
1577
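/*
 * Boot-time initcall: allocate the first page of every per-CPU buffer,
 * keep adding pages until trace_nr_entries is reached, set up the
 * debugfs files and register the dummy "none" tracer.
 */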
1578__init static int tracer_alloc_buffers(void)
1579{
1580 struct trace_array_cpu *data;
1581 void *array;
1582 struct page *page;
1583 int pages = 0;
1584 int i;
1585
1586 /* Allocate the first page for all buffers */
1587 for_each_possible_cpu(i) {
1588 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
1589 max_tr.data[i] = &per_cpu(max_data, i);
1590
1591 array = (void *)__get_free_page(GFP_KERNEL);
1592 if (array == NULL) {
1593 printk(KERN_ERR "tracer: failed to allocate page"
1594 " for trace buffer!\n");
1595 goto free_buffers;
1596 }
1597 data->trace = array;
1598
1599 /* set the array to the list */
1600 INIT_LIST_HEAD(&data->trace_pages);
1601 page = virt_to_page(array);
1602 list_add(&page->lru, &data->trace_pages);
1603 /* use the LRU flag to differentiate the two buffers */
1604 ClearPageLRU(page);
1605
1606/* Only allocate if we are actually using the max trace */
1607#ifdef CONFIG_TRACER_MAX_TRACE
1608 array = (void *)__get_free_page(GFP_KERNEL);
1609 if (array == NULL) {
1610 printk(KERN_ERR "tracer: failed to allocate page"
1611 " for trace buffer!\n");
1612 goto free_buffers;
1613 }
1614 max_tr.data[i]->trace = array;
1615
1616 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
1617 page = virt_to_page(array);
1618 list_add(&page->lru, &max_tr.data[i]->trace_pages);
1619 SetPageLRU(page);
1620#endif
1621 }
1622
1623 /*
1624 * Since we allocate by orders of pages, we may be able to
1625 * round up a bit.
1626 */
1627 global_trace.entries = ENTRIES_PER_PAGE;
1628 max_tr.entries = global_trace.entries;
1629 pages++;
1630
1631 while (global_trace.entries < trace_nr_entries) {
1632 if (trace_alloc_page())
1633 break;
1634 pages++;
1635 }
1636
1637 pr_info("tracer: %d pages allocated for %ld",
1638 pages, trace_nr_entries);
1639 pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
1640 pr_info(" actual entries %ld\n", global_trace.entries);
1641
1642 tracer_init_debugfs();
1643
1644 trace_init_cmdlines();
1645
1646 register_tracer(&no_tracer);
1647 current_trace = &no_tracer;
1648
1649 return 0;
1650
1651 free_buffers:
1652 for (i-- ; i >= 0; i--) {
1653 struct page *page, *tmp;
1654 struct trace_array_cpu *data = global_trace.data[i];
1655
1656 if (data && data->trace) {
1657 list_for_each_entry_safe(page, tmp,
1658 &data->trace_pages, lru) {
1659 list_del(&page->lru);
1660 __free_page(page);
1661 }
1662 data->trace = NULL;
1663 }
1664
1665#ifdef CONFIG_TRACER_MAX_TRACE
1666 data = max_tr.data[i];
1667 if (data && data->trace) {
1668 list_for_each_entry_safe(page, tmp,
1669 &data->trace_pages, lru) {
1670 list_del(&page->lru);
1671 __free_page(page);
1672 }
1673 data->trace = NULL;
1674 }
1675#endif
1676 }
1677 return -ENOMEM;
1678}
1679
1680device_initcall(tracer_alloc_buffers);