/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
41 #include <linux/sched/rt.h>
44 #include "trace_output.h"
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
50 bool ring_buffer_expanded
;
53 * We need to change this state when a selftest is running.
54 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
56 * insertions into the ring-buffer such as trace_printk could occurred
57 * at the same time, giving false positive or negative results.
59 static bool __read_mostly tracing_selftest_running
;
62 * If a tracer is running, we do not want to run SELFTEST.
64 bool __read_mostly tracing_selftest_disabled
;
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt
[] = {
71 static struct tracer_flags dummy_tracer_flags
= {
73 .opts
= dummy_tracer_opt
76 static int dummy_set_flag(u32 old_flags
, u32 bit
, int set
)
82 * To prevent the comm cache from being overwritten when no
83 * tracing is active, only save the comm when a trace event
86 static DEFINE_PER_CPU(bool, trace_cmdline_save
);
89 * Kill all tracing for good (never come back).
90 * It is initialized to 1 but will turn to zero if the initialization
91 * of the tracer is successful. But that is the only place that sets
94 static int tracing_disabled
= 1;
96 DEFINE_PER_CPU(int, ftrace_cpu_disabled
);
98 cpumask_var_t __read_mostly tracing_buffer_mask
;
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputing it to a
109 * It is default off, but you can enable it with either specifying
110 * "ftrace_dump_on_oops" in the kernel command line, or setting
111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
116 enum ftrace_dump_mode ftrace_dump_on_oops
;
118 /* When set, tracing will stop when a WARN*() is hit */
119 int __disable_trace_on_warning
;
121 static int tracing_set_tracer(const char *buf
);
123 #define MAX_TRACER_SIZE 100
124 static char bootup_tracer_buf
[MAX_TRACER_SIZE
] __initdata
;
125 static char *default_bootup_tracer
;
127 static bool allocate_snapshot
;
129 static int __init
set_cmdline_ftrace(char *str
)
131 strlcpy(bootup_tracer_buf
, str
, MAX_TRACER_SIZE
);
132 default_bootup_tracer
= bootup_tracer_buf
;
133 /* We are using ftrace early, expand it */
134 ring_buffer_expanded
= true;
137 __setup("ftrace=", set_cmdline_ftrace
);
139 static int __init
set_ftrace_dump_on_oops(char *str
)
141 if (*str
++ != '=' || !*str
) {
142 ftrace_dump_on_oops
= DUMP_ALL
;
146 if (!strcmp("orig_cpu", str
)) {
147 ftrace_dump_on_oops
= DUMP_ORIG
;
153 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops
);
155 static int __init
stop_trace_on_warning(char *str
)
157 __disable_trace_on_warning
= 1;
160 __setup("traceoff_on_warning=", stop_trace_on_warning
);
162 static int __init
boot_alloc_snapshot(char *str
)
164 allocate_snapshot
= true;
165 /* We also need the main ring buffer expanded */
166 ring_buffer_expanded
= true;
169 __setup("alloc_snapshot", boot_alloc_snapshot
);
172 static char trace_boot_options_buf
[MAX_TRACER_SIZE
] __initdata
;
173 static char *trace_boot_options __initdata
;
175 static int __init
set_trace_boot_options(char *str
)
177 strlcpy(trace_boot_options_buf
, str
, MAX_TRACER_SIZE
);
178 trace_boot_options
= trace_boot_options_buf
;
181 __setup("trace_options=", set_trace_boot_options
);
184 unsigned long long ns2usecs(cycle_t nsec
)
192 * The global_trace is the descriptor that holds the tracing
193 * buffers for the live tracing. For each CPU, it contains
194 * a link list of pages that will store trace entries. The
195 * page descriptor of the pages in the memory is used to hold
196 * the link list by linking the lru item in the page descriptor
197 * to each of the pages in the buffer per CPU.
199 * For each active CPU there is a data field that holds the
200 * pages for the buffer for that CPU. Each CPU has the same number
201 * of pages allocated for its buffer.
203 static struct trace_array global_trace
;
205 LIST_HEAD(ftrace_trace_arrays
);
207 int trace_array_get(struct trace_array
*this_tr
)
209 struct trace_array
*tr
;
212 mutex_lock(&trace_types_lock
);
213 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
220 mutex_unlock(&trace_types_lock
);
225 static void __trace_array_put(struct trace_array
*this_tr
)
227 WARN_ON(!this_tr
->ref
);
231 void trace_array_put(struct trace_array
*this_tr
)
233 mutex_lock(&trace_types_lock
);
234 __trace_array_put(this_tr
);
235 mutex_unlock(&trace_types_lock
);
238 int filter_check_discard(struct ftrace_event_file
*file
, void *rec
,
239 struct ring_buffer
*buffer
,
240 struct ring_buffer_event
*event
)
242 if (unlikely(file
->flags
& FTRACE_EVENT_FL_FILTERED
) &&
243 !filter_match_preds(file
->filter
, rec
)) {
244 ring_buffer_discard_commit(buffer
, event
);
250 EXPORT_SYMBOL_GPL(filter_check_discard
);
252 int call_filter_check_discard(struct ftrace_event_call
*call
, void *rec
,
253 struct ring_buffer
*buffer
,
254 struct ring_buffer_event
*event
)
256 if (unlikely(call
->flags
& TRACE_EVENT_FL_FILTERED
) &&
257 !filter_match_preds(call
->filter
, rec
)) {
258 ring_buffer_discard_commit(buffer
, event
);
264 EXPORT_SYMBOL_GPL(call_filter_check_discard
);
266 cycle_t
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
270 /* Early boot up does not have a buffer yet */
272 return trace_clock_local();
274 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
275 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
280 cycle_t
ftrace_now(int cpu
)
282 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);
286 * tracing_is_enabled - Show if global_trace has been disabled
288 * Shows if the global trace has been enabled or not. It uses the
289 * mirror flag "buffer_disabled" to be used in fast paths such as for
290 * the irqsoff tracer. But it may be inaccurate due to races. If you
291 * need to know the accurate state, use tracing_is_on() which is a little
292 * slower, but accurate.
294 int tracing_is_enabled(void)
297 * For quick access (irqsoff uses this in fast path), just
298 * return the mirror variable of the state of the ring buffer.
299 * It's a little racy, but we don't really care.
302 return !global_trace
.buffer_disabled
;
306 * trace_buf_size is the size in bytes that is allocated
307 * for a buffer. Note, the number of bytes is always rounded
310 * This number is purposely set to a low number of 16384.
311 * If the dump on oops happens, it will be much appreciated
312 * to not have to wait for all that output. Anyway this can be
313 * boot time and run time configurable.
315 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
317 static unsigned long trace_buf_size
= TRACE_BUF_SIZE_DEFAULT
;
319 /* trace_types holds a link list of available tracers. */
320 static struct tracer
*trace_types __read_mostly
;
323 * trace_types_lock is used to protect the trace_types list.
325 DEFINE_MUTEX(trace_types_lock
);
328 * serialize the access of the ring buffer
330 * ring buffer serializes readers, but it is low level protection.
331 * The validity of the events (which returns by ring_buffer_peek() ..etc)
332 * are not protected by ring buffer.
334 * The content of events may become garbage if we allow other process consumes
335 * these events concurrently:
336 * A) the page of the consumed events may become a normal page
337 * (not reader page) in ring buffer, and this page will be rewrited
338 * by events producer.
339 * B) The page of the consumed events may become a page for splice_read,
340 * and this page will be returned to system.
342 * These primitives allow multi process access to different cpu ring buffer
345 * These primitives don't distinguish read-only and read-consume access.
346 * Multi read-only access are also serialized.
350 static DECLARE_RWSEM(all_cpu_access_lock
);
351 static DEFINE_PER_CPU(struct mutex
, cpu_access_lock
);
353 static inline void trace_access_lock(int cpu
)
355 if (cpu
== RING_BUFFER_ALL_CPUS
) {
356 /* gain it for accessing the whole ring buffer. */
357 down_write(&all_cpu_access_lock
);
359 /* gain it for accessing a cpu ring buffer. */
361 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
362 down_read(&all_cpu_access_lock
);
364 /* Secondly block other access to this @cpu ring buffer. */
365 mutex_lock(&per_cpu(cpu_access_lock
, cpu
));
369 static inline void trace_access_unlock(int cpu
)
371 if (cpu
== RING_BUFFER_ALL_CPUS
) {
372 up_write(&all_cpu_access_lock
);
374 mutex_unlock(&per_cpu(cpu_access_lock
, cpu
));
375 up_read(&all_cpu_access_lock
);
379 static inline void trace_access_lock_init(void)
383 for_each_possible_cpu(cpu
)
384 mutex_init(&per_cpu(cpu_access_lock
, cpu
));
389 static DEFINE_MUTEX(access_lock
);
391 static inline void trace_access_lock(int cpu
)
394 mutex_lock(&access_lock
);
397 static inline void trace_access_unlock(int cpu
)
400 mutex_unlock(&access_lock
);
/* UP variant: nothing to initialize, the mutex is statically defined. */
static inline void trace_access_lock_init(void)
{
}
409 /* trace_flags holds trace_options default values */
410 unsigned long trace_flags
= TRACE_ITER_PRINT_PARENT
| TRACE_ITER_PRINTK
|
411 TRACE_ITER_ANNOTATE
| TRACE_ITER_CONTEXT_INFO
| TRACE_ITER_SLEEP_TIME
|
412 TRACE_ITER_GRAPH_TIME
| TRACE_ITER_RECORD_CMD
| TRACE_ITER_OVERWRITE
|
413 TRACE_ITER_IRQ_INFO
| TRACE_ITER_MARKERS
| TRACE_ITER_FUNCTION
;
415 static void tracer_tracing_on(struct trace_array
*tr
)
417 if (tr
->trace_buffer
.buffer
)
418 ring_buffer_record_on(tr
->trace_buffer
.buffer
);
420 * This flag is looked at when buffers haven't been allocated
421 * yet, or by some tracers (like irqsoff), that just want to
422 * know if the ring buffer has been disabled, but it can handle
423 * races of where it gets disabled but we still do a record.
424 * As the check is in the fast path of the tracers, it is more
425 * important to be fast than accurate.
427 tr
->buffer_disabled
= 0;
428 /* Make the flag seen by readers */
433 * tracing_on - enable tracing buffers
435 * This function enables tracing buffers that may have been
436 * disabled with tracing_off.
438 void tracing_on(void)
440 tracer_tracing_on(&global_trace
);
442 EXPORT_SYMBOL_GPL(tracing_on
);
445 * __trace_puts - write a constant string into the trace buffer.
446 * @ip: The address of the caller
447 * @str: The constant string to write
448 * @size: The size of the string.
450 int __trace_puts(unsigned long ip
, const char *str
, int size
)
452 struct ring_buffer_event
*event
;
453 struct ring_buffer
*buffer
;
454 struct print_entry
*entry
;
455 unsigned long irq_flags
;
458 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
461 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
463 local_save_flags(irq_flags
);
464 buffer
= global_trace
.trace_buffer
.buffer
;
465 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
466 irq_flags
, preempt_count());
470 entry
= ring_buffer_event_data(event
);
473 memcpy(&entry
->buf
, str
, size
);
475 /* Add a newline if necessary */
476 if (entry
->buf
[size
- 1] != '\n') {
477 entry
->buf
[size
] = '\n';
478 entry
->buf
[size
+ 1] = '\0';
480 entry
->buf
[size
] = '\0';
482 __buffer_unlock_commit(buffer
, event
);
486 EXPORT_SYMBOL_GPL(__trace_puts
);
489 * __trace_bputs - write the pointer to a constant string into trace buffer
490 * @ip: The address of the caller
491 * @str: The constant string to write to the buffer to
493 int __trace_bputs(unsigned long ip
, const char *str
)
495 struct ring_buffer_event
*event
;
496 struct ring_buffer
*buffer
;
497 struct bputs_entry
*entry
;
498 unsigned long irq_flags
;
499 int size
= sizeof(struct bputs_entry
);
501 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
504 local_save_flags(irq_flags
);
505 buffer
= global_trace
.trace_buffer
.buffer
;
506 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
507 irq_flags
, preempt_count());
511 entry
= ring_buffer_event_data(event
);
515 __buffer_unlock_commit(buffer
, event
);
519 EXPORT_SYMBOL_GPL(__trace_bputs
);
521 #ifdef CONFIG_TRACER_SNAPSHOT
523 * trace_snapshot - take a snapshot of the current buffer.
525 * This causes a swap between the snapshot buffer and the current live
526 * tracing buffer. You can use this to take snapshots of the live
527 * trace when some condition is triggered, but continue to trace.
529 * Note, make sure to allocate the snapshot with either
530 * a tracing_snapshot_alloc(), or by doing it manually
531 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
533 * If the snapshot buffer is not allocated, it will stop tracing.
534 * Basically making a permanent snapshot.
536 void tracing_snapshot(void)
538 struct trace_array
*tr
= &global_trace
;
539 struct tracer
*tracer
= tr
->current_trace
;
543 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
544 internal_trace_puts("*** snapshot is being ignored ***\n");
548 if (!tr
->allocated_snapshot
) {
549 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
550 internal_trace_puts("*** stopping trace here! ***\n");
555 /* Note, snapshot can not be used when the tracer uses it */
556 if (tracer
->use_max_tr
) {
557 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
558 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
562 local_irq_save(flags
);
563 update_max_tr(tr
, current
, smp_processor_id());
564 local_irq_restore(flags
);
566 EXPORT_SYMBOL_GPL(tracing_snapshot
);
568 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
569 struct trace_buffer
*size_buf
, int cpu_id
);
570 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
572 static int alloc_snapshot(struct trace_array
*tr
)
576 if (!tr
->allocated_snapshot
) {
578 /* allocate spare buffer */
579 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
580 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
584 tr
->allocated_snapshot
= true;
590 void free_snapshot(struct trace_array
*tr
)
593 * We don't free the ring buffer. instead, resize it because
594 * The max_tr ring buffer has some state (e.g. ring->clock) and
595 * we want preserve it.
597 ring_buffer_resize(tr
->max_buffer
.buffer
, 1, RING_BUFFER_ALL_CPUS
);
598 set_buffer_entries(&tr
->max_buffer
, 1);
599 tracing_reset_online_cpus(&tr
->max_buffer
);
600 tr
->allocated_snapshot
= false;
604 * tracing_alloc_snapshot - allocate snapshot buffer.
606 * This only allocates the snapshot buffer if it isn't already
607 * allocated - it doesn't also take a snapshot.
609 * This is meant to be used in cases where the snapshot buffer needs
610 * to be set up for events that can't sleep but need to be able to
611 * trigger a snapshot.
613 int tracing_alloc_snapshot(void)
615 struct trace_array
*tr
= &global_trace
;
618 ret
= alloc_snapshot(tr
);
623 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
626 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
628 * This is similar to trace_snapshot(), but it will allocate the
629 * snapshot buffer if it isn't already allocated. Use this only
630 * where it is safe to sleep, as the allocation may sleep.
632 * This causes a swap between the snapshot buffer and the current live
633 * tracing buffer. You can use this to take snapshots of the live
634 * trace when some condition is triggered, but continue to trace.
636 void tracing_snapshot_alloc(void)
640 ret
= tracing_alloc_snapshot();
646 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
648 void tracing_snapshot(void)
650 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
652 EXPORT_SYMBOL_GPL(tracing_snapshot
);
653 int tracing_alloc_snapshot(void)
655 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
658 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
659 void tracing_snapshot_alloc(void)
664 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
665 #endif /* CONFIG_TRACER_SNAPSHOT */
667 static void tracer_tracing_off(struct trace_array
*tr
)
669 if (tr
->trace_buffer
.buffer
)
670 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
672 * This flag is looked at when buffers haven't been allocated
673 * yet, or by some tracers (like irqsoff), that just want to
674 * know if the ring buffer has been disabled, but it can handle
675 * races of where it gets disabled but we still do a record.
676 * As the check is in the fast path of the tracers, it is more
677 * important to be fast than accurate.
679 tr
->buffer_disabled
= 1;
680 /* Make the flag seen by readers */
685 * tracing_off - turn off tracing buffers
687 * This function stops the tracing buffers from recording data.
688 * It does not disable any overhead the tracers themselves may
689 * be causing. This function simply causes all recording to
690 * the ring buffers to fail.
692 void tracing_off(void)
694 tracer_tracing_off(&global_trace
);
696 EXPORT_SYMBOL_GPL(tracing_off
);
698 void disable_trace_on_warning(void)
700 if (__disable_trace_on_warning
)
705 * tracer_tracing_is_on - show real state of ring buffer enabled
706 * @tr : the trace array to know if ring buffer is enabled
708 * Shows real state of the ring buffer if it is enabled or not.
710 static int tracer_tracing_is_on(struct trace_array
*tr
)
712 if (tr
->trace_buffer
.buffer
)
713 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
714 return !tr
->buffer_disabled
;
718 * tracing_is_on - show state of ring buffers enabled
720 int tracing_is_on(void)
722 return tracer_tracing_is_on(&global_trace
);
724 EXPORT_SYMBOL_GPL(tracing_is_on
);
726 static int __init
set_buf_size(char *str
)
728 unsigned long buf_size
;
732 buf_size
= memparse(str
, &str
);
733 /* nr_entries can not be zero */
736 trace_buf_size
= buf_size
;
739 __setup("trace_buf_size=", set_buf_size
);
741 static int __init
set_tracing_thresh(char *str
)
743 unsigned long threshold
;
748 ret
= kstrtoul(str
, 0, &threshold
);
751 tracing_thresh
= threshold
* 1000;
754 __setup("tracing_thresh=", set_tracing_thresh
);
/* Convert nanoseconds to microseconds (truncating division). */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
761 /* These must match the bit postions in trace_iterator_flags */
762 static const char *trace_options
[] = {
795 int in_ns
; /* is this clock in nanoseconds? */
797 { trace_clock_local
, "local", 1 },
798 { trace_clock_global
, "global", 1 },
799 { trace_clock_counter
, "counter", 0 },
800 { trace_clock_jiffies
, "uptime", 1 },
801 { trace_clock
, "perf", 1 },
806 * trace_parser_get_init - gets the buffer for trace parser
808 int trace_parser_get_init(struct trace_parser
*parser
, int size
)
810 memset(parser
, 0, sizeof(*parser
));
812 parser
->buffer
= kmalloc(size
, GFP_KERNEL
);
821 * trace_parser_put - frees the buffer for trace parser
823 void trace_parser_put(struct trace_parser
*parser
)
825 kfree(parser
->buffer
);
829 * trace_get_user - reads the user input string separated by space
830 * (matched by isspace(ch))
832 * For each string found the 'struct trace_parser' is updated,
833 * and the function returns.
835 * Returns number of bytes read.
837 * See kernel/trace/trace.h for 'struct trace_parser' details.
839 int trace_get_user(struct trace_parser
*parser
, const char __user
*ubuf
,
840 size_t cnt
, loff_t
*ppos
)
847 trace_parser_clear(parser
);
849 ret
= get_user(ch
, ubuf
++);
857 * The parser is not finished with the last write,
858 * continue reading the user input without skipping spaces.
861 /* skip white space */
862 while (cnt
&& isspace(ch
)) {
863 ret
= get_user(ch
, ubuf
++);
870 /* only spaces were written */
880 /* read the non-space input */
881 while (cnt
&& !isspace(ch
)) {
882 if (parser
->idx
< parser
->size
- 1)
883 parser
->buffer
[parser
->idx
++] = ch
;
888 ret
= get_user(ch
, ubuf
++);
895 /* We either got finished input or we have to wait for another call. */
897 parser
->buffer
[parser
->idx
] = 0;
898 parser
->cont
= false;
899 } else if (parser
->idx
< parser
->size
- 1) {
901 parser
->buffer
[parser
->idx
++] = ch
;
914 ssize_t
trace_seq_to_user(struct trace_seq
*s
, char __user
*ubuf
, size_t cnt
)
922 if (s
->len
<= s
->readpos
)
925 len
= s
->len
- s
->readpos
;
928 ret
= copy_to_user(ubuf
, s
->buffer
+ s
->readpos
, cnt
);
938 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
942 if (s
->len
<= s
->readpos
)
945 len
= s
->len
- s
->readpos
;
948 memcpy(buf
, s
->buffer
+ s
->readpos
, cnt
);
955 * ftrace_max_lock is used to protect the swapping of buffers
956 * when taking a max snapshot. The buffers themselves are
957 * protected by per_cpu spinlocks. But the action of the swap
958 * needs its own lock.
960 * This is defined as a arch_spinlock_t in order to help
961 * with performance when lockdep debugging is enabled.
963 * It is also used in other places outside the update_max_tr
964 * so it needs to be defined outside of the
965 * CONFIG_TRACER_MAX_TRACE.
967 static arch_spinlock_t ftrace_max_lock
=
968 (arch_spinlock_t
)__ARCH_SPIN_LOCK_UNLOCKED
;
970 unsigned long __read_mostly tracing_thresh
;
972 #ifdef CONFIG_TRACER_MAX_TRACE
973 unsigned long __read_mostly tracing_max_latency
;
976 * Copy the new maximum trace into the separate maximum-trace
977 * structure. (this way the maximum trace is permanently saved,
978 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
981 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
983 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
984 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
985 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
986 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
989 max_buf
->time_start
= data
->preempt_timestamp
;
991 max_data
->saved_latency
= tracing_max_latency
;
992 max_data
->critical_start
= data
->critical_start
;
993 max_data
->critical_end
= data
->critical_end
;
995 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
996 max_data
->pid
= tsk
->pid
;
998 * If tsk == current, then use current_uid(), as that does not use
999 * RCU. The irq tracer can be called out of RCU scope.
1002 max_data
->uid
= current_uid();
1004 max_data
->uid
= task_uid(tsk
);
1006 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1007 max_data
->policy
= tsk
->policy
;
1008 max_data
->rt_priority
= tsk
->rt_priority
;
1010 /* record this tasks comm */
1011 tracing_record_cmdline(tsk
);
1015 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1017 * @tsk: the task with the latency
1018 * @cpu: The cpu that initiated the trace.
1020 * Flip the buffers between the @tr and the max_tr and record information
1021 * about which task was the cause of this latency.
1024 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1026 struct ring_buffer
*buf
;
1031 WARN_ON_ONCE(!irqs_disabled());
1033 if (!tr
->allocated_snapshot
) {
1034 /* Only the nop tracer should hit this when disabling */
1035 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1039 arch_spin_lock(&ftrace_max_lock
);
1041 buf
= tr
->trace_buffer
.buffer
;
1042 tr
->trace_buffer
.buffer
= tr
->max_buffer
.buffer
;
1043 tr
->max_buffer
.buffer
= buf
;
1045 __update_max_tr(tr
, tsk
, cpu
);
1046 arch_spin_unlock(&ftrace_max_lock
);
1050 * update_max_tr_single - only copy one trace over, and reset the rest
1052 * @tsk - task with the latency
1053 * @cpu - the cpu of the buffer to copy.
1055 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1058 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1065 WARN_ON_ONCE(!irqs_disabled());
1066 if (!tr
->allocated_snapshot
) {
1067 /* Only the nop tracer should hit this when disabling */
1068 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1072 arch_spin_lock(&ftrace_max_lock
);
1074 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1076 if (ret
== -EBUSY
) {
1078 * We failed to swap the buffer due to a commit taking
1079 * place on this CPU. We fail to record, but we reset
1080 * the max trace buffer (no one writes directly to it)
1081 * and flag that it failed.
1083 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1084 "Failed to swap buffers due to commit in progress\n");
1087 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1089 __update_max_tr(tr
, tsk
, cpu
);
1090 arch_spin_unlock(&ftrace_max_lock
);
1092 #endif /* CONFIG_TRACER_MAX_TRACE */
1094 static void default_wait_pipe(struct trace_iterator
*iter
)
1096 /* Iterators are static, they should be filled or empty */
1097 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1100 ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
1103 #ifdef CONFIG_FTRACE_STARTUP_TEST
1104 static int run_tracer_selftest(struct tracer
*type
)
1106 struct trace_array
*tr
= &global_trace
;
1107 struct tracer
*saved_tracer
= tr
->current_trace
;
1110 if (!type
->selftest
|| tracing_selftest_disabled
)
1114 * Run a selftest on this tracer.
1115 * Here we reset the trace buffer, and set the current
1116 * tracer to be this tracer. The tracer can then run some
1117 * internal tracing to verify that everything is in order.
1118 * If we fail, we do not register this tracer.
1120 tracing_reset_online_cpus(&tr
->trace_buffer
);
1122 tr
->current_trace
= type
;
1124 #ifdef CONFIG_TRACER_MAX_TRACE
1125 if (type
->use_max_tr
) {
1126 /* If we expanded the buffers, make sure the max is expanded too */
1127 if (ring_buffer_expanded
)
1128 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1129 RING_BUFFER_ALL_CPUS
);
1130 tr
->allocated_snapshot
= true;
1134 /* the test is responsible for initializing and enabling */
1135 pr_info("Testing tracer %s: ", type
->name
);
1136 ret
= type
->selftest(type
, tr
);
1137 /* the test is responsible for resetting too */
1138 tr
->current_trace
= saved_tracer
;
1140 printk(KERN_CONT
"FAILED!\n");
1141 /* Add the warning after printing 'FAILED' */
1145 /* Only reset on passing, to avoid touching corrupted buffers */
1146 tracing_reset_online_cpus(&tr
->trace_buffer
);
1148 #ifdef CONFIG_TRACER_MAX_TRACE
1149 if (type
->use_max_tr
) {
1150 tr
->allocated_snapshot
= false;
1152 /* Shrink the max buffer again */
1153 if (ring_buffer_expanded
)
1154 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1155 RING_BUFFER_ALL_CPUS
);
1159 printk(KERN_CONT
"PASSED\n");
1163 static inline int run_tracer_selftest(struct tracer
*type
)
1167 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1170 * register_tracer - register a tracer with the ftrace system.
1171 * @type - the plugin for the tracer
1173 * Register a new plugin tracer.
1175 int register_tracer(struct tracer
*type
)
1181 pr_info("Tracer must have a name\n");
1185 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1186 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1190 mutex_lock(&trace_types_lock
);
1192 tracing_selftest_running
= true;
1194 for (t
= trace_types
; t
; t
= t
->next
) {
1195 if (strcmp(type
->name
, t
->name
) == 0) {
1197 pr_info("Tracer %s already registered\n",
1204 if (!type
->set_flag
)
1205 type
->set_flag
= &dummy_set_flag
;
1207 type
->flags
= &dummy_tracer_flags
;
1209 if (!type
->flags
->opts
)
1210 type
->flags
->opts
= dummy_tracer_opt
;
1211 if (!type
->wait_pipe
)
1212 type
->wait_pipe
= default_wait_pipe
;
1214 ret
= run_tracer_selftest(type
);
1218 type
->next
= trace_types
;
1222 tracing_selftest_running
= false;
1223 mutex_unlock(&trace_types_lock
);
1225 if (ret
|| !default_bootup_tracer
)
1228 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1231 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1232 /* Do we want this tracer to start on bootup? */
1233 tracing_set_tracer(type
->name
);
1234 default_bootup_tracer
= NULL
;
1235 /* disable other selftests, since this will break it. */
1236 tracing_selftest_disabled
= true;
1237 #ifdef CONFIG_FTRACE_STARTUP_TEST
1238 printk(KERN_INFO
"Disabling FTRACE selftests due to running tracer '%s'\n",
1246 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1248 struct ring_buffer
*buffer
= buf
->buffer
;
1253 ring_buffer_record_disable(buffer
);
1255 /* Make sure all commits have finished */
1256 synchronize_sched();
1257 ring_buffer_reset_cpu(buffer
, cpu
);
1259 ring_buffer_record_enable(buffer
);
1262 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1264 struct ring_buffer
*buffer
= buf
->buffer
;
1270 ring_buffer_record_disable(buffer
);
1272 /* Make sure all commits have finished */
1273 synchronize_sched();
1275 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1277 for_each_online_cpu(cpu
)
1278 ring_buffer_reset_cpu(buffer
, cpu
);
1280 ring_buffer_record_enable(buffer
);
1283 /* Must have trace_types_lock held */
1284 void tracing_reset_all_online_cpus(void)
1286 struct trace_array
*tr
;
1288 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1289 tracing_reset_online_cpus(&tr
->trace_buffer
);
1290 #ifdef CONFIG_TRACER_MAX_TRACE
1291 tracing_reset_online_cpus(&tr
->max_buffer
);
1296 #define SAVED_CMDLINES 128
1297 #define NO_CMDLINE_MAP UINT_MAX
1298 static unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1299 static unsigned map_cmdline_to_pid
[SAVED_CMDLINES
];
1300 static char saved_cmdlines
[SAVED_CMDLINES
][TASK_COMM_LEN
];
1301 static int cmdline_idx
;
1302 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
1304 /* temporary disable recording */
1305 static atomic_t trace_record_cmdline_disabled __read_mostly
;
1307 static void trace_init_cmdlines(void)
1309 memset(&map_pid_to_cmdline
, NO_CMDLINE_MAP
, sizeof(map_pid_to_cmdline
));
1310 memset(&map_cmdline_to_pid
, NO_CMDLINE_MAP
, sizeof(map_cmdline_to_pid
));
1314 int is_tracing_stopped(void)
1316 return global_trace
.stop_count
;
1320 * tracing_start - quick start of the tracer
1322 * If tracing is enabled but was stopped by tracing_stop,
1323 * this will start the tracer back up.
1325 void tracing_start(void)
1327 struct ring_buffer
*buffer
;
1328 unsigned long flags
;
1330 if (tracing_disabled
)
1333 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1334 if (--global_trace
.stop_count
) {
1335 if (global_trace
.stop_count
< 0) {
1336 /* Someone screwed up their debugging */
1338 global_trace
.stop_count
= 0;
1343 /* Prevent the buffers from switching */
1344 arch_spin_lock(&ftrace_max_lock
);
1346 buffer
= global_trace
.trace_buffer
.buffer
;
1348 ring_buffer_record_enable(buffer
);
1350 #ifdef CONFIG_TRACER_MAX_TRACE
1351 buffer
= global_trace
.max_buffer
.buffer
;
1353 ring_buffer_record_enable(buffer
);
1356 arch_spin_unlock(&ftrace_max_lock
);
1360 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1363 static void tracing_start_tr(struct trace_array
*tr
)
1365 struct ring_buffer
*buffer
;
1366 unsigned long flags
;
1368 if (tracing_disabled
)
1371 /* If global, we need to also start the max tracer */
1372 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1373 return tracing_start();
1375 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1377 if (--tr
->stop_count
) {
1378 if (tr
->stop_count
< 0) {
1379 /* Someone screwed up their debugging */
1386 buffer
= tr
->trace_buffer
.buffer
;
1388 ring_buffer_record_enable(buffer
);
1391 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1395 * tracing_stop - quick stop of the tracer
1397 * Light weight way to stop tracing. Use in conjunction with
1400 void tracing_stop(void)
1402 struct ring_buffer
*buffer
;
1403 unsigned long flags
;
1406 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1407 if (global_trace
.stop_count
++)
1410 /* Prevent the buffers from switching */
1411 arch_spin_lock(&ftrace_max_lock
);
1413 buffer
= global_trace
.trace_buffer
.buffer
;
1415 ring_buffer_record_disable(buffer
);
1417 #ifdef CONFIG_TRACER_MAX_TRACE
1418 buffer
= global_trace
.max_buffer
.buffer
;
1420 ring_buffer_record_disable(buffer
);
1423 arch_spin_unlock(&ftrace_max_lock
);
1426 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1429 static void tracing_stop_tr(struct trace_array
*tr
)
1431 struct ring_buffer
*buffer
;
1432 unsigned long flags
;
1434 /* If global, we need to also stop the max tracer */
1435 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1436 return tracing_stop();
1438 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1439 if (tr
->stop_count
++)
1442 buffer
= tr
->trace_buffer
.buffer
;
1444 ring_buffer_record_disable(buffer
);
1447 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1450 void trace_stop_cmdline_recording(void);
1452 static void trace_save_cmdline(struct task_struct
*tsk
)
1456 if (!tsk
->pid
|| unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1460 * It's not the end of the world if we don't get
1461 * the lock, but we also don't want to spin
1462 * nor do we want to disable interrupts,
1463 * so if we miss here, then better luck next time.
1465 if (!arch_spin_trylock(&trace_cmdline_lock
))
1468 idx
= map_pid_to_cmdline
[tsk
->pid
];
1469 if (idx
== NO_CMDLINE_MAP
) {
1470 idx
= (cmdline_idx
+ 1) % SAVED_CMDLINES
;
1473 * Check whether the cmdline buffer at idx has a pid
1474 * mapped. We are going to overwrite that entry so we
1475 * need to clear the map_pid_to_cmdline. Otherwise we
1476 * would read the new comm for the old pid.
1478 pid
= map_cmdline_to_pid
[idx
];
1479 if (pid
!= NO_CMDLINE_MAP
)
1480 map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1482 map_cmdline_to_pid
[idx
] = tsk
->pid
;
1483 map_pid_to_cmdline
[tsk
->pid
] = idx
;
1488 memcpy(&saved_cmdlines
[idx
], tsk
->comm
, TASK_COMM_LEN
);
1490 arch_spin_unlock(&trace_cmdline_lock
);
1493 void trace_find_cmdline(int pid
, char comm
[])
1498 strcpy(comm
, "<idle>");
1502 if (WARN_ON_ONCE(pid
< 0)) {
1503 strcpy(comm
, "<XXX>");
1507 if (pid
> PID_MAX_DEFAULT
) {
1508 strcpy(comm
, "<...>");
1513 arch_spin_lock(&trace_cmdline_lock
);
1514 map
= map_pid_to_cmdline
[pid
];
1515 if (map
!= NO_CMDLINE_MAP
)
1516 strcpy(comm
, saved_cmdlines
[map
]);
1518 strcpy(comm
, "<...>");
1520 arch_spin_unlock(&trace_cmdline_lock
);
1524 void tracing_record_cmdline(struct task_struct
*tsk
)
1526 if (atomic_read(&trace_record_cmdline_disabled
) || !tracing_is_on())
1529 if (!__this_cpu_read(trace_cmdline_save
))
1532 __this_cpu_write(trace_cmdline_save
, false);
1534 trace_save_cmdline(tsk
);
1538 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
1541 struct task_struct
*tsk
= current
;
1543 entry
->preempt_count
= pc
& 0xff;
1544 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
1546 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1547 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
1549 TRACE_FLAG_IRQS_NOSUPPORT
|
1551 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
1552 ((pc
& SOFTIRQ_MASK
) ? TRACE_FLAG_SOFTIRQ
: 0) |
1553 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
1554 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
1556 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
1558 struct ring_buffer_event
*
1559 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
1562 unsigned long flags
, int pc
)
1564 struct ring_buffer_event
*event
;
1566 event
= ring_buffer_lock_reserve(buffer
, len
);
1567 if (event
!= NULL
) {
1568 struct trace_entry
*ent
= ring_buffer_event_data(event
);
1570 tracing_generic_entry_update(ent
, flags
, pc
);
1578 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
1580 __this_cpu_write(trace_cmdline_save
, true);
1581 ring_buffer_unlock_commit(buffer
, event
);
/* Commit an event, then record kernel and user stack traces if enabled. */
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}
/* Exported wrapper around __trace_buffer_unlock_commit(). */
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1603 static struct ring_buffer
*temp_buffer
;
1605 struct ring_buffer_event
*
1606 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1607 struct ftrace_event_file
*ftrace_file
,
1608 int type
, unsigned long len
,
1609 unsigned long flags
, int pc
)
1611 struct ring_buffer_event
*entry
;
1613 *current_rb
= ftrace_file
->tr
->trace_buffer
.buffer
;
1614 entry
= trace_buffer_lock_reserve(*current_rb
,
1615 type
, len
, flags
, pc
);
1617 * If tracing is off, but we have triggers enabled
1618 * we still need to look at the event data. Use the temp_buffer
1619 * to store the trace event for the tigger to use. It's recusive
1620 * safe and will not be recorded anywhere.
1622 if (!entry
&& ftrace_file
->flags
& FTRACE_EVENT_FL_TRIGGER_COND
) {
1623 *current_rb
= temp_buffer
;
1624 entry
= trace_buffer_lock_reserve(*current_rb
,
1625 type
, len
, flags
, pc
);
1629 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
1631 struct ring_buffer_event
*
1632 trace_current_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1633 int type
, unsigned long len
,
1634 unsigned long flags
, int pc
)
1636 *current_rb
= global_trace
.trace_buffer
.buffer
;
1637 return trace_buffer_lock_reserve(*current_rb
,
1638 type
, len
, flags
, pc
);
1640 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve
);
/* Commit an event reserved via trace_current_buffer_lock_reserve(). */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
/* Commit an event and record stacks using a caller-supplied pt_regs. */
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
/* Discard a reserved event without recording it. */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1670 trace_function(struct trace_array
*tr
,
1671 unsigned long ip
, unsigned long parent_ip
, unsigned long flags
,
1674 struct ftrace_event_call
*call
= &event_function
;
1675 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
1676 struct ring_buffer_event
*event
;
1677 struct ftrace_entry
*entry
;
1679 /* If we are reading the ring buffer, don't trace */
1680 if (unlikely(__this_cpu_read(ftrace_cpu_disabled
)))
1683 event
= trace_buffer_lock_reserve(buffer
, TRACE_FN
, sizeof(*entry
),
1687 entry
= ring_buffer_event_data(event
);
1689 entry
->parent_ip
= parent_ip
;
1691 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1692 __buffer_unlock_commit(buffer
, event
);
1695 #ifdef CONFIG_STACKTRACE
1697 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1698 struct ftrace_stack
{
1699 unsigned long calls
[FTRACE_STACK_MAX_ENTRIES
];
1702 static DEFINE_PER_CPU(struct ftrace_stack
, ftrace_stack
);
1703 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
1705 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
1706 unsigned long flags
,
1707 int skip
, int pc
, struct pt_regs
*regs
)
1709 struct ftrace_event_call
*call
= &event_kernel_stack
;
1710 struct ring_buffer_event
*event
;
1711 struct stack_entry
*entry
;
1712 struct stack_trace trace
;
1714 int size
= FTRACE_STACK_ENTRIES
;
1716 trace
.nr_entries
= 0;
1720 * Since events can happen in NMIs there's no safe way to
1721 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1722 * or NMI comes in, it will just have to use the default
1723 * FTRACE_STACK_SIZE.
1725 preempt_disable_notrace();
1727 use_stack
= __this_cpu_inc_return(ftrace_stack_reserve
);
1729 * We don't need any atomic variables, just a barrier.
1730 * If an interrupt comes in, we don't care, because it would
1731 * have exited and put the counter back to what we want.
1732 * We just need a barrier to keep gcc from moving things
1736 if (use_stack
== 1) {
1737 trace
.entries
= &__get_cpu_var(ftrace_stack
).calls
[0];
1738 trace
.max_entries
= FTRACE_STACK_MAX_ENTRIES
;
1741 save_stack_trace_regs(regs
, &trace
);
1743 save_stack_trace(&trace
);
1745 if (trace
.nr_entries
> size
)
1746 size
= trace
.nr_entries
;
1748 /* From now on, use_stack is a boolean */
1751 size
*= sizeof(unsigned long);
1753 event
= trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
1754 sizeof(*entry
) + size
, flags
, pc
);
1757 entry
= ring_buffer_event_data(event
);
1759 memset(&entry
->caller
, 0, size
);
1762 memcpy(&entry
->caller
, trace
.entries
,
1763 trace
.nr_entries
* sizeof(unsigned long));
1765 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1766 trace
.entries
= entry
->caller
;
1768 save_stack_trace_regs(regs
, &trace
);
1770 save_stack_trace(&trace
);
1773 entry
->size
= trace
.nr_entries
;
1775 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1776 __buffer_unlock_commit(buffer
, event
);
1779 /* Again, don't let gcc optimize things here */
1781 __this_cpu_dec(ftrace_stack_reserve
);
1782 preempt_enable_notrace();
1786 void ftrace_trace_stack_regs(struct ring_buffer
*buffer
, unsigned long flags
,
1787 int skip
, int pc
, struct pt_regs
*regs
)
1789 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1792 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
1795 void ftrace_trace_stack(struct ring_buffer
*buffer
, unsigned long flags
,
1798 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1801 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
1804 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
1807 __ftrace_trace_stack(tr
->trace_buffer
.buffer
, flags
, skip
, pc
, NULL
);
1811 * trace_dump_stack - record a stack back trace in the trace buffer
1812 * @skip: Number of functions to skip (helper handlers)
1814 void trace_dump_stack(int skip
)
1816 unsigned long flags
;
1818 if (tracing_disabled
|| tracing_selftest_running
)
1821 local_save_flags(flags
);
1824 * Skip 3 more, seems to get us at the caller of
1828 __ftrace_trace_stack(global_trace
.trace_buffer
.buffer
,
1829 flags
, skip
, preempt_count(), NULL
);
1832 static DEFINE_PER_CPU(int, user_stack_count
);
1835 ftrace_trace_userstack(struct ring_buffer
*buffer
, unsigned long flags
, int pc
)
1837 struct ftrace_event_call
*call
= &event_user_stack
;
1838 struct ring_buffer_event
*event
;
1839 struct userstack_entry
*entry
;
1840 struct stack_trace trace
;
1842 if (!(trace_flags
& TRACE_ITER_USERSTACKTRACE
))
1846 * NMIs can not handle page faults, even with fix ups.
1847 * The save user stack can (and often does) fault.
1849 if (unlikely(in_nmi()))
1853 * prevent recursion, since the user stack tracing may
1854 * trigger other kernel events.
1857 if (__this_cpu_read(user_stack_count
))
1860 __this_cpu_inc(user_stack_count
);
1862 event
= trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
1863 sizeof(*entry
), flags
, pc
);
1865 goto out_drop_count
;
1866 entry
= ring_buffer_event_data(event
);
1868 entry
->tgid
= current
->tgid
;
1869 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
1871 trace
.nr_entries
= 0;
1872 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1874 trace
.entries
= entry
->caller
;
1876 save_stack_trace_user(&trace
);
1877 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1878 __buffer_unlock_commit(buffer
, event
);
1881 __this_cpu_dec(user_stack_count
);
1887 static void __trace_userstack(struct trace_array
*tr
, unsigned long flags
)
1889 ftrace_trace_userstack(tr
, flags
, preempt_count());
1893 #endif /* CONFIG_STACKTRACE */
1895 /* created for use with alloc_percpu */
1896 struct trace_buffer_struct
{
1897 char buffer
[TRACE_BUF_SIZE
];
1900 static struct trace_buffer_struct
*trace_percpu_buffer
;
1901 static struct trace_buffer_struct
*trace_percpu_sirq_buffer
;
1902 static struct trace_buffer_struct
*trace_percpu_irq_buffer
;
1903 static struct trace_buffer_struct
*trace_percpu_nmi_buffer
;
1906 * The buffer used is dependent on the context. There is a per cpu
1907 * buffer for normal context, softirq contex, hard irq context and
1908 * for NMI context. Thise allows for lockless recording.
1910 * Note, if the buffers failed to be allocated, then this returns NULL
1912 static char *get_trace_buf(void)
1914 struct trace_buffer_struct
*percpu_buffer
;
1917 * If we have allocated per cpu buffers, then we do not
1918 * need to do any locking.
1921 percpu_buffer
= trace_percpu_nmi_buffer
;
1923 percpu_buffer
= trace_percpu_irq_buffer
;
1924 else if (in_softirq())
1925 percpu_buffer
= trace_percpu_sirq_buffer
;
1927 percpu_buffer
= trace_percpu_buffer
;
1932 return this_cpu_ptr(&percpu_buffer
->buffer
[0]);
1935 static int alloc_percpu_trace_buffer(void)
1937 struct trace_buffer_struct
*buffers
;
1938 struct trace_buffer_struct
*sirq_buffers
;
1939 struct trace_buffer_struct
*irq_buffers
;
1940 struct trace_buffer_struct
*nmi_buffers
;
1942 buffers
= alloc_percpu(struct trace_buffer_struct
);
1946 sirq_buffers
= alloc_percpu(struct trace_buffer_struct
);
1950 irq_buffers
= alloc_percpu(struct trace_buffer_struct
);
1954 nmi_buffers
= alloc_percpu(struct trace_buffer_struct
);
1958 trace_percpu_buffer
= buffers
;
1959 trace_percpu_sirq_buffer
= sirq_buffers
;
1960 trace_percpu_irq_buffer
= irq_buffers
;
1961 trace_percpu_nmi_buffer
= nmi_buffers
;
1966 free_percpu(irq_buffers
);
1968 free_percpu(sirq_buffers
);
1970 free_percpu(buffers
);
1972 WARN(1, "Could not allocate percpu trace_printk buffer");
1976 static int buffers_allocated
;
1978 void trace_printk_init_buffers(void)
1980 if (buffers_allocated
)
1983 if (alloc_percpu_trace_buffer())
1986 pr_info("ftrace: Allocated trace_printk buffers\n");
1988 /* Expand the buffers to set size */
1989 tracing_update_buffers();
1991 buffers_allocated
= 1;
1994 * trace_printk_init_buffers() can be called by modules.
1995 * If that happens, then we need to start cmdline recording
1996 * directly here. If the global_trace.buffer is already
1997 * allocated here, then this was called by module code.
1999 if (global_trace
.trace_buffer
.buffer
)
2000 tracing_start_cmdline_record();
2003 void trace_printk_start_comm(void)
2005 /* Start tracing comms if trace printk is set */
2006 if (!buffers_allocated
)
2008 tracing_start_cmdline_record();
2011 static void trace_printk_start_stop_comm(int enabled
)
2013 if (!buffers_allocated
)
2017 tracing_start_cmdline_record();
2019 tracing_stop_cmdline_record();
2023 * trace_vbprintk - write binary msg to tracing buffer
2026 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
2028 struct ftrace_event_call
*call
= &event_bprint
;
2029 struct ring_buffer_event
*event
;
2030 struct ring_buffer
*buffer
;
2031 struct trace_array
*tr
= &global_trace
;
2032 struct bprint_entry
*entry
;
2033 unsigned long flags
;
2035 int len
= 0, size
, pc
;
2037 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
2040 /* Don't pollute graph traces with trace_vprintk internals */
2041 pause_graph_tracing();
2043 pc
= preempt_count();
2044 preempt_disable_notrace();
2046 tbuffer
= get_trace_buf();
2052 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
2054 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
2057 local_save_flags(flags
);
2058 size
= sizeof(*entry
) + sizeof(u32
) * len
;
2059 buffer
= tr
->trace_buffer
.buffer
;
2060 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
2064 entry
= ring_buffer_event_data(event
);
2068 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
2069 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2070 __buffer_unlock_commit(buffer
, event
);
2071 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2075 preempt_enable_notrace();
2076 unpause_graph_tracing();
2080 EXPORT_SYMBOL_GPL(trace_vbprintk
);
2083 __trace_array_vprintk(struct ring_buffer
*buffer
,
2084 unsigned long ip
, const char *fmt
, va_list args
)
2086 struct ftrace_event_call
*call
= &event_print
;
2087 struct ring_buffer_event
*event
;
2088 int len
= 0, size
, pc
;
2089 struct print_entry
*entry
;
2090 unsigned long flags
;
2093 if (tracing_disabled
|| tracing_selftest_running
)
2096 /* Don't pollute graph traces with trace_vprintk internals */
2097 pause_graph_tracing();
2099 pc
= preempt_count();
2100 preempt_disable_notrace();
2103 tbuffer
= get_trace_buf();
2109 len
= vsnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2110 if (len
> TRACE_BUF_SIZE
)
2113 local_save_flags(flags
);
2114 size
= sizeof(*entry
) + len
+ 1;
2115 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
2119 entry
= ring_buffer_event_data(event
);
2122 memcpy(&entry
->buf
, tbuffer
, len
);
2123 entry
->buf
[len
] = '\0';
2124 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2125 __buffer_unlock_commit(buffer
, event
);
2126 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2129 preempt_enable_notrace();
2130 unpause_graph_tracing();
2135 int trace_array_vprintk(struct trace_array
*tr
,
2136 unsigned long ip
, const char *fmt
, va_list args
)
2138 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
2141 int trace_array_printk(struct trace_array
*tr
,
2142 unsigned long ip
, const char *fmt
, ...)
2147 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2151 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
2156 int trace_array_printk_buf(struct ring_buffer
*buffer
,
2157 unsigned long ip
, const char *fmt
, ...)
2162 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2166 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
2171 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
2173 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
2175 EXPORT_SYMBOL_GPL(trace_vprintk
);
2177 static void trace_iterator_increment(struct trace_iterator
*iter
)
2179 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
2183 ring_buffer_read(buf_iter
, NULL
);
2186 static struct trace_entry
*
2187 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
2188 unsigned long *lost_events
)
2190 struct ring_buffer_event
*event
;
2191 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
2194 event
= ring_buffer_iter_peek(buf_iter
, ts
);
2196 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
2200 iter
->ent_size
= ring_buffer_event_length(event
);
2201 return ring_buffer_event_data(event
);
2207 static struct trace_entry
*
2208 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
2209 unsigned long *missing_events
, u64
*ent_ts
)
2211 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
2212 struct trace_entry
*ent
, *next
= NULL
;
2213 unsigned long lost_events
= 0, next_lost
= 0;
2214 int cpu_file
= iter
->cpu_file
;
2215 u64 next_ts
= 0, ts
;
2221 * If we are in a per_cpu trace file, don't bother by iterating over
2222 * all cpu and peek directly.
2224 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
2225 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
2227 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
2229 *ent_cpu
= cpu_file
;
2234 for_each_tracing_cpu(cpu
) {
2236 if (ring_buffer_empty_cpu(buffer
, cpu
))
2239 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
2242 * Pick the entry with the smallest timestamp:
2244 if (ent
&& (!next
|| ts
< next_ts
)) {
2248 next_lost
= lost_events
;
2249 next_size
= iter
->ent_size
;
2253 iter
->ent_size
= next_size
;
2256 *ent_cpu
= next_cpu
;
2262 *missing_events
= next_lost
;
2267 /* Find the next real entry, without updating the iterator itself */
2268 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
2269 int *ent_cpu
, u64
*ent_ts
)
2271 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
2274 /* Find the next real entry, and increment the iterator to the next entry */
2275 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
2277 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
2278 &iter
->lost_events
, &iter
->ts
);
2281 trace_iterator_increment(iter
);
2283 return iter
->ent
? iter
: NULL
;
2286 static void trace_consume(struct trace_iterator
*iter
)
2288 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
2289 &iter
->lost_events
);
2292 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
2294 struct trace_iterator
*iter
= m
->private;
2298 WARN_ON_ONCE(iter
->leftover
);
2302 /* can't go backwards */
2307 ent
= trace_find_next_entry_inc(iter
);
2311 while (ent
&& iter
->idx
< i
)
2312 ent
= trace_find_next_entry_inc(iter
);
2319 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
2321 struct ring_buffer_event
*event
;
2322 struct ring_buffer_iter
*buf_iter
;
2323 unsigned long entries
= 0;
2326 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
2328 buf_iter
= trace_buffer_iter(iter
, cpu
);
2332 ring_buffer_iter_reset(buf_iter
);
2335 * We could have the case with the max latency tracers
2336 * that a reset never took place on a cpu. This is evident
2337 * by the timestamp being before the start of the buffer.
2339 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
2340 if (ts
>= iter
->trace_buffer
->time_start
)
2343 ring_buffer_read(buf_iter
, NULL
);
2346 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
2350 * The current tracer is copied to avoid a global locking
2353 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
2355 struct trace_iterator
*iter
= m
->private;
2356 struct trace_array
*tr
= iter
->tr
;
2357 int cpu_file
= iter
->cpu_file
;
2363 * copy the tracer to avoid using a global lock all around.
2364 * iter->trace is a copy of current_trace, the pointer to the
2365 * name may be used instead of a strcmp(), as iter->trace->name
2366 * will point to the same string as current_trace->name.
2368 mutex_lock(&trace_types_lock
);
2369 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
2370 *iter
->trace
= *tr
->current_trace
;
2371 mutex_unlock(&trace_types_lock
);
2373 #ifdef CONFIG_TRACER_MAX_TRACE
2374 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
2375 return ERR_PTR(-EBUSY
);
2378 if (!iter
->snapshot
)
2379 atomic_inc(&trace_record_cmdline_disabled
);
2381 if (*pos
!= iter
->pos
) {
2386 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
2387 for_each_tracing_cpu(cpu
)
2388 tracing_iter_reset(iter
, cpu
);
2390 tracing_iter_reset(iter
, cpu_file
);
2393 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
2398 * If we overflowed the seq_file before, then we want
2399 * to just reuse the trace_seq buffer again.
2405 p
= s_next(m
, p
, &l
);
2409 trace_event_read_lock();
2410 trace_access_lock(cpu_file
);
2414 static void s_stop(struct seq_file
*m
, void *p
)
2416 struct trace_iterator
*iter
= m
->private;
2418 #ifdef CONFIG_TRACER_MAX_TRACE
2419 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
2423 if (!iter
->snapshot
)
2424 atomic_dec(&trace_record_cmdline_disabled
);
2426 trace_access_unlock(iter
->cpu_file
);
2427 trace_event_read_unlock();
2431 get_total_entries(struct trace_buffer
*buf
,
2432 unsigned long *total
, unsigned long *entries
)
2434 unsigned long count
;
2440 for_each_tracing_cpu(cpu
) {
2441 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
2443 * If this buffer has skipped entries, then we hold all
2444 * entries for the trace and we need to ignore the
2445 * ones before the time stamp.
2447 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
2448 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
2449 /* total is the same as the entries */
2453 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
/*
 * Print the latency-format column legend.
 * NOTE(review): column whitespace reconstructed — the extraction collapsed
 * runs of spaces inside these literals; verify against upstream trace.c.
 */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}
/* Print the entries-written summary line for a trace buffer. */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
/*
 * Print the default (no irq-info) column header.
 * NOTE(review): column whitespace reconstructed — confirm against upstream.
 */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
/*
 * Print the column header including the irqs-off/resched/irq-depth legend.
 * NOTE(review): column whitespace reconstructed — confirm against upstream.
 */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}
2501 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
2503 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2504 struct trace_buffer
*buf
= iter
->trace_buffer
;
2505 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
2506 struct tracer
*type
= iter
->trace
;
2507 unsigned long entries
;
2508 unsigned long total
;
2509 const char *name
= "preemption";
2513 get_total_entries(buf
, &total
, &entries
);
2515 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
2517 seq_puts(m
, "# -----------------------------------"
2518 "---------------------------------\n");
2519 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2520 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2521 nsecs_to_usecs(data
->saved_latency
),
2525 #if defined(CONFIG_PREEMPT_NONE)
2527 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2529 #elif defined(CONFIG_PREEMPT)
2534 /* These are reserved for later use */
2537 seq_printf(m
, " #P:%d)\n", num_online_cpus());
2541 seq_puts(m
, "# -----------------\n");
2542 seq_printf(m
, "# | task: %.16s-%d "
2543 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2544 data
->comm
, data
->pid
,
2545 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
2546 data
->policy
, data
->rt_priority
);
2547 seq_puts(m
, "# -----------------\n");
2549 if (data
->critical_start
) {
2550 seq_puts(m
, "# => started at: ");
2551 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
2552 trace_print_seq(m
, &iter
->seq
);
2553 seq_puts(m
, "\n# => ended at: ");
2554 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
2555 trace_print_seq(m
, &iter
->seq
);
2556 seq_puts(m
, "\n#\n");
2562 static void test_cpu_buff_start(struct trace_iterator
*iter
)
2564 struct trace_seq
*s
= &iter
->seq
;
2566 if (!(trace_flags
& TRACE_ITER_ANNOTATE
))
2569 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
2572 if (cpumask_test_cpu(iter
->cpu
, iter
->started
))
2575 if (per_cpu_ptr(iter
->trace_buffer
->data
, iter
->cpu
)->skipped_entries
)
2578 cpumask_set_cpu(iter
->cpu
, iter
->started
);
2580 /* Don't print started cpu buffer for the first entry of the trace */
2582 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
2586 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
2588 struct trace_seq
*s
= &iter
->seq
;
2589 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2590 struct trace_entry
*entry
;
2591 struct trace_event
*event
;
2595 test_cpu_buff_start(iter
);
2597 event
= ftrace_find_event(entry
->type
);
2599 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2600 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2601 if (!trace_print_lat_context(iter
))
2604 if (!trace_print_context(iter
))
2610 return event
->funcs
->trace(iter
, sym_flags
, event
);
2612 if (!trace_seq_printf(s
, "Unknown type %d\n", entry
->type
))
2615 return TRACE_TYPE_HANDLED
;
2617 return TRACE_TYPE_PARTIAL_LINE
;
2620 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
2622 struct trace_seq
*s
= &iter
->seq
;
2623 struct trace_entry
*entry
;
2624 struct trace_event
*event
;
2628 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2629 if (!trace_seq_printf(s
, "%d %d %llu ",
2630 entry
->pid
, iter
->cpu
, iter
->ts
))
2634 event
= ftrace_find_event(entry
->type
);
2636 return event
->funcs
->raw(iter
, 0, event
);
2638 if (!trace_seq_printf(s
, "%d ?\n", entry
->type
))
2641 return TRACE_TYPE_HANDLED
;
2643 return TRACE_TYPE_PARTIAL_LINE
;
2646 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
2648 struct trace_seq
*s
= &iter
->seq
;
2649 unsigned char newline
= '\n';
2650 struct trace_entry
*entry
;
2651 struct trace_event
*event
;
2655 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2656 SEQ_PUT_HEX_FIELD_RET(s
, entry
->pid
);
2657 SEQ_PUT_HEX_FIELD_RET(s
, iter
->cpu
);
2658 SEQ_PUT_HEX_FIELD_RET(s
, iter
->ts
);
2661 event
= ftrace_find_event(entry
->type
);
2663 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
2664 if (ret
!= TRACE_TYPE_HANDLED
)
2668 SEQ_PUT_FIELD_RET(s
, newline
);
2670 return TRACE_TYPE_HANDLED
;
2673 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
2675 struct trace_seq
*s
= &iter
->seq
;
2676 struct trace_entry
*entry
;
2677 struct trace_event
*event
;
2681 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2682 SEQ_PUT_FIELD_RET(s
, entry
->pid
);
2683 SEQ_PUT_FIELD_RET(s
, iter
->cpu
);
2684 SEQ_PUT_FIELD_RET(s
, iter
->ts
);
2687 event
= ftrace_find_event(entry
->type
);
2688 return event
? event
->funcs
->binary(iter
, 0, event
) :
2692 int trace_empty(struct trace_iterator
*iter
)
2694 struct ring_buffer_iter
*buf_iter
;
2697 /* If we are looking at one CPU buffer, only check that one */
2698 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
2699 cpu
= iter
->cpu_file
;
2700 buf_iter
= trace_buffer_iter(iter
, cpu
);
2702 if (!ring_buffer_iter_empty(buf_iter
))
2705 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2711 for_each_tracing_cpu(cpu
) {
2712 buf_iter
= trace_buffer_iter(iter
, cpu
);
2714 if (!ring_buffer_iter_empty(buf_iter
))
2717 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2725 /* Called with trace_event_read_lock() held. */
2726 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
2728 enum print_line_t ret
;
2730 if (iter
->lost_events
&&
2731 !trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
2732 iter
->cpu
, iter
->lost_events
))
2733 return TRACE_TYPE_PARTIAL_LINE
;
2735 if (iter
->trace
&& iter
->trace
->print_line
) {
2736 ret
= iter
->trace
->print_line(iter
);
2737 if (ret
!= TRACE_TYPE_UNHANDLED
)
2741 if (iter
->ent
->type
== TRACE_BPUTS
&&
2742 trace_flags
& TRACE_ITER_PRINTK
&&
2743 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2744 return trace_print_bputs_msg_only(iter
);
2746 if (iter
->ent
->type
== TRACE_BPRINT
&&
2747 trace_flags
& TRACE_ITER_PRINTK
&&
2748 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2749 return trace_print_bprintk_msg_only(iter
);
2751 if (iter
->ent
->type
== TRACE_PRINT
&&
2752 trace_flags
& TRACE_ITER_PRINTK
&&
2753 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2754 return trace_print_printk_msg_only(iter
);
2756 if (trace_flags
& TRACE_ITER_BIN
)
2757 return print_bin_fmt(iter
);
2759 if (trace_flags
& TRACE_ITER_HEX
)
2760 return print_hex_fmt(iter
);
2762 if (trace_flags
& TRACE_ITER_RAW
)
2763 return print_raw_fmt(iter
);
2765 return print_trace_fmt(iter
);
2768 void trace_latency_header(struct seq_file
*m
)
2770 struct trace_iterator
*iter
= m
->private;
2772 /* print nothing if the buffers are empty */
2773 if (trace_empty(iter
))
2776 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
2777 print_trace_header(m
, iter
);
2779 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2780 print_lat_help_header(m
);
2783 void trace_default_header(struct seq_file
*m
)
2785 struct trace_iterator
*iter
= m
->private;
2787 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
2790 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2791 /* print nothing if the buffers are empty */
2792 if (trace_empty(iter
))
2794 print_trace_header(m
, iter
);
2795 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2796 print_lat_help_header(m
);
2798 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
2799 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
2800 print_func_help_header_irq(iter
->trace_buffer
, m
);
2802 print_func_help_header(iter
->trace_buffer
, m
);
/* Warn in the header when function tracing was shut down by ftrace_kill(). */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	/* seq_puts: these are constant strings, no format parsing needed. */
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
2815 #ifdef CONFIG_TRACER_MAX_TRACE
2816 static void show_snapshot_main_help(struct seq_file
*m
)
2818 seq_printf(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2819 seq_printf(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2820 seq_printf(m
, "# Takes a snapshot of the main buffer.\n");
2821 seq_printf(m
, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2822 seq_printf(m
, "# (Doesn't have to be '2' works with any number that\n");
2823 seq_printf(m
, "# is not a '0' or '1')\n");
/* Usage text for the per-cpu snapshot files (seq_puts: constant strings). */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_puts(m, "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_puts(m, "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_puts(m, "# (Doesn't have to be '2' works with any number that\n");
	seq_puts(m, "# is not a '0' or '1')\n");
}
2841 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
2843 if (iter
->tr
->allocated_snapshot
)
2844 seq_printf(m
, "#\n# * Snapshot is allocated *\n#\n");
2846 seq_printf(m
, "#\n# * Snapshot is freed *\n#\n");
2848 seq_printf(m
, "# Snapshot commands:\n");
2849 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
2850 show_snapshot_main_help(m
);
2852 show_snapshot_percpu_help(m
);
2855 /* Should never be called */
2856 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
2859 static int s_show(struct seq_file
*m
, void *v
)
2861 struct trace_iterator
*iter
= v
;
2864 if (iter
->ent
== NULL
) {
2866 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
2868 test_ftrace_alive(m
);
2870 if (iter
->snapshot
&& trace_empty(iter
))
2871 print_snapshot_help(m
, iter
);
2872 else if (iter
->trace
&& iter
->trace
->print_header
)
2873 iter
->trace
->print_header(m
);
2875 trace_default_header(m
);
2877 } else if (iter
->leftover
) {
2879 * If we filled the seq_file buffer earlier, we
2880 * want to just show it now.
2882 ret
= trace_print_seq(m
, &iter
->seq
);
2884 /* ret should this time be zero, but you never know */
2885 iter
->leftover
= ret
;
2888 print_trace_line(iter
);
2889 ret
= trace_print_seq(m
, &iter
->seq
);
2891 * If we overflow the seq_file buffer, then it will
2892 * ask us for this data again at start up.
2894 * ret is 0 if seq_file write succeeded.
2897 iter
->leftover
= ret
;
2904 * Should be used after trace_array_get(), trace_types_lock
2905 * ensures that i_cdev was already initialized.
2907 static inline int tracing_get_cpu(struct inode
*inode
)
2909 if (inode
->i_cdev
) /* See trace_create_cpu_file() */
2910 return (long)inode
->i_cdev
- 1;
2911 return RING_BUFFER_ALL_CPUS
;
2914 static const struct seq_operations tracer_seq_ops
= {
2921 static struct trace_iterator
*
2922 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
2924 struct trace_array
*tr
= inode
->i_private
;
2925 struct trace_iterator
*iter
;
2928 if (tracing_disabled
)
2929 return ERR_PTR(-ENODEV
);
2931 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
2933 return ERR_PTR(-ENOMEM
);
2935 iter
->buffer_iter
= kzalloc(sizeof(*iter
->buffer_iter
) * num_possible_cpus(),
2937 if (!iter
->buffer_iter
)
2941 * We make a copy of the current tracer to avoid concurrent
2942 * changes on it while we are reading.
2944 mutex_lock(&trace_types_lock
);
2945 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
2949 *iter
->trace
= *tr
->current_trace
;
2951 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
2956 #ifdef CONFIG_TRACER_MAX_TRACE
2957 /* Currently only the top directory has a snapshot */
2958 if (tr
->current_trace
->print_max
|| snapshot
)
2959 iter
->trace_buffer
= &tr
->max_buffer
;
2962 iter
->trace_buffer
= &tr
->trace_buffer
;
2963 iter
->snapshot
= snapshot
;
2965 iter
->cpu_file
= tracing_get_cpu(inode
);
2966 mutex_init(&iter
->mutex
);
2968 /* Notify the tracer early; before we stop tracing. */
2969 if (iter
->trace
&& iter
->trace
->open
)
2970 iter
->trace
->open(iter
);
2972 /* Annotate start of buffers if we had overruns */
2973 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
2974 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
2976 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2977 if (trace_clocks
[tr
->clock_id
].in_ns
)
2978 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
2980 /* stop the trace while dumping if we are not opening "snapshot" */
2981 if (!iter
->snapshot
)
2982 tracing_stop_tr(tr
);
2984 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
2985 for_each_tracing_cpu(cpu
) {
2986 iter
->buffer_iter
[cpu
] =
2987 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
2989 ring_buffer_read_prepare_sync();
2990 for_each_tracing_cpu(cpu
) {
2991 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
2992 tracing_iter_reset(iter
, cpu
);
2995 cpu
= iter
->cpu_file
;
2996 iter
->buffer_iter
[cpu
] =
2997 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
2998 ring_buffer_read_prepare_sync();
2999 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3000 tracing_iter_reset(iter
, cpu
);
3003 mutex_unlock(&trace_types_lock
);
3008 mutex_unlock(&trace_types_lock
);
3010 kfree(iter
->buffer_iter
);
3012 seq_release_private(inode
, file
);
3013 return ERR_PTR(-ENOMEM
);
3016 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3018 if (tracing_disabled
)
3021 filp
->private_data
= inode
->i_private
;
3025 bool tracing_is_disabled(void)
3027 return (tracing_disabled
) ? true: false;
3031 * Open and update trace_array ref count.
3032 * Must have the current trace_array passed to it.
3034 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3036 struct trace_array
*tr
= inode
->i_private
;
3038 if (tracing_disabled
)
3041 if (trace_array_get(tr
) < 0)
3044 filp
->private_data
= inode
->i_private
;
3049 static int tracing_release(struct inode
*inode
, struct file
*file
)
3051 struct trace_array
*tr
= inode
->i_private
;
3052 struct seq_file
*m
= file
->private_data
;
3053 struct trace_iterator
*iter
;
3056 if (!(file
->f_mode
& FMODE_READ
)) {
3057 trace_array_put(tr
);
3061 /* Writes do not use seq_file */
3063 mutex_lock(&trace_types_lock
);
3065 for_each_tracing_cpu(cpu
) {
3066 if (iter
->buffer_iter
[cpu
])
3067 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3070 if (iter
->trace
&& iter
->trace
->close
)
3071 iter
->trace
->close(iter
);
3073 if (!iter
->snapshot
)
3074 /* reenable tracing if it was previously enabled */
3075 tracing_start_tr(tr
);
3077 __trace_array_put(tr
);
3079 mutex_unlock(&trace_types_lock
);
3081 mutex_destroy(&iter
->mutex
);
3082 free_cpumask_var(iter
->started
);
3084 kfree(iter
->buffer_iter
);
3085 seq_release_private(inode
, file
);
3090 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
3092 struct trace_array
*tr
= inode
->i_private
;
3094 trace_array_put(tr
);
3098 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
3100 struct trace_array
*tr
= inode
->i_private
;
3102 trace_array_put(tr
);
3104 return single_release(inode
, file
);
3107 static int tracing_open(struct inode
*inode
, struct file
*file
)
3109 struct trace_array
*tr
= inode
->i_private
;
3110 struct trace_iterator
*iter
;
3113 if (trace_array_get(tr
) < 0)
3116 /* If this file was open for write, then erase contents */
3117 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
3118 int cpu
= tracing_get_cpu(inode
);
3120 if (cpu
== RING_BUFFER_ALL_CPUS
)
3121 tracing_reset_online_cpus(&tr
->trace_buffer
);
3123 tracing_reset(&tr
->trace_buffer
, cpu
);
3126 if (file
->f_mode
& FMODE_READ
) {
3127 iter
= __tracing_open(inode
, file
, false);
3129 ret
= PTR_ERR(iter
);
3130 else if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
3131 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
3135 trace_array_put(tr
);
3141 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3143 struct tracer
*t
= v
;
3153 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3158 mutex_lock(&trace_types_lock
);
3159 for (t
= trace_types
; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
3165 static void t_stop(struct seq_file
*m
, void *p
)
3167 mutex_unlock(&trace_types_lock
);
3170 static int t_show(struct seq_file
*m
, void *v
)
3172 struct tracer
*t
= v
;
3177 seq_printf(m
, "%s", t
->name
);
3186 static const struct seq_operations show_traces_seq_ops
= {
3193 static int show_traces_open(struct inode
*inode
, struct file
*file
)
3195 if (tracing_disabled
)
3198 return seq_open(file
, &show_traces_seq_ops
);
3202 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
3203 size_t count
, loff_t
*ppos
)
3208 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
3212 if (file
->f_mode
& FMODE_READ
)
3213 ret
= seq_lseek(file
, offset
, whence
);
3215 file
->f_pos
= ret
= 0;
3220 static const struct file_operations tracing_fops
= {
3221 .open
= tracing_open
,
3223 .write
= tracing_write_stub
,
3224 .llseek
= tracing_lseek
,
3225 .release
= tracing_release
,
3228 static const struct file_operations show_traces_fops
= {
3229 .open
= show_traces_open
,
3231 .release
= seq_release
,
3232 .llseek
= seq_lseek
,
3236 * The tracer itself will not take this lock, but still we want
3237 * to provide a consistent cpumask to user-space:
3239 static DEFINE_MUTEX(tracing_cpumask_update_lock
);
3242 * Temporary storage for the character representation of the
3243 * CPU bitmask (and one more byte for the newline):
3245 static char mask_str
[NR_CPUS
+ 1];
3248 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
3249 size_t count
, loff_t
*ppos
)
3251 struct trace_array
*tr
= file_inode(filp
)->i_private
;
3254 mutex_lock(&tracing_cpumask_update_lock
);
3256 len
= cpumask_scnprintf(mask_str
, count
, tr
->tracing_cpumask
);
3257 if (count
- len
< 2) {
3261 len
+= sprintf(mask_str
+ len
, "\n");
3262 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, NR_CPUS
+1);
3265 mutex_unlock(&tracing_cpumask_update_lock
);
3271 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
3272 size_t count
, loff_t
*ppos
)
3274 struct trace_array
*tr
= file_inode(filp
)->i_private
;
3275 cpumask_var_t tracing_cpumask_new
;
3278 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
3281 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
3285 mutex_lock(&tracing_cpumask_update_lock
);
3287 local_irq_disable();
3288 arch_spin_lock(&ftrace_max_lock
);
3289 for_each_tracing_cpu(cpu
) {
3291 * Increase/decrease the disabled counter if we are
3292 * about to flip a bit in the cpumask:
3294 if (cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
3295 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3296 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3297 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3299 if (!cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
3300 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3301 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3302 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3305 arch_spin_unlock(&ftrace_max_lock
);
3308 cpumask_copy(tr
->tracing_cpumask
, tracing_cpumask_new
);
3310 mutex_unlock(&tracing_cpumask_update_lock
);
3311 free_cpumask_var(tracing_cpumask_new
);
3316 free_cpumask_var(tracing_cpumask_new
);
3321 static const struct file_operations tracing_cpumask_fops
= {
3322 .open
= tracing_open_generic_tr
,
3323 .read
= tracing_cpumask_read
,
3324 .write
= tracing_cpumask_write
,
3325 .release
= tracing_release_generic_tr
,
3326 .llseek
= generic_file_llseek
,
3329 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
3331 struct tracer_opt
*trace_opts
;
3332 struct trace_array
*tr
= m
->private;
3336 mutex_lock(&trace_types_lock
);
3337 tracer_flags
= tr
->current_trace
->flags
->val
;
3338 trace_opts
= tr
->current_trace
->flags
->opts
;
3340 for (i
= 0; trace_options
[i
]; i
++) {
3341 if (trace_flags
& (1 << i
))
3342 seq_printf(m
, "%s\n", trace_options
[i
]);
3344 seq_printf(m
, "no%s\n", trace_options
[i
]);
3347 for (i
= 0; trace_opts
[i
].name
; i
++) {
3348 if (tracer_flags
& trace_opts
[i
].bit
)
3349 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
3351 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
3353 mutex_unlock(&trace_types_lock
);
3358 static int __set_tracer_option(struct tracer
*trace
,
3359 struct tracer_flags
*tracer_flags
,
3360 struct tracer_opt
*opts
, int neg
)
3364 ret
= trace
->set_flag(tracer_flags
->val
, opts
->bit
, !neg
);
3369 tracer_flags
->val
&= ~opts
->bit
;
3371 tracer_flags
->val
|= opts
->bit
;
3375 /* Try to assign a tracer specific option */
3376 static int set_tracer_option(struct tracer
*trace
, char *cmp
, int neg
)
3378 struct tracer_flags
*tracer_flags
= trace
->flags
;
3379 struct tracer_opt
*opts
= NULL
;
3382 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
3383 opts
= &tracer_flags
->opts
[i
];
3385 if (strcmp(cmp
, opts
->name
) == 0)
3386 return __set_tracer_option(trace
, trace
->flags
,
3393 /* Some tracers require overwrite to stay enabled */
3394 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
3396 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
3402 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
3404 /* do nothing if flag is already set */
3405 if (!!(trace_flags
& mask
) == !!enabled
)
3408 /* Give the tracer a chance to approve the change */
3409 if (tr
->current_trace
->flag_changed
)
3410 if (tr
->current_trace
->flag_changed(tr
->current_trace
, mask
, !!enabled
))
3414 trace_flags
|= mask
;
3416 trace_flags
&= ~mask
;
3418 if (mask
== TRACE_ITER_RECORD_CMD
)
3419 trace_event_enable_cmd_record(enabled
);
3421 if (mask
== TRACE_ITER_OVERWRITE
) {
3422 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
3423 #ifdef CONFIG_TRACER_MAX_TRACE
3424 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
3428 if (mask
== TRACE_ITER_PRINTK
)
3429 trace_printk_start_stop_comm(enabled
);
3434 static int trace_set_options(struct trace_array
*tr
, char *option
)
3441 cmp
= strstrip(option
);
3443 if (strncmp(cmp
, "no", 2) == 0) {
3448 mutex_lock(&trace_types_lock
);
3450 for (i
= 0; trace_options
[i
]; i
++) {
3451 if (strcmp(cmp
, trace_options
[i
]) == 0) {
3452 ret
= set_tracer_flag(tr
, 1 << i
, !neg
);
3457 /* If no option could be set, test the specific tracer options */
3458 if (!trace_options
[i
])
3459 ret
= set_tracer_option(tr
->current_trace
, cmp
, neg
);
3461 mutex_unlock(&trace_types_lock
);
3467 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
3468 size_t cnt
, loff_t
*ppos
)
3470 struct seq_file
*m
= filp
->private_data
;
3471 struct trace_array
*tr
= m
->private;
3475 if (cnt
>= sizeof(buf
))
3478 if (copy_from_user(&buf
, ubuf
, cnt
))
3483 ret
= trace_set_options(tr
, buf
);
3492 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
3494 struct trace_array
*tr
= inode
->i_private
;
3497 if (tracing_disabled
)
3500 if (trace_array_get(tr
) < 0)
3503 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
3505 trace_array_put(tr
);
3510 static const struct file_operations tracing_iter_fops
= {
3511 .open
= tracing_trace_options_open
,
3513 .llseek
= seq_lseek
,
3514 .release
= tracing_single_release_tr
,
3515 .write
= tracing_trace_options_write
,
3518 static const char readme_msg
[] =
3519 "tracing mini-HOWTO:\n\n"
3520 "# echo 0 > tracing_on : quick way to disable tracing\n"
3521 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3522 " Important files:\n"
3523 " trace\t\t\t- The static contents of the buffer\n"
3524 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3525 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3526 " current_tracer\t- function and latency tracers\n"
3527 " available_tracers\t- list of configured tracers for current_tracer\n"
3528 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3529 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3530 " trace_clock\t\t-change the clock used to order events\n"
3531 " local: Per cpu clock but may not be synced across CPUs\n"
3532 " global: Synced across CPUs but slows tracing down.\n"
3533 " counter: Not a clock, but just an increment\n"
3534 " uptime: Jiffy counter from time of boot\n"
3535 " perf: Same clock that perf events use\n"
3536 #ifdef CONFIG_X86_64
3537 " x86-tsc: TSC cycle counter\n"
3539 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3540 " tracing_cpumask\t- Limit which CPUs to trace\n"
3541 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3542 "\t\t\t Remove sub-buffer with rmdir\n"
3543 " trace_options\t\t- Set format or modify how tracing happens\n"
3544 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3545 "\t\t\t option name\n"
3546 #ifdef CONFIG_DYNAMIC_FTRACE
3547 "\n available_filter_functions - list of functions that can be filtered on\n"
3548 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3549 "\t\t\t functions\n"
3550 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3551 "\t modules: Can select a group via module\n"
3552 "\t Format: :mod:<module-name>\n"
3553 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3554 "\t triggers: a command to perform when function is hit\n"
3555 "\t Format: <function>:<trigger>[:count]\n"
3556 "\t trigger: traceon, traceoff\n"
3557 "\t\t enable_event:<system>:<event>\n"
3558 "\t\t disable_event:<system>:<event>\n"
3559 #ifdef CONFIG_STACKTRACE
3562 #ifdef CONFIG_TRACER_SNAPSHOT
3565 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3566 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3567 "\t The first one will disable tracing every time do_fault is hit\n"
3568 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3569 "\t The first time do trap is hit and it disables tracing, the\n"
3570 "\t counter will decrement to 2. If tracing is already disabled,\n"
3571 "\t the counter will not decrement. It only decrements when the\n"
3572 "\t trigger did work\n"
3573 "\t To remove trigger without count:\n"
3574 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3575 "\t To remove trigger with a count:\n"
3576 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3577 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3578 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3579 "\t modules: Can select a group via module command :mod:\n"
3580 "\t Does not accept triggers\n"
3581 #endif /* CONFIG_DYNAMIC_FTRACE */
3582 #ifdef CONFIG_FUNCTION_TRACER
3583 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3586 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3587 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3588 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3590 #ifdef CONFIG_TRACER_SNAPSHOT
3591 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3592 "\t\t\t snapshot buffer. Read the contents for more\n"
3593 "\t\t\t information\n"
3595 #ifdef CONFIG_STACK_TRACER
3596 " stack_trace\t\t- Shows the max stack trace when active\n"
3597 " stack_max_size\t- Shows current max stack size that was traced\n"
3598 "\t\t\t Write into this file to reset the max size (trigger a\n"
3599 "\t\t\t new trace)\n"
3600 #ifdef CONFIG_DYNAMIC_FTRACE
3601 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3604 #endif /* CONFIG_STACK_TRACER */
3605 " events/\t\t- Directory containing all trace event subsystems:\n"
3606 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3607 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3608 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3610 " filter\t\t- If set, only events passing filter are traced\n"
3611 " events/<system>/<event>/\t- Directory containing control files for\n"
3613 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3614 " filter\t\t- If set, only events passing filter are traced\n"
3615 " trigger\t\t- If set, a command to perform when event is hit\n"
3616 "\t Format: <trigger>[:count][if <filter>]\n"
3617 "\t trigger: traceon, traceoff\n"
3618 "\t enable_event:<system>:<event>\n"
3619 "\t disable_event:<system>:<event>\n"
3620 #ifdef CONFIG_STACKTRACE
3623 #ifdef CONFIG_TRACER_SNAPSHOT
3626 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3627 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3628 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3629 "\t events/block/block_unplug/trigger\n"
3630 "\t The first disables tracing every time block_unplug is hit.\n"
3631 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3632 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3633 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3634 "\t Like function triggers, the counter is only decremented if it\n"
3635 "\t enabled or disabled tracing.\n"
3636 "\t To remove a trigger without a count:\n"
3637 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3638 "\t To remove a trigger with a count:\n"
3639 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3640 "\t Filters can be ignored when removing a trigger.\n"
3644 tracing_readme_read(struct file
*filp
, char __user
*ubuf
,
3645 size_t cnt
, loff_t
*ppos
)
3647 return simple_read_from_buffer(ubuf
, cnt
, ppos
,
3648 readme_msg
, strlen(readme_msg
));
3651 static const struct file_operations tracing_readme_fops
= {
3652 .open
= tracing_open_generic
,
3653 .read
= tracing_readme_read
,
3654 .llseek
= generic_file_llseek
,
3658 tracing_saved_cmdlines_read(struct file
*file
, char __user
*ubuf
,
3659 size_t cnt
, loff_t
*ppos
)
3668 file_buf
= kmalloc(SAVED_CMDLINES
*(16+TASK_COMM_LEN
), GFP_KERNEL
);
3672 buf_comm
= kmalloc(TASK_COMM_LEN
, GFP_KERNEL
);
3680 for (i
= 0; i
< SAVED_CMDLINES
; i
++) {
3683 pid
= map_cmdline_to_pid
[i
];
3684 if (pid
== -1 || pid
== NO_CMDLINE_MAP
)
3687 trace_find_cmdline(pid
, buf_comm
);
3688 r
= sprintf(buf
, "%d %s\n", pid
, buf_comm
);
3693 len
= simple_read_from_buffer(ubuf
, cnt
, ppos
,
3702 static const struct file_operations tracing_saved_cmdlines_fops
= {
3703 .open
= tracing_open_generic
,
3704 .read
= tracing_saved_cmdlines_read
,
3705 .llseek
= generic_file_llseek
,
3709 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
3710 size_t cnt
, loff_t
*ppos
)
3712 struct trace_array
*tr
= filp
->private_data
;
3713 char buf
[MAX_TRACER_SIZE
+2];
3716 mutex_lock(&trace_types_lock
);
3717 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
3718 mutex_unlock(&trace_types_lock
);
3720 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3723 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
3725 tracing_reset_online_cpus(&tr
->trace_buffer
);
3729 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
3733 for_each_tracing_cpu(cpu
)
3734 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
3737 #ifdef CONFIG_TRACER_MAX_TRACE
3738 /* resize @tr's buffer to the size of @size_tr's entries */
3739 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
3740 struct trace_buffer
*size_buf
, int cpu_id
)
3744 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
3745 for_each_tracing_cpu(cpu
) {
3746 ret
= ring_buffer_resize(trace_buf
->buffer
,
3747 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
3750 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
3751 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
3754 ret
= ring_buffer_resize(trace_buf
->buffer
,
3755 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
3757 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
3758 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
3763 #endif /* CONFIG_TRACER_MAX_TRACE */
3765 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
3766 unsigned long size
, int cpu
)
3771 * If kernel or user changes the size of the ring buffer
3772 * we use the size that was given, and we can forget about
3773 * expanding it later.
3775 ring_buffer_expanded
= true;
3777 /* May be called before buffers are initialized */
3778 if (!tr
->trace_buffer
.buffer
)
3781 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
3785 #ifdef CONFIG_TRACER_MAX_TRACE
3786 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
3787 !tr
->current_trace
->use_max_tr
)
3790 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
3792 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
3793 &tr
->trace_buffer
, cpu
);
3796 * AARGH! We are left with different
3797 * size max buffer!!!!
3798 * The max buffer is our "snapshot" buffer.
3799 * When a tracer needs a snapshot (one of the
3800 * latency tracers), it swaps the max buffer
3801 * with the saved snap shot. We succeeded to
3802 * update the size of the main buffer, but failed to
3803 * update the size of the max buffer. But when we tried
3804 * to reset the main buffer to the original size, we
3805 * failed there too. This is very unlikely to
3806 * happen, but if it does, warn and kill all
3810 tracing_disabled
= 1;
3815 if (cpu
== RING_BUFFER_ALL_CPUS
)
3816 set_buffer_entries(&tr
->max_buffer
, size
);
3818 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
3821 #endif /* CONFIG_TRACER_MAX_TRACE */
3823 if (cpu
== RING_BUFFER_ALL_CPUS
)
3824 set_buffer_entries(&tr
->trace_buffer
, size
);
3826 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
3831 static ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
3832 unsigned long size
, int cpu_id
)
3836 mutex_lock(&trace_types_lock
);
3838 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
3839 /* make sure, this cpu is enabled in the mask */
3840 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
3846 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
3851 mutex_unlock(&trace_types_lock
);
3858 * tracing_update_buffers - used by tracing facility to expand ring buffers
3860 * To save on memory when the tracing is never used on a system with it
3861 * configured in. The ring buffers are set to a minimum size. But once
3862 * a user starts to use the tracing facility, then they need to grow
3863 * to their default size.
3865 * This function is to be called when a tracer is about to be used.
3867 int tracing_update_buffers(void)
3871 mutex_lock(&trace_types_lock
);
3872 if (!ring_buffer_expanded
)
3873 ret
= __tracing_resize_ring_buffer(&global_trace
, trace_buf_size
,
3874 RING_BUFFER_ALL_CPUS
);
3875 mutex_unlock(&trace_types_lock
);
3880 struct trace_option_dentry
;
3882 static struct trace_option_dentry
*
3883 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
3886 destroy_trace_option_files(struct trace_option_dentry
*topts
);
3888 static int tracing_set_tracer(const char *buf
)
3890 static struct trace_option_dentry
*topts
;
3891 struct trace_array
*tr
= &global_trace
;
3893 #ifdef CONFIG_TRACER_MAX_TRACE
3898 mutex_lock(&trace_types_lock
);
3900 if (!ring_buffer_expanded
) {
3901 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
3902 RING_BUFFER_ALL_CPUS
);
3908 for (t
= trace_types
; t
; t
= t
->next
) {
3909 if (strcmp(t
->name
, buf
) == 0)
3916 if (t
== tr
->current_trace
)
3919 trace_branch_disable();
3921 tr
->current_trace
->enabled
= false;
3923 if (tr
->current_trace
->reset
)
3924 tr
->current_trace
->reset(tr
);
3926 /* Current trace needs to be nop_trace before synchronize_sched */
3927 tr
->current_trace
= &nop_trace
;
3929 #ifdef CONFIG_TRACER_MAX_TRACE
3930 had_max_tr
= tr
->allocated_snapshot
;
3932 if (had_max_tr
&& !t
->use_max_tr
) {
3934 * We need to make sure that the update_max_tr sees that
3935 * current_trace changed to nop_trace to keep it from
3936 * swapping the buffers after we resize it.
3937 * The update_max_tr is called from interrupts disabled
3938 * so a synchronized_sched() is sufficient.
3940 synchronize_sched();
3944 destroy_trace_option_files(topts
);
3946 topts
= create_trace_option_files(tr
, t
);
3948 #ifdef CONFIG_TRACER_MAX_TRACE
3949 if (t
->use_max_tr
&& !had_max_tr
) {
3950 ret
= alloc_snapshot(tr
);
3957 ret
= tracer_init(t
, tr
);
3962 tr
->current_trace
= t
;
3963 tr
->current_trace
->enabled
= true;
3964 trace_branch_enable(tr
);
3966 mutex_unlock(&trace_types_lock
);
3972 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
3973 size_t cnt
, loff_t
*ppos
)
3975 char buf
[MAX_TRACER_SIZE
+1];
3982 if (cnt
> MAX_TRACER_SIZE
)
3983 cnt
= MAX_TRACER_SIZE
;
3985 if (copy_from_user(&buf
, ubuf
, cnt
))
3990 /* strip ending whitespace. */
3991 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
3994 err
= tracing_set_tracer(buf
);
4004 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
4005 size_t cnt
, loff_t
*ppos
)
4007 unsigned long *ptr
= filp
->private_data
;
4011 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
4012 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
4013 if (r
> sizeof(buf
))
4015 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4019 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
4020 size_t cnt
, loff_t
*ppos
)
4022 unsigned long *ptr
= filp
->private_data
;
4026 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4035 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
4037 struct trace_array
*tr
= inode
->i_private
;
4038 struct trace_iterator
*iter
;
4041 if (tracing_disabled
)
4044 if (trace_array_get(tr
) < 0)
4047 mutex_lock(&trace_types_lock
);
4049 /* create a buffer to store the information to pass to userspace */
4050 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
4053 __trace_array_put(tr
);
4058 * We make a copy of the current tracer to avoid concurrent
4059 * changes on it while we are reading.
4061 iter
->trace
= kmalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
4066 *iter
->trace
= *tr
->current_trace
;
4068 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
4073 /* trace pipe does not show start of buffer */
4074 cpumask_setall(iter
->started
);
4076 if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
4077 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4079 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4080 if (trace_clocks
[tr
->clock_id
].in_ns
)
4081 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
4084 iter
->trace_buffer
= &tr
->trace_buffer
;
4085 iter
->cpu_file
= tracing_get_cpu(inode
);
4086 mutex_init(&iter
->mutex
);
4087 filp
->private_data
= iter
;
4089 if (iter
->trace
->pipe_open
)
4090 iter
->trace
->pipe_open(iter
);
4092 nonseekable_open(inode
, filp
);
4094 mutex_unlock(&trace_types_lock
);
4100 __trace_array_put(tr
);
4101 mutex_unlock(&trace_types_lock
);
4105 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
4107 struct trace_iterator
*iter
= file
->private_data
;
4108 struct trace_array
*tr
= inode
->i_private
;
4110 mutex_lock(&trace_types_lock
);
4112 if (iter
->trace
->pipe_close
)
4113 iter
->trace
->pipe_close(iter
);
4115 mutex_unlock(&trace_types_lock
);
4117 free_cpumask_var(iter
->started
);
4118 mutex_destroy(&iter
->mutex
);
4122 trace_array_put(tr
);
4128 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
4130 /* Iterators are static, they should be filled or empty */
4131 if (trace_buffer_iter(iter
, iter
->cpu_file
))
4132 return POLLIN
| POLLRDNORM
;
4134 if (trace_flags
& TRACE_ITER_BLOCK
)
4136 * Always select as readable when in blocking mode
4138 return POLLIN
| POLLRDNORM
;
4140 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
4145 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
4147 struct trace_iterator
*iter
= filp
->private_data
;
4149 return trace_poll(iter
, filp
, poll_table
);
4153 * This is a make-shift waitqueue.
4154 * A tracer might use this callback on some rare cases:
4156 * 1) the current tracer might hold the runqueue lock when it wakes up
4157 * a reader, hence a deadlock (sched, function, and function graph tracers)
4158 * 2) the function tracers, trace all functions, we don't want
4159 * the overhead of calling wake_up and friends
4160 * (and tracing them too)
4162 * Anyway, this is really very primitive wakeup.
4164 void poll_wait_pipe(struct trace_iterator
*iter
)
4166 set_current_state(TASK_INTERRUPTIBLE
);
4167 /* sleep for 100 msecs, and try again. */
4168 schedule_timeout(HZ
/ 10);
4171 /* Must be called with trace_types_lock mutex held. */
4172 static int tracing_wait_pipe(struct file
*filp
)
4174 struct trace_iterator
*iter
= filp
->private_data
;
4176 while (trace_empty(iter
)) {
4178 if ((filp
->f_flags
& O_NONBLOCK
)) {
4182 mutex_unlock(&iter
->mutex
);
4184 iter
->trace
->wait_pipe(iter
);
4186 mutex_lock(&iter
->mutex
);
4188 if (signal_pending(current
))
4192 * We block until we read something and tracing is disabled.
4193 * We still block if tracing is disabled, but we have never
4194 * read anything. This allows a user to cat this file, and
4195 * then enable tracing. But after we have read something,
4196 * we give an EOF when tracing is again disabled.
4198 * iter->pos will be 0 if we haven't read anything.
4200 if (!tracing_is_on() && iter
->pos
)
4211 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
4212 size_t cnt
, loff_t
*ppos
)
4214 struct trace_iterator
*iter
= filp
->private_data
;
4215 struct trace_array
*tr
= iter
->tr
;
4218 /* return any leftover data */
4219 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4223 trace_seq_init(&iter
->seq
);
4225 /* copy the tracer to avoid using a global lock all around */
4226 mutex_lock(&trace_types_lock
);
4227 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4228 *iter
->trace
= *tr
->current_trace
;
4229 mutex_unlock(&trace_types_lock
);
4232 * Avoid more than one consumer on a single file descriptor
4233 * This is just a matter of traces coherency, the ring buffer itself
4236 mutex_lock(&iter
->mutex
);
4237 if (iter
->trace
->read
) {
4238 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
4244 sret
= tracing_wait_pipe(filp
);
4248 /* stop when tracing is finished */
4249 if (trace_empty(iter
)) {
4254 if (cnt
>= PAGE_SIZE
)
4255 cnt
= PAGE_SIZE
- 1;
4257 /* reset all but tr, trace, and overruns */
4258 memset(&iter
->seq
, 0,
4259 sizeof(struct trace_iterator
) -
4260 offsetof(struct trace_iterator
, seq
));
4261 cpumask_clear(iter
->started
);
4264 trace_event_read_lock();
4265 trace_access_lock(iter
->cpu_file
);
4266 while (trace_find_next_entry_inc(iter
) != NULL
) {
4267 enum print_line_t ret
;
4268 int len
= iter
->seq
.len
;
4270 ret
= print_trace_line(iter
);
4271 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4272 /* don't print partial lines */
4273 iter
->seq
.len
= len
;
4276 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4277 trace_consume(iter
);
4279 if (iter
->seq
.len
>= cnt
)
4283 * Setting the full flag means we reached the trace_seq buffer
4284 * size and we should leave by partial output condition above.
4285 * One of the trace_seq_* functions is not used properly.
4287 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
4290 trace_access_unlock(iter
->cpu_file
);
4291 trace_event_read_unlock();
4293 /* Now copy what we have to the user */
4294 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4295 if (iter
->seq
.readpos
>= iter
->seq
.len
)
4296 trace_seq_init(&iter
->seq
);
4299 * If there was nothing to send to user, in spite of consuming trace
4300 * entries, go back to wait for more entries.
4306 mutex_unlock(&iter
->mutex
);
4311 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
4314 __free_page(spd
->pages
[idx
]);
4317 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
4319 .map
= generic_pipe_buf_map
,
4320 .unmap
= generic_pipe_buf_unmap
,
4321 .confirm
= generic_pipe_buf_confirm
,
4322 .release
= generic_pipe_buf_release
,
4323 .steal
= generic_pipe_buf_steal
,
4324 .get
= generic_pipe_buf_get
,
4328 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
4333 /* Seq buffer is page-sized, exactly what we need. */
4335 count
= iter
->seq
.len
;
4336 ret
= print_trace_line(iter
);
4337 count
= iter
->seq
.len
- count
;
4340 iter
->seq
.len
-= count
;
4343 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4344 iter
->seq
.len
-= count
;
4348 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4349 trace_consume(iter
);
4351 if (!trace_find_next_entry_inc(iter
)) {
4361 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
4363 struct pipe_inode_info
*pipe
,
4367 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
4368 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
4369 struct trace_iterator
*iter
= filp
->private_data
;
4370 struct splice_pipe_desc spd
= {
4372 .partial
= partial_def
,
4373 .nr_pages
= 0, /* This gets updated below. */
4374 .nr_pages_max
= PIPE_DEF_BUFFERS
,
4376 .ops
= &tracing_pipe_buf_ops
,
4377 .spd_release
= tracing_spd_release_pipe
,
4379 struct trace_array
*tr
= iter
->tr
;
4384 if (splice_grow_spd(pipe
, &spd
))
4387 /* copy the tracer to avoid using a global lock all around */
4388 mutex_lock(&trace_types_lock
);
4389 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4390 *iter
->trace
= *tr
->current_trace
;
4391 mutex_unlock(&trace_types_lock
);
4393 mutex_lock(&iter
->mutex
);
4395 if (iter
->trace
->splice_read
) {
4396 ret
= iter
->trace
->splice_read(iter
, filp
,
4397 ppos
, pipe
, len
, flags
);
4402 ret
= tracing_wait_pipe(filp
);
4406 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
4411 trace_event_read_lock();
4412 trace_access_lock(iter
->cpu_file
);
4414 /* Fill as many pages as possible. */
4415 for (i
= 0, rem
= len
; i
< pipe
->buffers
&& rem
; i
++) {
4416 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
4420 rem
= tracing_fill_pipe_page(rem
, iter
);
4422 /* Copy the data into the page, so we can start over. */
4423 ret
= trace_seq_to_buffer(&iter
->seq
,
4424 page_address(spd
.pages
[i
]),
4427 __free_page(spd
.pages
[i
]);
4430 spd
.partial
[i
].offset
= 0;
4431 spd
.partial
[i
].len
= iter
->seq
.len
;
4433 trace_seq_init(&iter
->seq
);
4436 trace_access_unlock(iter
->cpu_file
);
4437 trace_event_read_unlock();
4438 mutex_unlock(&iter
->mutex
);
4442 ret
= splice_to_pipe(pipe
, &spd
);
4444 splice_shrink_spd(&spd
);
4448 mutex_unlock(&iter
->mutex
);
4453 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
4454 size_t cnt
, loff_t
*ppos
)
4456 struct inode
*inode
= file_inode(filp
);
4457 struct trace_array
*tr
= inode
->i_private
;
4458 int cpu
= tracing_get_cpu(inode
);
4463 mutex_lock(&trace_types_lock
);
4465 if (cpu
== RING_BUFFER_ALL_CPUS
) {
4466 int cpu
, buf_size_same
;
4471 /* check if all cpu sizes are same */
4472 for_each_tracing_cpu(cpu
) {
4473 /* fill in the size from first enabled cpu */
4475 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
4476 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
4482 if (buf_size_same
) {
4483 if (!ring_buffer_expanded
)
4484 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
4486 trace_buf_size
>> 10);
4488 r
= sprintf(buf
, "%lu\n", size
>> 10);
4490 r
= sprintf(buf
, "X\n");
4492 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
4494 mutex_unlock(&trace_types_lock
);
4496 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4501 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
4502 size_t cnt
, loff_t
*ppos
)
4504 struct inode
*inode
= file_inode(filp
);
4505 struct trace_array
*tr
= inode
->i_private
;
4509 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4513 /* must have at least 1 entry */
4517 /* value is in KB */
4519 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
4529 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
4530 size_t cnt
, loff_t
*ppos
)
4532 struct trace_array
*tr
= filp
->private_data
;
4535 unsigned long size
= 0, expanded_size
= 0;
4537 mutex_lock(&trace_types_lock
);
4538 for_each_tracing_cpu(cpu
) {
4539 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
4540 if (!ring_buffer_expanded
)
4541 expanded_size
+= trace_buf_size
>> 10;
4543 if (ring_buffer_expanded
)
4544 r
= sprintf(buf
, "%lu\n", size
);
4546 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
4547 mutex_unlock(&trace_types_lock
);
4549 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4553 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
4554 size_t cnt
, loff_t
*ppos
)
4557 * There is no need to read what the user has written, this function
4558 * is just to make sure that there is no error when "echo" is used
4567 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
4569 struct trace_array
*tr
= inode
->i_private
;
4571 /* disable tracing ? */
4572 if (trace_flags
& TRACE_ITER_STOP_ON_FREE
)
4573 tracer_tracing_off(tr
);
4574 /* resize the ring buffer to 0 */
4575 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
4577 trace_array_put(tr
);
4583 tracing_mark_write(struct file
*filp
, const char __user
*ubuf
,
4584 size_t cnt
, loff_t
*fpos
)
4586 unsigned long addr
= (unsigned long)ubuf
;
4587 struct trace_array
*tr
= filp
->private_data
;
4588 struct ring_buffer_event
*event
;
4589 struct ring_buffer
*buffer
;
4590 struct print_entry
*entry
;
4591 unsigned long irq_flags
;
4592 struct page
*pages
[2];
4602 if (tracing_disabled
)
4605 if (!(trace_flags
& TRACE_ITER_MARKERS
))
4608 if (cnt
> TRACE_BUF_SIZE
)
4609 cnt
= TRACE_BUF_SIZE
;
4612 * Userspace is injecting traces into the kernel trace buffer.
4613 * We want to be as non intrusive as possible.
4614 * To do so, we do not want to allocate any special buffers
4615 * or take any locks, but instead write the userspace data
4616 * straight into the ring buffer.
4618 * First we need to pin the userspace buffer into memory,
4619 * which, most likely it is, because it just referenced it.
4620 * But there's no guarantee that it is. By using get_user_pages_fast()
4621 * and kmap_atomic/kunmap_atomic() we can get access to the
4622 * pages directly. We then write the data directly into the
4625 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
4627 /* check if we cross pages */
4628 if ((addr
& PAGE_MASK
) != ((addr
+ cnt
) & PAGE_MASK
))
4631 offset
= addr
& (PAGE_SIZE
- 1);
4634 ret
= get_user_pages_fast(addr
, nr_pages
, 0, pages
);
4635 if (ret
< nr_pages
) {
4637 put_page(pages
[ret
]);
4642 for (i
= 0; i
< nr_pages
; i
++)
4643 map_page
[i
] = kmap_atomic(pages
[i
]);
4645 local_save_flags(irq_flags
);
4646 size
= sizeof(*entry
) + cnt
+ 2; /* possible \n added */
4647 buffer
= tr
->trace_buffer
.buffer
;
4648 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
4649 irq_flags
, preempt_count());
4651 /* Ring buffer disabled, return as if not open for write */
4656 entry
= ring_buffer_event_data(event
);
4657 entry
->ip
= _THIS_IP_
;
4659 if (nr_pages
== 2) {
4660 len
= PAGE_SIZE
- offset
;
4661 memcpy(&entry
->buf
, map_page
[0] + offset
, len
);
4662 memcpy(&entry
->buf
[len
], map_page
[1], cnt
- len
);
4664 memcpy(&entry
->buf
, map_page
[0] + offset
, cnt
);
4666 if (entry
->buf
[cnt
- 1] != '\n') {
4667 entry
->buf
[cnt
] = '\n';
4668 entry
->buf
[cnt
+ 1] = '\0';
4670 entry
->buf
[cnt
] = '\0';
4672 __buffer_unlock_commit(buffer
, event
);
4679 for (i
= 0; i
< nr_pages
; i
++){
4680 kunmap_atomic(map_page
[i
]);
4687 static int tracing_clock_show(struct seq_file
*m
, void *v
)
4689 struct trace_array
*tr
= m
->private;
4692 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++)
4694 "%s%s%s%s", i
? " " : "",
4695 i
== tr
->clock_id
? "[" : "", trace_clocks
[i
].name
,
4696 i
== tr
->clock_id
? "]" : "");
4702 static ssize_t
tracing_clock_write(struct file
*filp
, const char __user
*ubuf
,
4703 size_t cnt
, loff_t
*fpos
)
4705 struct seq_file
*m
= filp
->private_data
;
4706 struct trace_array
*tr
= m
->private;
4708 const char *clockstr
;
4711 if (cnt
>= sizeof(buf
))
4714 if (copy_from_user(&buf
, ubuf
, cnt
))
4719 clockstr
= strstrip(buf
);
4721 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++) {
4722 if (strcmp(trace_clocks
[i
].name
, clockstr
) == 0)
4725 if (i
== ARRAY_SIZE(trace_clocks
))
4728 mutex_lock(&trace_types_lock
);
4732 ring_buffer_set_clock(tr
->trace_buffer
.buffer
, trace_clocks
[i
].func
);
4735 * New clock may not be consistent with the previous clock.
4736 * Reset the buffer so that it doesn't have incomparable timestamps.
4738 tracing_reset_online_cpus(&tr
->trace_buffer
);
4740 #ifdef CONFIG_TRACER_MAX_TRACE
4741 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
&& tr
->max_buffer
.buffer
)
4742 ring_buffer_set_clock(tr
->max_buffer
.buffer
, trace_clocks
[i
].func
);
4743 tracing_reset_online_cpus(&tr
->max_buffer
);
4746 mutex_unlock(&trace_types_lock
);
4753 static int tracing_clock_open(struct inode
*inode
, struct file
*file
)
4755 struct trace_array
*tr
= inode
->i_private
;
4758 if (tracing_disabled
)
4761 if (trace_array_get(tr
))
4764 ret
= single_open(file
, tracing_clock_show
, inode
->i_private
);
4766 trace_array_put(tr
);
4771 struct ftrace_buffer_info
{
4772 struct trace_iterator iter
;
4777 #ifdef CONFIG_TRACER_SNAPSHOT
4778 static int tracing_snapshot_open(struct inode
*inode
, struct file
*file
)
4780 struct trace_array
*tr
= inode
->i_private
;
4781 struct trace_iterator
*iter
;
4785 if (trace_array_get(tr
) < 0)
4788 if (file
->f_mode
& FMODE_READ
) {
4789 iter
= __tracing_open(inode
, file
, true);
4791 ret
= PTR_ERR(iter
);
4793 /* Writes still need the seq_file to hold the private data */
4795 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
4798 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
4806 iter
->trace_buffer
= &tr
->max_buffer
;
4807 iter
->cpu_file
= tracing_get_cpu(inode
);
4809 file
->private_data
= m
;
4813 trace_array_put(tr
);
4819 tracing_snapshot_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
4822 struct seq_file
*m
= filp
->private_data
;
4823 struct trace_iterator
*iter
= m
->private;
4824 struct trace_array
*tr
= iter
->tr
;
4828 ret
= tracing_update_buffers();
4832 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4836 mutex_lock(&trace_types_lock
);
4838 if (tr
->current_trace
->use_max_tr
) {
4845 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
4849 if (tr
->allocated_snapshot
)
4853 /* Only allow per-cpu swap if the ring buffer supports it */
4854 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4855 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
4860 if (!tr
->allocated_snapshot
) {
4861 ret
= alloc_snapshot(tr
);
4865 local_irq_disable();
4866 /* Now, we're going to swap */
4867 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
4868 update_max_tr(tr
, current
, smp_processor_id());
4870 update_max_tr_single(tr
, current
, iter
->cpu_file
);
4874 if (tr
->allocated_snapshot
) {
4875 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
4876 tracing_reset_online_cpus(&tr
->max_buffer
);
4878 tracing_reset(&tr
->max_buffer
, iter
->cpu_file
);
4888 mutex_unlock(&trace_types_lock
);
4892 static int tracing_snapshot_release(struct inode
*inode
, struct file
*file
)
4894 struct seq_file
*m
= file
->private_data
;
4897 ret
= tracing_release(inode
, file
);
4899 if (file
->f_mode
& FMODE_READ
)
4902 /* If write only, the seq_file is just a stub */
4910 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
);
4911 static ssize_t
tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
4912 size_t count
, loff_t
*ppos
);
4913 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
);
4914 static ssize_t
tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
4915 struct pipe_inode_info
*pipe
, size_t len
, unsigned int flags
);
4917 static int snapshot_raw_open(struct inode
*inode
, struct file
*filp
)
4919 struct ftrace_buffer_info
*info
;
4922 ret
= tracing_buffers_open(inode
, filp
);
4926 info
= filp
->private_data
;
4928 if (info
->iter
.trace
->use_max_tr
) {
4929 tracing_buffers_release(inode
, filp
);
4933 info
->iter
.snapshot
= true;
4934 info
->iter
.trace_buffer
= &info
->iter
.tr
->max_buffer
;
4939 #endif /* CONFIG_TRACER_SNAPSHOT */
4942 static const struct file_operations tracing_max_lat_fops
= {
4943 .open
= tracing_open_generic
,
4944 .read
= tracing_max_lat_read
,
4945 .write
= tracing_max_lat_write
,
4946 .llseek
= generic_file_llseek
,
4949 static const struct file_operations set_tracer_fops
= {
4950 .open
= tracing_open_generic
,
4951 .read
= tracing_set_trace_read
,
4952 .write
= tracing_set_trace_write
,
4953 .llseek
= generic_file_llseek
,
4956 static const struct file_operations tracing_pipe_fops
= {
4957 .open
= tracing_open_pipe
,
4958 .poll
= tracing_poll_pipe
,
4959 .read
= tracing_read_pipe
,
4960 .splice_read
= tracing_splice_read_pipe
,
4961 .release
= tracing_release_pipe
,
4962 .llseek
= no_llseek
,
4965 static const struct file_operations tracing_entries_fops
= {
4966 .open
= tracing_open_generic_tr
,
4967 .read
= tracing_entries_read
,
4968 .write
= tracing_entries_write
,
4969 .llseek
= generic_file_llseek
,
4970 .release
= tracing_release_generic_tr
,
4973 static const struct file_operations tracing_total_entries_fops
= {
4974 .open
= tracing_open_generic_tr
,
4975 .read
= tracing_total_entries_read
,
4976 .llseek
= generic_file_llseek
,
4977 .release
= tracing_release_generic_tr
,
4980 static const struct file_operations tracing_free_buffer_fops
= {
4981 .open
= tracing_open_generic_tr
,
4982 .write
= tracing_free_buffer_write
,
4983 .release
= tracing_free_buffer_release
,
4986 static const struct file_operations tracing_mark_fops
= {
4987 .open
= tracing_open_generic_tr
,
4988 .write
= tracing_mark_write
,
4989 .llseek
= generic_file_llseek
,
4990 .release
= tracing_release_generic_tr
,
4993 static const struct file_operations trace_clock_fops
= {
4994 .open
= tracing_clock_open
,
4996 .llseek
= seq_lseek
,
4997 .release
= tracing_single_release_tr
,
4998 .write
= tracing_clock_write
,
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,	/* restored; lost in extraction — confirm */
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
5020 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
)
5022 struct trace_array
*tr
= inode
->i_private
;
5023 struct ftrace_buffer_info
*info
;
5026 if (tracing_disabled
)
5029 if (trace_array_get(tr
) < 0)
5032 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
5034 trace_array_put(tr
);
5038 mutex_lock(&trace_types_lock
);
5041 info
->iter
.cpu_file
= tracing_get_cpu(inode
);
5042 info
->iter
.trace
= tr
->current_trace
;
5043 info
->iter
.trace_buffer
= &tr
->trace_buffer
;
5045 /* Force reading ring buffer for first read */
5046 info
->read
= (unsigned int)-1;
5048 filp
->private_data
= info
;
5050 mutex_unlock(&trace_types_lock
);
5052 ret
= nonseekable_open(inode
, filp
);
5054 trace_array_put(tr
);
5060 tracing_buffers_poll(struct file
*filp
, poll_table
*poll_table
)
5062 struct ftrace_buffer_info
*info
= filp
->private_data
;
5063 struct trace_iterator
*iter
= &info
->iter
;
5065 return trace_poll(iter
, filp
, poll_table
);
5069 tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
5070 size_t count
, loff_t
*ppos
)
5072 struct ftrace_buffer_info
*info
= filp
->private_data
;
5073 struct trace_iterator
*iter
= &info
->iter
;
5080 mutex_lock(&trace_types_lock
);
5082 #ifdef CONFIG_TRACER_MAX_TRACE
5083 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
) {
5090 info
->spare
= ring_buffer_alloc_read_page(iter
->trace_buffer
->buffer
,
5096 /* Do we have previous read data to read? */
5097 if (info
->read
< PAGE_SIZE
)
5101 trace_access_lock(iter
->cpu_file
);
5102 ret
= ring_buffer_read_page(iter
->trace_buffer
->buffer
,
5106 trace_access_unlock(iter
->cpu_file
);
5109 if (trace_empty(iter
)) {
5110 if ((filp
->f_flags
& O_NONBLOCK
)) {
5114 mutex_unlock(&trace_types_lock
);
5115 iter
->trace
->wait_pipe(iter
);
5116 mutex_lock(&trace_types_lock
);
5117 if (signal_pending(current
)) {
5129 size
= PAGE_SIZE
- info
->read
;
5133 ret
= copy_to_user(ubuf
, info
->spare
+ info
->read
, size
);
5144 mutex_unlock(&trace_types_lock
);
5149 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
)
5151 struct ftrace_buffer_info
*info
= file
->private_data
;
5152 struct trace_iterator
*iter
= &info
->iter
;
5154 mutex_lock(&trace_types_lock
);
5156 __trace_array_put(iter
->tr
);
5159 ring_buffer_free_read_page(iter
->trace_buffer
->buffer
, info
->spare
);
5162 mutex_unlock(&trace_types_lock
);
/* Refcounted handle on one ring buffer page handed to a pipe. */
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};
5173 static void buffer_pipe_buf_release(struct pipe_inode_info
*pipe
,
5174 struct pipe_buffer
*buf
)
5176 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
5181 ring_buffer_free_read_page(ref
->buffer
, ref
->page
);
5186 static void buffer_pipe_buf_get(struct pipe_inode_info
*pipe
,
5187 struct pipe_buffer
*buf
)
5189 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
5194 /* Pipe buffer operations for a buffer. */
5195 static const struct pipe_buf_operations buffer_pipe_buf_ops
= {
5197 .map
= generic_pipe_buf_map
,
5198 .unmap
= generic_pipe_buf_unmap
,
5199 .confirm
= generic_pipe_buf_confirm
,
5200 .release
= buffer_pipe_buf_release
,
5201 .steal
= generic_pipe_buf_steal
,
5202 .get
= buffer_pipe_buf_get
,
5206 * Callback from splice_to_pipe(), if we need to release some pages
5207 * at the end of the spd in case we error'ed out in filling the pipe.
5209 static void buffer_spd_release(struct splice_pipe_desc
*spd
, unsigned int i
)
5211 struct buffer_ref
*ref
=
5212 (struct buffer_ref
*)spd
->partial
[i
].private;
5217 ring_buffer_free_read_page(ref
->buffer
, ref
->page
);
5219 spd
->partial
[i
].private = 0;
5223 tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
5224 struct pipe_inode_info
*pipe
, size_t len
,
5227 struct ftrace_buffer_info
*info
= file
->private_data
;
5228 struct trace_iterator
*iter
= &info
->iter
;
5229 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
5230 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
5231 struct splice_pipe_desc spd
= {
5233 .partial
= partial_def
,
5234 .nr_pages_max
= PIPE_DEF_BUFFERS
,
5236 .ops
= &buffer_pipe_buf_ops
,
5237 .spd_release
= buffer_spd_release
,
5239 struct buffer_ref
*ref
;
5240 int entries
, size
, i
;
5243 mutex_lock(&trace_types_lock
);
5245 #ifdef CONFIG_TRACER_MAX_TRACE
5246 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
) {
5252 if (splice_grow_spd(pipe
, &spd
)) {
5257 if (*ppos
& (PAGE_SIZE
- 1)) {
5262 if (len
& (PAGE_SIZE
- 1)) {
5263 if (len
< PAGE_SIZE
) {
5271 trace_access_lock(iter
->cpu_file
);
5272 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
5274 for (i
= 0; i
< pipe
->buffers
&& len
&& entries
; i
++, len
-= PAGE_SIZE
) {
5278 ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
5283 ref
->buffer
= iter
->trace_buffer
->buffer
;
5284 ref
->page
= ring_buffer_alloc_read_page(ref
->buffer
, iter
->cpu_file
);
5290 r
= ring_buffer_read_page(ref
->buffer
, &ref
->page
,
5291 len
, iter
->cpu_file
, 1);
5293 ring_buffer_free_read_page(ref
->buffer
, ref
->page
);
5299 * zero out any left over data, this is going to
5302 size
= ring_buffer_page_len(ref
->page
);
5303 if (size
< PAGE_SIZE
)
5304 memset(ref
->page
+ size
, 0, PAGE_SIZE
- size
);
5306 page
= virt_to_page(ref
->page
);
5308 spd
.pages
[i
] = page
;
5309 spd
.partial
[i
].len
= PAGE_SIZE
;
5310 spd
.partial
[i
].offset
= 0;
5311 spd
.partial
[i
].private = (unsigned long)ref
;
5315 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
5318 trace_access_unlock(iter
->cpu_file
);
5321 /* did we read anything? */
5322 if (!spd
.nr_pages
) {
5323 if ((file
->f_flags
& O_NONBLOCK
) || (flags
& SPLICE_F_NONBLOCK
)) {
5327 mutex_unlock(&trace_types_lock
);
5328 iter
->trace
->wait_pipe(iter
);
5329 mutex_lock(&trace_types_lock
);
5330 if (signal_pending(current
)) {
5337 ret
= splice_to_pipe(pipe
, &spd
);
5338 splice_shrink_spd(&spd
);
5340 mutex_unlock(&trace_types_lock
);
5345 static const struct file_operations tracing_buffers_fops
= {
5346 .open
= tracing_buffers_open
,
5347 .read
= tracing_buffers_read
,
5348 .poll
= tracing_buffers_poll
,
5349 .release
= tracing_buffers_release
,
5350 .splice_read
= tracing_buffers_splice_read
,
5351 .llseek
= no_llseek
,
5355 tracing_stats_read(struct file
*filp
, char __user
*ubuf
,
5356 size_t count
, loff_t
*ppos
)
5358 struct inode
*inode
= file_inode(filp
);
5359 struct trace_array
*tr
= inode
->i_private
;
5360 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
5361 int cpu
= tracing_get_cpu(inode
);
5362 struct trace_seq
*s
;
5364 unsigned long long t
;
5365 unsigned long usec_rem
;
5367 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
5373 cnt
= ring_buffer_entries_cpu(trace_buf
->buffer
, cpu
);
5374 trace_seq_printf(s
, "entries: %ld\n", cnt
);
5376 cnt
= ring_buffer_overrun_cpu(trace_buf
->buffer
, cpu
);
5377 trace_seq_printf(s
, "overrun: %ld\n", cnt
);
5379 cnt
= ring_buffer_commit_overrun_cpu(trace_buf
->buffer
, cpu
);
5380 trace_seq_printf(s
, "commit overrun: %ld\n", cnt
);
5382 cnt
= ring_buffer_bytes_cpu(trace_buf
->buffer
, cpu
);
5383 trace_seq_printf(s
, "bytes: %ld\n", cnt
);
5385 if (trace_clocks
[tr
->clock_id
].in_ns
) {
5386 /* local or global for trace_clock */
5387 t
= ns2usecs(ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
5388 usec_rem
= do_div(t
, USEC_PER_SEC
);
5389 trace_seq_printf(s
, "oldest event ts: %5llu.%06lu\n",
5392 t
= ns2usecs(ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
5393 usec_rem
= do_div(t
, USEC_PER_SEC
);
5394 trace_seq_printf(s
, "now ts: %5llu.%06lu\n", t
, usec_rem
);
5396 /* counter or tsc mode for trace_clock */
5397 trace_seq_printf(s
, "oldest event ts: %llu\n",
5398 ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
5400 trace_seq_printf(s
, "now ts: %llu\n",
5401 ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
5404 cnt
= ring_buffer_dropped_events_cpu(trace_buf
->buffer
, cpu
);
5405 trace_seq_printf(s
, "dropped events: %ld\n", cnt
);
5407 cnt
= ring_buffer_read_events_cpu(trace_buf
->buffer
, cpu
);
5408 trace_seq_printf(s
, "read events: %ld\n", cnt
);
5410 count
= simple_read_from_buffer(ubuf
, count
, ppos
, s
->buffer
, s
->len
);
5417 static const struct file_operations tracing_stats_fops
= {
5418 .open
= tracing_open_generic_tr
,
5419 .read
= tracing_stats_read
,
5420 .llseek
= generic_file_llseek
,
5421 .release
= tracing_release_generic_tr
,
5424 #ifdef CONFIG_DYNAMIC_FTRACE
5426 int __weak
ftrace_arch_read_dyn_info(char *buf
, int size
)
5432 tracing_read_dyn_info(struct file
*filp
, char __user
*ubuf
,
5433 size_t cnt
, loff_t
*ppos
)
5435 static char ftrace_dyn_info_buffer
[1024];
5436 static DEFINE_MUTEX(dyn_info_mutex
);
5437 unsigned long *p
= filp
->private_data
;
5438 char *buf
= ftrace_dyn_info_buffer
;
5439 int size
= ARRAY_SIZE(ftrace_dyn_info_buffer
);
5442 mutex_lock(&dyn_info_mutex
);
5443 r
= sprintf(buf
, "%ld ", *p
);
5445 r
+= ftrace_arch_read_dyn_info(buf
+r
, (size
-1)-r
);
5448 r
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5450 mutex_unlock(&dyn_info_mutex
);
5455 static const struct file_operations tracing_dyn_info_fops
= {
5456 .open
= tracing_open_generic
,
5457 .read
= tracing_read_dyn_info
,
5458 .llseek
= generic_file_llseek
,
5460 #endif /* CONFIG_DYNAMIC_FTRACE */
5462 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5464 ftrace_snapshot(unsigned long ip
, unsigned long parent_ip
, void **data
)
/* Probe callback for "snapshot:count": snapshot until count runs out
 * (-1 means unlimited). */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
/* Print one registered snapshot probe for set_ftrace_filter listing. */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
5501 static struct ftrace_probe_ops snapshot_probe_ops
= {
5502 .func
= ftrace_snapshot
,
5503 .print
= ftrace_snapshot_print
,
5506 static struct ftrace_probe_ops snapshot_count_probe_ops
= {
5507 .func
= ftrace_count_snapshot
,
5508 .print
= ftrace_snapshot_print
,
5512 ftrace_trace_snapshot_callback(struct ftrace_hash
*hash
,
5513 char *glob
, char *cmd
, char *param
, int enable
)
5515 struct ftrace_probe_ops
*ops
;
5516 void *count
= (void *)-1;
5520 /* hash funcs only work with set_ftrace_filter */
5524 ops
= param
? &snapshot_count_probe_ops
: &snapshot_probe_ops
;
5526 if (glob
[0] == '!') {
5527 unregister_ftrace_function_probe_func(glob
+1, ops
);
5534 number
= strsep(¶m
, ":");
5536 if (!strlen(number
))
5540 * We use the callback data field (which is a pointer)
5543 ret
= kstrtoul(number
, 0, (unsigned long *)&count
);
5548 ret
= register_ftrace_function_probe(glob
, ops
, count
);
5551 alloc_snapshot(&global_trace
);
5553 return ret
< 0 ? ret
: 0;
5556 static struct ftrace_func_command ftrace_snapshot_cmd
= {
5558 .func
= ftrace_trace_snapshot_callback
,
5561 static __init
int register_snapshot_cmd(void)
5563 return register_ftrace_command(&ftrace_snapshot_cmd
);
5566 static inline __init
int register_snapshot_cmd(void) { return 0; }
5567 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5569 struct dentry
*tracing_init_dentry_tr(struct trace_array
*tr
)
5574 if (!debugfs_initialized())
5577 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
5578 tr
->dir
= debugfs_create_dir("tracing", NULL
);
5581 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5586 struct dentry
*tracing_init_dentry(void)
5588 return tracing_init_dentry_tr(&global_trace
);
5591 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
5593 struct dentry
*d_tracer
;
5596 return tr
->percpu_dir
;
5598 d_tracer
= tracing_init_dentry_tr(tr
);
5602 tr
->percpu_dir
= debugfs_create_dir("per_cpu", d_tracer
);
5604 WARN_ONCE(!tr
->percpu_dir
,
5605 "Could not create debugfs directory 'per_cpu/%d'\n", cpu
);
5607 return tr
->percpu_dir
;
5610 static struct dentry
*
5611 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
5612 void *data
, long cpu
, const struct file_operations
*fops
)
5614 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
5616 if (ret
) /* See tracing_get_cpu() */
5617 ret
->d_inode
->i_cdev
= (void *)(cpu
+ 1);
5622 tracing_init_debugfs_percpu(struct trace_array
*tr
, long cpu
)
5624 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
5625 struct dentry
*d_cpu
;
5626 char cpu_dir
[30]; /* 30 characters should be more than enough */
5631 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
5632 d_cpu
= debugfs_create_dir(cpu_dir
, d_percpu
);
5634 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir
);
5638 /* per cpu trace_pipe */
5639 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
5640 tr
, cpu
, &tracing_pipe_fops
);
5643 trace_create_cpu_file("trace", 0644, d_cpu
,
5644 tr
, cpu
, &tracing_fops
);
5646 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
5647 tr
, cpu
, &tracing_buffers_fops
);
5649 trace_create_cpu_file("stats", 0444, d_cpu
,
5650 tr
, cpu
, &tracing_stats_fops
);
5652 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
5653 tr
, cpu
, &tracing_entries_fops
);
5655 #ifdef CONFIG_TRACER_SNAPSHOT
5656 trace_create_cpu_file("snapshot", 0644, d_cpu
,
5657 tr
, cpu
, &snapshot_fops
);
5659 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
5660 tr
, cpu
, &snapshot_raw_fops
);
5664 #ifdef CONFIG_FTRACE_SELFTEST
5665 /* Let selftest have access to static functions in this file */
5666 #include "trace_selftest.c"
5669 struct trace_option_dentry
{
5670 struct tracer_opt
*opt
;
5671 struct tracer_flags
*flags
;
5672 struct trace_array
*tr
;
5673 struct dentry
*entry
;
5677 trace_options_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
5680 struct trace_option_dentry
*topt
= filp
->private_data
;
5683 if (topt
->flags
->val
& topt
->opt
->bit
)
5688 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
5692 trace_options_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
5695 struct trace_option_dentry
*topt
= filp
->private_data
;
5699 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5703 if (val
!= 0 && val
!= 1)
5706 if (!!(topt
->flags
->val
& topt
->opt
->bit
) != val
) {
5707 mutex_lock(&trace_types_lock
);
5708 ret
= __set_tracer_option(topt
->tr
->current_trace
, topt
->flags
,
5710 mutex_unlock(&trace_types_lock
);
5721 static const struct file_operations trace_options_fops
= {
5722 .open
= tracing_open_generic
,
5723 .read
= trace_options_read
,
5724 .write
= trace_options_write
,
5725 .llseek
= generic_file_llseek
,
5729 trace_options_core_read(struct file
*filp
, char __user
*ubuf
, size_t cnt
,
5732 long index
= (long)filp
->private_data
;
5735 if (trace_flags
& (1 << index
))
5740 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, 2);
5744 trace_options_core_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
5747 struct trace_array
*tr
= &global_trace
;
5748 long index
= (long)filp
->private_data
;
5752 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5756 if (val
!= 0 && val
!= 1)
5759 mutex_lock(&trace_types_lock
);
5760 ret
= set_tracer_flag(tr
, 1 << index
, val
);
5761 mutex_unlock(&trace_types_lock
);
5771 static const struct file_operations trace_options_core_fops
= {
5772 .open
= tracing_open_generic
,
5773 .read
= trace_options_core_read
,
5774 .write
= trace_options_core_write
,
5775 .llseek
= generic_file_llseek
,
5778 struct dentry
*trace_create_file(const char *name
,
5780 struct dentry
*parent
,
5782 const struct file_operations
*fops
)
5786 ret
= debugfs_create_file(name
, mode
, parent
, data
, fops
);
5788 pr_warning("Could not create debugfs '%s' entry\n", name
);
5794 static struct dentry
*trace_options_init_dentry(struct trace_array
*tr
)
5796 struct dentry
*d_tracer
;
5801 d_tracer
= tracing_init_dentry_tr(tr
);
5805 tr
->options
= debugfs_create_dir("options", d_tracer
);
5807 pr_warning("Could not create debugfs directory 'options'\n");
5815 create_trace_option_file(struct trace_array
*tr
,
5816 struct trace_option_dentry
*topt
,
5817 struct tracer_flags
*flags
,
5818 struct tracer_opt
*opt
)
5820 struct dentry
*t_options
;
5822 t_options
= trace_options_init_dentry(tr
);
5826 topt
->flags
= flags
;
5830 topt
->entry
= trace_create_file(opt
->name
, 0644, t_options
, topt
,
5831 &trace_options_fops
);
5835 static struct trace_option_dentry
*
5836 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
)
5838 struct trace_option_dentry
*topts
;
5839 struct tracer_flags
*flags
;
5840 struct tracer_opt
*opts
;
5846 flags
= tracer
->flags
;
5848 if (!flags
|| !flags
->opts
)
5853 for (cnt
= 0; opts
[cnt
].name
; cnt
++)
5856 topts
= kcalloc(cnt
+ 1, sizeof(*topts
), GFP_KERNEL
);
5860 for (cnt
= 0; opts
[cnt
].name
; cnt
++)
5861 create_trace_option_file(tr
, &topts
[cnt
], flags
,
5868 destroy_trace_option_files(struct trace_option_dentry
*topts
)
5875 for (cnt
= 0; topts
[cnt
].opt
; cnt
++) {
5876 if (topts
[cnt
].entry
)
5877 debugfs_remove(topts
[cnt
].entry
);
5883 static struct dentry
*
5884 create_trace_option_core_file(struct trace_array
*tr
,
5885 const char *option
, long index
)
5887 struct dentry
*t_options
;
5889 t_options
= trace_options_init_dentry(tr
);
5893 return trace_create_file(option
, 0644, t_options
, (void *)index
,
5894 &trace_options_core_fops
);
5897 static __init
void create_trace_options_dir(struct trace_array
*tr
)
5899 struct dentry
*t_options
;
5902 t_options
= trace_options_init_dentry(tr
);
5906 for (i
= 0; trace_options
[i
]; i
++)
5907 create_trace_option_core_file(tr
, trace_options
[i
], i
);
5911 rb_simple_read(struct file
*filp
, char __user
*ubuf
,
5912 size_t cnt
, loff_t
*ppos
)
5914 struct trace_array
*tr
= filp
->private_data
;
5918 r
= tracer_tracing_is_on(tr
);
5919 r
= sprintf(buf
, "%d\n", r
);
5921 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5925 rb_simple_write(struct file
*filp
, const char __user
*ubuf
,
5926 size_t cnt
, loff_t
*ppos
)
5928 struct trace_array
*tr
= filp
->private_data
;
5929 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
5933 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5938 mutex_lock(&trace_types_lock
);
5940 tracer_tracing_on(tr
);
5941 if (tr
->current_trace
->start
)
5942 tr
->current_trace
->start(tr
);
5944 tracer_tracing_off(tr
);
5945 if (tr
->current_trace
->stop
)
5946 tr
->current_trace
->stop(tr
);
5948 mutex_unlock(&trace_types_lock
);
5956 static const struct file_operations rb_simple_fops
= {
5957 .open
= tracing_open_generic_tr
,
5958 .read
= rb_simple_read
,
5959 .write
= rb_simple_write
,
5960 .release
= tracing_release_generic_tr
,
5961 .llseek
= default_llseek
,
5964 struct dentry
*trace_instance_dir
;
5967 init_tracer_debugfs(struct trace_array
*tr
, struct dentry
*d_tracer
);
5970 allocate_trace_buffer(struct trace_array
*tr
, struct trace_buffer
*buf
, int size
)
5972 enum ring_buffer_flags rb_flags
;
5974 rb_flags
= trace_flags
& TRACE_ITER_OVERWRITE
? RB_FL_OVERWRITE
: 0;
5978 buf
->buffer
= ring_buffer_alloc(size
, rb_flags
);
5982 buf
->data
= alloc_percpu(struct trace_array_cpu
);
5984 ring_buffer_free(buf
->buffer
);
5988 /* Allocate the first page for all buffers */
5989 set_buffer_entries(&tr
->trace_buffer
,
5990 ring_buffer_size(tr
->trace_buffer
.buffer
, 0));
5995 static int allocate_trace_buffers(struct trace_array
*tr
, int size
)
5999 ret
= allocate_trace_buffer(tr
, &tr
->trace_buffer
, size
);
6003 #ifdef CONFIG_TRACER_MAX_TRACE
6004 ret
= allocate_trace_buffer(tr
, &tr
->max_buffer
,
6005 allocate_snapshot
? size
: 1);
6007 ring_buffer_free(tr
->trace_buffer
.buffer
);
6008 free_percpu(tr
->trace_buffer
.data
);
6011 tr
->allocated_snapshot
= allocate_snapshot
;
6014 * Only the top level trace array gets its snapshot allocated
6015 * from the kernel command line.
6017 allocate_snapshot
= false;
6022 static int new_instance_create(const char *name
)
6024 struct trace_array
*tr
;
6027 mutex_lock(&trace_types_lock
);
6030 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
6031 if (tr
->name
&& strcmp(tr
->name
, name
) == 0)
6036 tr
= kzalloc(sizeof(*tr
), GFP_KERNEL
);
6040 tr
->name
= kstrdup(name
, GFP_KERNEL
);
6044 if (!alloc_cpumask_var(&tr
->tracing_cpumask
, GFP_KERNEL
))
6047 cpumask_copy(tr
->tracing_cpumask
, cpu_all_mask
);
6049 raw_spin_lock_init(&tr
->start_lock
);
6051 tr
->current_trace
= &nop_trace
;
6053 INIT_LIST_HEAD(&tr
->systems
);
6054 INIT_LIST_HEAD(&tr
->events
);
6056 if (allocate_trace_buffers(tr
, trace_buf_size
) < 0)
6059 tr
->dir
= debugfs_create_dir(name
, trace_instance_dir
);
6063 ret
= event_trace_add_tracer(tr
->dir
, tr
);
6065 debugfs_remove_recursive(tr
->dir
);
6069 init_tracer_debugfs(tr
, tr
->dir
);
6071 list_add(&tr
->list
, &ftrace_trace_arrays
);
6073 mutex_unlock(&trace_types_lock
);
6078 if (tr
->trace_buffer
.buffer
)
6079 ring_buffer_free(tr
->trace_buffer
.buffer
);
6080 free_cpumask_var(tr
->tracing_cpumask
);
6085 mutex_unlock(&trace_types_lock
);
6091 static int instance_delete(const char *name
)
6093 struct trace_array
*tr
;
6097 mutex_lock(&trace_types_lock
);
6100 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
6101 if (tr
->name
&& strcmp(tr
->name
, name
) == 0) {
6113 list_del(&tr
->list
);
6115 event_trace_del_tracer(tr
);
6116 debugfs_remove_recursive(tr
->dir
);
6117 free_percpu(tr
->trace_buffer
.data
);
6118 ring_buffer_free(tr
->trace_buffer
.buffer
);
6126 mutex_unlock(&trace_types_lock
);
6131 static int instance_mkdir (struct inode
*inode
, struct dentry
*dentry
, umode_t mode
)
6133 struct dentry
*parent
;
6136 /* Paranoid: Make sure the parent is the "instances" directory */
6137 parent
= hlist_entry(inode
->i_dentry
.first
, struct dentry
, d_alias
);
6138 if (WARN_ON_ONCE(parent
!= trace_instance_dir
))
6142 * The inode mutex is locked, but debugfs_create_dir() will also
6143 * take the mutex. As the instances directory can not be destroyed
6144 * or changed in any other way, it is safe to unlock it, and
6145 * let the dentry try. If two users try to make the same dir at
6146 * the same time, then the new_instance_create() will determine the
6149 mutex_unlock(&inode
->i_mutex
);
6151 ret
= new_instance_create(dentry
->d_iname
);
6153 mutex_lock(&inode
->i_mutex
);
6158 static int instance_rmdir(struct inode
*inode
, struct dentry
*dentry
)
6160 struct dentry
*parent
;
6163 /* Paranoid: Make sure the parent is the "instances" directory */
6164 parent
= hlist_entry(inode
->i_dentry
.first
, struct dentry
, d_alias
);
6165 if (WARN_ON_ONCE(parent
!= trace_instance_dir
))
6168 /* The caller did a dget() on dentry */
6169 mutex_unlock(&dentry
->d_inode
->i_mutex
);
6172 * The inode mutex is locked, but debugfs_create_dir() will also
6173 * take the mutex. As the instances directory can not be destroyed
6174 * or changed in any other way, it is safe to unlock it, and
6175 * let the dentry try. If two users try to make the same dir at
6176 * the same time, then the instance_delete() will determine the
6179 mutex_unlock(&inode
->i_mutex
);
6181 ret
= instance_delete(dentry
->d_iname
);
6183 mutex_lock_nested(&inode
->i_mutex
, I_MUTEX_PARENT
);
6184 mutex_lock(&dentry
->d_inode
->i_mutex
);
6189 static const struct inode_operations instance_dir_inode_operations
= {
6190 .lookup
= simple_lookup
,
6191 .mkdir
= instance_mkdir
,
6192 .rmdir
= instance_rmdir
,
6195 static __init
void create_trace_instances(struct dentry
*d_tracer
)
6197 trace_instance_dir
= debugfs_create_dir("instances", d_tracer
);
6198 if (WARN_ON(!trace_instance_dir
))
6201 /* Hijack the dir inode operations, to allow mkdir */
6202 trace_instance_dir
->d_inode
->i_op
= &instance_dir_inode_operations
;
6206 init_tracer_debugfs(struct trace_array
*tr
, struct dentry
*d_tracer
)
6210 trace_create_file("tracing_cpumask", 0644, d_tracer
,
6211 tr
, &tracing_cpumask_fops
);
6213 trace_create_file("trace_options", 0644, d_tracer
,
6214 tr
, &tracing_iter_fops
);
6216 trace_create_file("trace", 0644, d_tracer
,
6219 trace_create_file("trace_pipe", 0444, d_tracer
,
6220 tr
, &tracing_pipe_fops
);
6222 trace_create_file("buffer_size_kb", 0644, d_tracer
,
6223 tr
, &tracing_entries_fops
);
6225 trace_create_file("buffer_total_size_kb", 0444, d_tracer
,
6226 tr
, &tracing_total_entries_fops
);
6228 trace_create_file("free_buffer", 0200, d_tracer
,
6229 tr
, &tracing_free_buffer_fops
);
6231 trace_create_file("trace_marker", 0220, d_tracer
,
6232 tr
, &tracing_mark_fops
);
6234 trace_create_file("trace_clock", 0644, d_tracer
, tr
,
6237 trace_create_file("tracing_on", 0644, d_tracer
,
6238 tr
, &rb_simple_fops
);
6240 #ifdef CONFIG_TRACER_SNAPSHOT
6241 trace_create_file("snapshot", 0644, d_tracer
,
6242 tr
, &snapshot_fops
);
6245 for_each_tracing_cpu(cpu
)
6246 tracing_init_debugfs_percpu(tr
, cpu
);
6250 static __init
int tracer_init_debugfs(void)
6252 struct dentry
*d_tracer
;
6254 trace_access_lock_init();
6256 d_tracer
= tracing_init_dentry();
6260 init_tracer_debugfs(&global_trace
, d_tracer
);
6262 trace_create_file("available_tracers", 0444, d_tracer
,
6263 &global_trace
, &show_traces_fops
);
6265 trace_create_file("current_tracer", 0644, d_tracer
,
6266 &global_trace
, &set_tracer_fops
);
6268 #ifdef CONFIG_TRACER_MAX_TRACE
6269 trace_create_file("tracing_max_latency", 0644, d_tracer
,
6270 &tracing_max_latency
, &tracing_max_lat_fops
);
6273 trace_create_file("tracing_thresh", 0644, d_tracer
,
6274 &tracing_thresh
, &tracing_max_lat_fops
);
6276 trace_create_file("README", 0444, d_tracer
,
6277 NULL
, &tracing_readme_fops
);
6279 trace_create_file("saved_cmdlines", 0444, d_tracer
,
6280 NULL
, &tracing_saved_cmdlines_fops
);
6282 #ifdef CONFIG_DYNAMIC_FTRACE
6283 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer
,
6284 &ftrace_update_tot_cnt
, &tracing_dyn_info_fops
);
6287 create_trace_instances(d_tracer
);
6289 create_trace_options_dir(&global_trace
);
6294 static int trace_panic_handler(struct notifier_block
*this,
6295 unsigned long event
, void *unused
)
6297 if (ftrace_dump_on_oops
)
6298 ftrace_dump(ftrace_dump_on_oops
);
6302 static struct notifier_block trace_panic_notifier
= {
6303 .notifier_call
= trace_panic_handler
,
6305 .priority
= 150 /* priority: INT_MAX >= x >= 0 */
6308 static int trace_die_handler(struct notifier_block
*self
,
6314 if (ftrace_dump_on_oops
)
6315 ftrace_dump(ftrace_dump_on_oops
);
6323 static struct notifier_block trace_die_notifier
= {
6324 .notifier_call
= trace_die_handler
,
6329 * printk is set to max of 1024, we really don't need it that big.
6330 * Nothing should be printing 1000 characters anyway.
6332 #define TRACE_MAX_PRINT 1000
6335 * Define here KERN_TRACE so that we have one place to modify
6336 * it if we decide to change what log level the ftrace dump
6339 #define KERN_TRACE KERN_EMERG
6342 trace_printk_seq(struct trace_seq
*s
)
6344 /* Probably should print a warning here. */
6345 if (s
->len
>= TRACE_MAX_PRINT
)
6346 s
->len
= TRACE_MAX_PRINT
;
6348 /* should be zero ended, but we are paranoid. */
6349 s
->buffer
[s
->len
] = 0;
6351 printk(KERN_TRACE
"%s", s
->buffer
);
6356 void trace_init_global_iter(struct trace_iterator
*iter
)
6358 iter
->tr
= &global_trace
;
6359 iter
->trace
= iter
->tr
->current_trace
;
6360 iter
->cpu_file
= RING_BUFFER_ALL_CPUS
;
6361 iter
->trace_buffer
= &global_trace
.trace_buffer
;
6363 if (iter
->trace
&& iter
->trace
->open
)
6364 iter
->trace
->open(iter
);
6366 /* Annotate start of buffers if we had overruns */
6367 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
6368 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
6370 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6371 if (trace_clocks
[iter
->tr
->clock_id
].in_ns
)
6372 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
6375 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode
)
6377 /* use static because iter can be a bit big for the stack */
6378 static struct trace_iterator iter
;
6379 static atomic_t dump_running
;
6380 unsigned int old_userobj
;
6381 unsigned long flags
;
6384 /* Only allow one dump user at a time. */
6385 if (atomic_inc_return(&dump_running
) != 1) {
6386 atomic_dec(&dump_running
);
6391 * Always turn off tracing when we dump.
6392 * We don't need to show trace output of what happens
6393 * between multiple crashes.
6395 * If the user does a sysrq-z, then they can re-enable
6396 * tracing with echo 1 > tracing_on.
6400 local_irq_save(flags
);
6402 /* Simulate the iterator */
6403 trace_init_global_iter(&iter
);
6405 for_each_tracing_cpu(cpu
) {
6406 atomic_inc(&per_cpu_ptr(iter
.tr
->trace_buffer
.data
, cpu
)->disabled
);
6409 old_userobj
= trace_flags
& TRACE_ITER_SYM_USEROBJ
;
6411 /* don't look at user memory in panic mode */
6412 trace_flags
&= ~TRACE_ITER_SYM_USEROBJ
;
6414 switch (oops_dump_mode
) {
6416 iter
.cpu_file
= RING_BUFFER_ALL_CPUS
;
6419 iter
.cpu_file
= raw_smp_processor_id();
6424 printk(KERN_TRACE
"Bad dumping mode, switching to all CPUs dump\n");
6425 iter
.cpu_file
= RING_BUFFER_ALL_CPUS
;
6428 printk(KERN_TRACE
"Dumping ftrace buffer:\n");
6430 /* Did function tracer already get disabled? */
6431 if (ftrace_is_dead()) {
6432 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6433 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6437 * We need to stop all tracing on all CPUS to read the
6438 * the next buffer. This is a bit expensive, but is
6439 * not done often. We fill all what we can read,
6440 * and then release the locks again.
6443 while (!trace_empty(&iter
)) {
6446 printk(KERN_TRACE
"---------------------------------\n");
6450 /* reset all but tr, trace, and overruns */
6451 memset(&iter
.seq
, 0,
6452 sizeof(struct trace_iterator
) -
6453 offsetof(struct trace_iterator
, seq
));
6454 iter
.iter_flags
|= TRACE_FILE_LAT_FMT
;
6457 if (trace_find_next_entry_inc(&iter
) != NULL
) {
6460 ret
= print_trace_line(&iter
);
6461 if (ret
!= TRACE_TYPE_NO_CONSUME
)
6462 trace_consume(&iter
);
6464 touch_nmi_watchdog();
6466 trace_printk_seq(&iter
.seq
);
6470 printk(KERN_TRACE
" (ftrace buffer empty)\n");
6472 printk(KERN_TRACE
"---------------------------------\n");
6475 trace_flags
|= old_userobj
;
6477 for_each_tracing_cpu(cpu
) {
6478 atomic_dec(&per_cpu_ptr(iter
.trace_buffer
->data
, cpu
)->disabled
);
6480 atomic_dec(&dump_running
);
6481 local_irq_restore(flags
);
6483 EXPORT_SYMBOL_GPL(ftrace_dump
);
6485 __init
static int tracer_alloc_buffers(void)
6491 if (!alloc_cpumask_var(&tracing_buffer_mask
, GFP_KERNEL
))
6494 if (!alloc_cpumask_var(&global_trace
.tracing_cpumask
, GFP_KERNEL
))
6495 goto out_free_buffer_mask
;
6497 /* Only allocate trace_printk buffers if a trace_printk exists */
6498 if (__stop___trace_bprintk_fmt
!= __start___trace_bprintk_fmt
)
6499 /* Must be called before global_trace.buffer is allocated */
6500 trace_printk_init_buffers();
6502 /* To save memory, keep the ring buffer size to its minimum */
6503 if (ring_buffer_expanded
)
6504 ring_buf_size
= trace_buf_size
;
6508 cpumask_copy(tracing_buffer_mask
, cpu_possible_mask
);
6509 cpumask_copy(global_trace
.tracing_cpumask
, cpu_all_mask
);
6511 raw_spin_lock_init(&global_trace
.start_lock
);
6513 /* Used for event triggers */
6514 temp_buffer
= ring_buffer_alloc(PAGE_SIZE
, RB_FL_OVERWRITE
);
6516 goto out_free_cpumask
;
6518 /* TODO: make the number of buffers hot pluggable with CPUS */
6519 if (allocate_trace_buffers(&global_trace
, ring_buf_size
) < 0) {
6520 printk(KERN_ERR
"tracer: failed to allocate ring buffer!\n");
6522 goto out_free_temp_buffer
;
6525 if (global_trace
.buffer_disabled
)
6528 trace_init_cmdlines();
6531 * register_tracer() might reference current_trace, so it
6532 * needs to be set before we register anything. This is
6533 * just a bootstrap of current_trace anyway.
6535 global_trace
.current_trace
= &nop_trace
;
6537 register_tracer(&nop_trace
);
6539 /* All seems OK, enable tracing */
6540 tracing_disabled
= 0;
6542 atomic_notifier_chain_register(&panic_notifier_list
,
6543 &trace_panic_notifier
);
6545 register_die_notifier(&trace_die_notifier
);
6547 global_trace
.flags
= TRACE_ARRAY_FL_GLOBAL
;
6549 INIT_LIST_HEAD(&global_trace
.systems
);
6550 INIT_LIST_HEAD(&global_trace
.events
);
6551 list_add(&global_trace
.list
, &ftrace_trace_arrays
);
6553 while (trace_boot_options
) {
6556 option
= strsep(&trace_boot_options
, ",");
6557 trace_set_options(&global_trace
, option
);
6560 register_snapshot_cmd();
6564 out_free_temp_buffer
:
6565 ring_buffer_free(temp_buffer
);
6567 free_percpu(global_trace
.trace_buffer
.data
);
6568 #ifdef CONFIG_TRACER_MAX_TRACE
6569 free_percpu(global_trace
.max_buffer
.data
);
6571 free_cpumask_var(global_trace
.tracing_cpumask
);
6572 out_free_buffer_mask
:
6573 free_cpumask_var(tracing_buffer_mask
);
6578 __init
static int clear_boot_tracer(void)
6581 * The default tracer at boot buffer is an init section.
6582 * This function is called in lateinit. If we did not
6583 * find the boot tracer, then clear it out, to prevent
6584 * later registration from accessing the buffer that is
6585 * about to be freed.
6587 if (!default_bootup_tracer
)
6590 printk(KERN_INFO
"ftrace bootup tracer '%s' not registered.\n",
6591 default_bootup_tracer
);
6592 default_bootup_tracer
= NULL
;
6597 early_initcall(tracer_alloc_buffers
);
6598 fs_initcall(tracer_init_debugfs
);
6599 late_initcall(clear_boot_tracer
);