/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
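
/*
 * Usage sketch (illustrative): on the kernel command line,
 *
 *	ftrace_dump_on_oops		dumps every CPU's buffer on an oops
 *	ftrace_dump_on_oops=orig_cpu	dumps only the CPU that oopsed
 *
 * which correspond to DUMP_ALL and DUMP_ORIG above.
 */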

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
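
/*
 * Usage sketch (illustrative): "trace_clock=global" on the kernel command
 * line only stashes the clock name here; it is applied once the tracing
 * ring buffer is set up. The valid names are the ones listed in the clock
 * table further down (local, global, counter, uptime, perf).
 */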

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
219 int trace_array_get(struct trace_array
*this_tr
)
221 struct trace_array
*tr
;
224 mutex_lock(&trace_types_lock
);
225 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
232 mutex_unlock(&trace_types_lock
);
237 static void __trace_array_put(struct trace_array
*this_tr
)
239 WARN_ON(!this_tr
->ref
);
243 void trace_array_put(struct trace_array
*this_tr
)
245 mutex_lock(&trace_types_lock
);
246 __trace_array_put(this_tr
);
247 mutex_unlock(&trace_types_lock
);
250 int filter_check_discard(struct ftrace_event_file
*file
, void *rec
,
251 struct ring_buffer
*buffer
,
252 struct ring_buffer_event
*event
)
254 if (unlikely(file
->flags
& FTRACE_EVENT_FL_FILTERED
) &&
255 !filter_match_preds(file
->filter
, rec
)) {
256 ring_buffer_discard_commit(buffer
, event
);
262 EXPORT_SYMBOL_GPL(filter_check_discard
);
264 int call_filter_check_discard(struct ftrace_event_call
*call
, void *rec
,
265 struct ring_buffer
*buffer
,
266 struct ring_buffer_event
*event
)
268 if (unlikely(call
->flags
& TRACE_EVENT_FL_FILTERED
) &&
269 !filter_match_preds(call
->filter
, rec
)) {
270 ring_buffer_discard_commit(buffer
, event
);
276 EXPORT_SYMBOL_GPL(call_filter_check_discard
);
278 static cycle_t
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
282 /* Early boot up does not have a buffer yet */
284 return trace_clock_local();
286 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
287 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
292 cycle_t
ftrace_now(int cpu
)
294 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different per-cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
365 static inline void trace_access_lock(int cpu
)
367 if (cpu
== RING_BUFFER_ALL_CPUS
) {
368 /* gain it for accessing the whole ring buffer. */
369 down_write(&all_cpu_access_lock
);
371 /* gain it for accessing a cpu ring buffer. */
373 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
374 down_read(&all_cpu_access_lock
);
376 /* Secondly block other access to this @cpu ring buffer. */
377 mutex_lock(&per_cpu(cpu_access_lock
, cpu
));
381 static inline void trace_access_unlock(int cpu
)
383 if (cpu
== RING_BUFFER_ALL_CPUS
) {
384 up_write(&all_cpu_access_lock
);
386 mutex_unlock(&per_cpu(cpu_access_lock
, cpu
));
387 up_read(&all_cpu_access_lock
);
391 static inline void trace_access_lock_init(void)
395 for_each_possible_cpu(cpu
)
396 mutex_init(&per_cpu(cpu_access_lock
, cpu
));
401 static DEFINE_MUTEX(access_lock
);
403 static inline void trace_access_lock(int cpu
)
406 mutex_lock(&access_lock
);
409 static inline void trace_access_unlock(int cpu
)
412 mutex_unlock(&access_lock
);
415 static inline void trace_access_lock_init(void)
421 /* trace_flags holds trace_options default values */
422 unsigned long trace_flags
= TRACE_ITER_PRINT_PARENT
| TRACE_ITER_PRINTK
|
423 TRACE_ITER_ANNOTATE
| TRACE_ITER_CONTEXT_INFO
| TRACE_ITER_SLEEP_TIME
|
424 TRACE_ITER_GRAPH_TIME
| TRACE_ITER_RECORD_CMD
| TRACE_ITER_OVERWRITE
|
425 TRACE_ITER_IRQ_INFO
| TRACE_ITER_MARKERS
| TRACE_ITER_FUNCTION
;
427 static void tracer_tracing_on(struct trace_array
*tr
)
429 if (tr
->trace_buffer
.buffer
)
430 ring_buffer_record_on(tr
->trace_buffer
.buffer
);
432 * This flag is looked at when buffers haven't been allocated
433 * yet, or by some tracers (like irqsoff), that just want to
434 * know if the ring buffer has been disabled, but it can handle
435 * races of where it gets disabled but we still do a record.
436 * As the check is in the fast path of the tracers, it is more
437 * important to be fast than accurate.
439 tr
->buffer_disabled
= 0;
440 /* Make the flag seen by readers */
445 * tracing_on - enable tracing buffers
447 * This function enables tracing buffers that may have been
448 * disabled with tracing_off.
450 void tracing_on(void)
452 tracer_tracing_on(&global_trace
);
454 EXPORT_SYMBOL_GPL(tracing_on
);
457 * __trace_puts - write a constant string into the trace buffer.
458 * @ip: The address of the caller
459 * @str: The constant string to write
460 * @size: The size of the string.
462 int __trace_puts(unsigned long ip
, const char *str
, int size
)
464 struct ring_buffer_event
*event
;
465 struct ring_buffer
*buffer
;
466 struct print_entry
*entry
;
467 unsigned long irq_flags
;
470 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
473 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
475 local_save_flags(irq_flags
);
476 buffer
= global_trace
.trace_buffer
.buffer
;
477 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
478 irq_flags
, preempt_count());
482 entry
= ring_buffer_event_data(event
);
485 memcpy(&entry
->buf
, str
, size
);
487 /* Add a newline if necessary */
488 if (entry
->buf
[size
- 1] != '\n') {
489 entry
->buf
[size
] = '\n';
490 entry
->buf
[size
+ 1] = '\0';
492 entry
->buf
[size
] = '\0';
494 __buffer_unlock_commit(buffer
, event
);
498 EXPORT_SYMBOL_GPL(__trace_puts
);
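
/*
 * Usage sketch (illustrative): callers normally go through the trace_puts()
 * macro from <linux/kernel.h>, which resolves to __trace_bputs() for
 * built-in constant strings and to __trace_puts() otherwise, e.g.:
 *
 *	trace_puts("hit the slow path\n");
 */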
501 * __trace_bputs - write the pointer to a constant string into trace buffer
502 * @ip: The address of the caller
503 * @str: The constant string to write to the buffer to
505 int __trace_bputs(unsigned long ip
, const char *str
)
507 struct ring_buffer_event
*event
;
508 struct ring_buffer
*buffer
;
509 struct bputs_entry
*entry
;
510 unsigned long irq_flags
;
511 int size
= sizeof(struct bputs_entry
);
513 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
516 local_save_flags(irq_flags
);
517 buffer
= global_trace
.trace_buffer
.buffer
;
518 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
519 irq_flags
, preempt_count());
523 entry
= ring_buffer_event_data(event
);
527 __buffer_unlock_commit(buffer
, event
);
531 EXPORT_SYMBOL_GPL(__trace_bputs
);
533 #ifdef CONFIG_TRACER_SNAPSHOT
535 * trace_snapshot - take a snapshot of the current buffer.
537 * This causes a swap between the snapshot buffer and the current live
538 * tracing buffer. You can use this to take snapshots of the live
539 * trace when some condition is triggered, but continue to trace.
541 * Note, make sure to allocate the snapshot with either
542 * a tracing_snapshot_alloc(), or by doing it manually
543 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
545 * If the snapshot buffer is not allocated, it will stop tracing.
546 * Basically making a permanent snapshot.
548 void tracing_snapshot(void)
550 struct trace_array
*tr
= &global_trace
;
551 struct tracer
*tracer
= tr
->current_trace
;
555 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
556 internal_trace_puts("*** snapshot is being ignored ***\n");
560 if (!tr
->allocated_snapshot
) {
561 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
562 internal_trace_puts("*** stopping trace here! ***\n");
567 /* Note, snapshot can not be used when the tracer uses it */
568 if (tracer
->use_max_tr
) {
569 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
570 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
574 local_irq_save(flags
);
575 update_max_tr(tr
, current
, smp_processor_id());
576 local_irq_restore(flags
);
578 EXPORT_SYMBOL_GPL(tracing_snapshot
);
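
/*
 * Usage sketch (illustrative): the snapshot buffer must exist before
 * tracing_snapshot() is called, either via
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * from user space, or from kernel code that is allowed to sleep:
 *
 *	tracing_alloc_snapshot();	 // just allocate the spare buffer
 *	...
 *	if (hit_condition)
 *		tracing_snapshot();	 // swap the live buffer with the snapshot
 */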
580 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
581 struct trace_buffer
*size_buf
, int cpu_id
);
582 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
584 static int alloc_snapshot(struct trace_array
*tr
)
588 if (!tr
->allocated_snapshot
) {
590 /* allocate spare buffer */
591 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
592 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
596 tr
->allocated_snapshot
= true;

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
616 * tracing_alloc_snapshot - allocate snapshot buffer.
618 * This only allocates the snapshot buffer if it isn't already
619 * allocated - it doesn't also take a snapshot.
621 * This is meant to be used in cases where the snapshot buffer needs
622 * to be set up for events that can't sleep but need to be able to
623 * trigger a snapshot.
625 int tracing_alloc_snapshot(void)
627 struct trace_array
*tr
= &global_trace
;
630 ret
= alloc_snapshot(tr
);
635 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
638 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
640 * This is similar to trace_snapshot(), but it will allocate the
641 * snapshot buffer if it isn't already allocated. Use this only
642 * where it is safe to sleep, as the allocation may sleep.
644 * This causes a swap between the snapshot buffer and the current live
645 * tracing buffer. You can use this to take snapshots of the live
646 * trace when some condition is triggered, but continue to trace.
648 void tracing_snapshot_alloc(void)
652 ret
= tracing_alloc_snapshot();
658 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
660 void tracing_snapshot(void)
662 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
664 EXPORT_SYMBOL_GPL(tracing_snapshot
);
665 int tracing_alloc_snapshot(void)
667 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
670 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
671 void tracing_snapshot_alloc(void)
676 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
677 #endif /* CONFIG_TRACER_SNAPSHOT */
679 static void tracer_tracing_off(struct trace_array
*tr
)
681 if (tr
->trace_buffer
.buffer
)
682 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
684 * This flag is looked at when buffers haven't been allocated
685 * yet, or by some tracers (like irqsoff), that just want to
686 * know if the ring buffer has been disabled, but it can handle
687 * races of where it gets disabled but we still do a record.
688 * As the check is in the fast path of the tracers, it is more
689 * important to be fast than accurate.
691 tr
->buffer_disabled
= 1;
692 /* Make the flag seen by readers */
697 * tracing_off - turn off tracing buffers
699 * This function stops the tracing buffers from recording data.
700 * It does not disable any overhead the tracers themselves may
701 * be causing. This function simply causes all recording to
702 * the ring buffers to fail.
704 void tracing_off(void)
706 tracer_tracing_off(&global_trace
);
708 EXPORT_SYMBOL_GPL(tracing_off
);
710 void disable_trace_on_warning(void)
712 if (__disable_trace_on_warning
)
717 * tracer_tracing_is_on - show real state of ring buffer enabled
718 * @tr : the trace array to know if ring buffer is enabled
720 * Shows real state of the ring buffer if it is enabled or not.
722 static int tracer_tracing_is_on(struct trace_array
*tr
)
724 if (tr
->trace_buffer
.buffer
)
725 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
726 return !tr
->buffer_disabled
;
730 * tracing_is_on - show state of ring buffers enabled
732 int tracing_is_on(void)
734 return tracer_tracing_is_on(&global_trace
);
736 EXPORT_SYMBOL_GPL(tracing_is_on
);
738 static int __init
set_buf_size(char *str
)
740 unsigned long buf_size
;
744 buf_size
= memparse(str
, &str
);
745 /* nr_entries can not be zero */
748 trace_buf_size
= buf_size
;
751 __setup("trace_buf_size=", set_buf_size
);
753 static int __init
set_tracing_thresh(char *str
)
755 unsigned long threshold
;
760 ret
= kstrtoul(str
, 0, &threshold
);
763 tracing_thresh
= threshold
* 1000;
766 __setup("tracing_thresh=", set_tracing_thresh
);
768 unsigned long nsecs_to_usecs(unsigned long nsecs
)
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
807 int in_ns
; /* is this clock in nanoseconds? */
809 { trace_clock_local
, "local", 1 },
810 { trace_clock_global
, "global", 1 },
811 { trace_clock_counter
, "counter", 0 },
812 { trace_clock_jiffies
, "uptime", 1 },
813 { trace_clock
, "perf", 1 },
818 * trace_parser_get_init - gets the buffer for trace parser
820 int trace_parser_get_init(struct trace_parser
*parser
, int size
)
822 memset(parser
, 0, sizeof(*parser
));
824 parser
->buffer
= kmalloc(size
, GFP_KERNEL
);
833 * trace_parser_put - frees the buffer for trace parser
835 void trace_parser_put(struct trace_parser
*parser
)
837 kfree(parser
->buffer
);
841 * trace_get_user - reads the user input string separated by space
842 * (matched by isspace(ch))
844 * For each string found the 'struct trace_parser' is updated,
845 * and the function returns.
847 * Returns number of bytes read.
849 * See kernel/trace/trace.h for 'struct trace_parser' details.
851 int trace_get_user(struct trace_parser
*parser
, const char __user
*ubuf
,
852 size_t cnt
, loff_t
*ppos
)
859 trace_parser_clear(parser
);
861 ret
= get_user(ch
, ubuf
++);
869 * The parser is not finished with the last write,
870 * continue reading the user input without skipping spaces.
873 /* skip white space */
874 while (cnt
&& isspace(ch
)) {
875 ret
= get_user(ch
, ubuf
++);
882 /* only spaces were written */
892 /* read the non-space input */
893 while (cnt
&& !isspace(ch
)) {
894 if (parser
->idx
< parser
->size
- 1)
895 parser
->buffer
[parser
->idx
++] = ch
;
900 ret
= get_user(ch
, ubuf
++);
907 /* We either got finished input or we have to wait for another call. */
909 parser
->buffer
[parser
->idx
] = 0;
910 parser
->cont
= false;
911 } else if (parser
->idx
< parser
->size
- 1) {
913 parser
->buffer
[parser
->idx
++] = ch
;
926 ssize_t
trace_seq_to_user(struct trace_seq
*s
, char __user
*ubuf
, size_t cnt
)
934 if (s
->len
<= s
->readpos
)
937 len
= s
->len
- s
->readpos
;
940 ret
= copy_to_user(ubuf
, s
->buffer
+ s
->readpos
, cnt
);
950 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
954 if (s
->len
<= s
->readpos
)
957 len
= s
->len
- s
->readpos
;
960 memcpy(buf
, s
->buffer
+ s
->readpos
, cnt
);
966 unsigned long __read_mostly tracing_thresh
;
968 #ifdef CONFIG_TRACER_MAX_TRACE
970 * Copy the new maximum trace into the separate maximum-trace
971 * structure. (this way the maximum trace is permanently saved,
972 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
975 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
977 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
978 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
979 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
980 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
983 max_buf
->time_start
= data
->preempt_timestamp
;
985 max_data
->saved_latency
= tr
->max_latency
;
986 max_data
->critical_start
= data
->critical_start
;
987 max_data
->critical_end
= data
->critical_end
;
989 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
990 max_data
->pid
= tsk
->pid
;
992 * If tsk == current, then use current_uid(), as that does not use
993 * RCU. The irq tracer can be called out of RCU scope.
996 max_data
->uid
= current_uid();
998 max_data
->uid
= task_uid(tsk
);
1000 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1001 max_data
->policy
= tsk
->policy
;
1002 max_data
->rt_priority
= tsk
->rt_priority
;
	/* record this task's comm */
	tracing_record_cmdline(tsk);
1009 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1011 * @tsk: the task with the latency
1012 * @cpu: The cpu that initiated the trace.
1014 * Flip the buffers between the @tr and the max_tr and record information
1015 * about which task was the cause of this latency.
1018 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1020 struct ring_buffer
*buf
;
1025 WARN_ON_ONCE(!irqs_disabled());
1027 if (!tr
->allocated_snapshot
) {
1028 /* Only the nop tracer should hit this when disabling */
1029 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1033 arch_spin_lock(&tr
->max_lock
);
1035 buf
= tr
->trace_buffer
.buffer
;
1036 tr
->trace_buffer
.buffer
= tr
->max_buffer
.buffer
;
1037 tr
->max_buffer
.buffer
= buf
;
1039 __update_max_tr(tr
, tsk
, cpu
);
1040 arch_spin_unlock(&tr
->max_lock
);
1044 * update_max_tr_single - only copy one trace over, and reset the rest
1046 * @tsk - task with the latency
1047 * @cpu - the cpu of the buffer to copy.
1049 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1052 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1059 WARN_ON_ONCE(!irqs_disabled());
1060 if (!tr
->allocated_snapshot
) {
1061 /* Only the nop tracer should hit this when disabling */
1062 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1066 arch_spin_lock(&tr
->max_lock
);
1068 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1070 if (ret
== -EBUSY
) {
1072 * We failed to swap the buffer due to a commit taking
1073 * place on this CPU. We fail to record, but we reset
1074 * the max trace buffer (no one writes directly to it)
1075 * and flag that it failed.
1077 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1078 "Failed to swap buffers due to commit in progress\n");
1081 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1083 __update_max_tr(tr
, tsk
, cpu
);
1084 arch_spin_unlock(&tr
->max_lock
);
1086 #endif /* CONFIG_TRACER_MAX_TRACE */
1088 static void wait_on_pipe(struct trace_iterator
*iter
)
1090 /* Iterators are static, they should be filled or empty */
1091 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1094 ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
1097 #ifdef CONFIG_FTRACE_STARTUP_TEST
1098 static int run_tracer_selftest(struct tracer
*type
)
1100 struct trace_array
*tr
= &global_trace
;
1101 struct tracer
*saved_tracer
= tr
->current_trace
;
1104 if (!type
->selftest
|| tracing_selftest_disabled
)
1108 * Run a selftest on this tracer.
1109 * Here we reset the trace buffer, and set the current
1110 * tracer to be this tracer. The tracer can then run some
1111 * internal tracing to verify that everything is in order.
1112 * If we fail, we do not register this tracer.
1114 tracing_reset_online_cpus(&tr
->trace_buffer
);
1116 tr
->current_trace
= type
;
1118 #ifdef CONFIG_TRACER_MAX_TRACE
1119 if (type
->use_max_tr
) {
1120 /* If we expanded the buffers, make sure the max is expanded too */
1121 if (ring_buffer_expanded
)
1122 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1123 RING_BUFFER_ALL_CPUS
);
1124 tr
->allocated_snapshot
= true;
1128 /* the test is responsible for initializing and enabling */
1129 pr_info("Testing tracer %s: ", type
->name
);
1130 ret
= type
->selftest(type
, tr
);
1131 /* the test is responsible for resetting too */
1132 tr
->current_trace
= saved_tracer
;
1134 printk(KERN_CONT
"FAILED!\n");
1135 /* Add the warning after printing 'FAILED' */
1139 /* Only reset on passing, to avoid touching corrupted buffers */
1140 tracing_reset_online_cpus(&tr
->trace_buffer
);
1142 #ifdef CONFIG_TRACER_MAX_TRACE
1143 if (type
->use_max_tr
) {
1144 tr
->allocated_snapshot
= false;
1146 /* Shrink the max buffer again */
1147 if (ring_buffer_expanded
)
1148 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1149 RING_BUFFER_ALL_CPUS
);
1153 printk(KERN_CONT
"PASSED\n");
1157 static inline int run_tracer_selftest(struct tracer
*type
)
1161 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1164 * register_tracer - register a tracer with the ftrace system.
1165 * @type - the plugin for the tracer
1167 * Register a new plugin tracer.
1169 int register_tracer(struct tracer
*type
)
1175 pr_info("Tracer must have a name\n");
1179 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1180 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1184 mutex_lock(&trace_types_lock
);
1186 tracing_selftest_running
= true;
1188 for (t
= trace_types
; t
; t
= t
->next
) {
1189 if (strcmp(type
->name
, t
->name
) == 0) {
1191 pr_info("Tracer %s already registered\n",
1198 if (!type
->set_flag
)
1199 type
->set_flag
= &dummy_set_flag
;
1201 type
->flags
= &dummy_tracer_flags
;
1203 if (!type
->flags
->opts
)
1204 type
->flags
->opts
= dummy_tracer_opt
;
1206 ret
= run_tracer_selftest(type
);
1210 type
->next
= trace_types
;
1214 tracing_selftest_running
= false;
1215 mutex_unlock(&trace_types_lock
);
1217 if (ret
|| !default_bootup_tracer
)
1220 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1223 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1224 /* Do we want this tracer to start on bootup? */
1225 tracing_set_tracer(&global_trace
, type
->name
);
1226 default_bootup_tracer
= NULL
;
1227 /* disable other selftests, since this will break it. */
1228 tracing_selftest_disabled
= true;
1229 #ifdef CONFIG_FTRACE_STARTUP_TEST
1230 printk(KERN_INFO
"Disabling FTRACE selftests due to running tracer '%s'\n",
1238 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1240 struct ring_buffer
*buffer
= buf
->buffer
;
1245 ring_buffer_record_disable(buffer
);
1247 /* Make sure all commits have finished */
1248 synchronize_sched();
1249 ring_buffer_reset_cpu(buffer
, cpu
);
1251 ring_buffer_record_enable(buffer
);
1254 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1256 struct ring_buffer
*buffer
= buf
->buffer
;
1262 ring_buffer_record_disable(buffer
);
1264 /* Make sure all commits have finished */
1265 synchronize_sched();
1267 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1269 for_each_online_cpu(cpu
)
1270 ring_buffer_reset_cpu(buffer
, cpu
);
1272 ring_buffer_record_enable(buffer
);
1275 /* Must have trace_types_lock held */
1276 void tracing_reset_all_online_cpus(void)
1278 struct trace_array
*tr
;
1280 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1281 tracing_reset_online_cpus(&tr
->trace_buffer
);
1282 #ifdef CONFIG_TRACER_MAX_TRACE
1283 tracing_reset_online_cpus(&tr
->max_buffer
);
1288 #define SAVED_CMDLINES 128
1289 #define NO_CMDLINE_MAP UINT_MAX
1290 static unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1291 static unsigned map_cmdline_to_pid
[SAVED_CMDLINES
];
1292 static char saved_cmdlines
[SAVED_CMDLINES
][TASK_COMM_LEN
];
1293 static int cmdline_idx
;
1294 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
1299 static void trace_init_cmdlines(void)
1301 memset(&map_pid_to_cmdline
, NO_CMDLINE_MAP
, sizeof(map_pid_to_cmdline
));
1302 memset(&map_cmdline_to_pid
, NO_CMDLINE_MAP
, sizeof(map_cmdline_to_pid
));
1306 int is_tracing_stopped(void)
1308 return global_trace
.stop_count
;
1312 * tracing_start - quick start of the tracer
1314 * If tracing is enabled but was stopped by tracing_stop,
1315 * this will start the tracer back up.
1317 void tracing_start(void)
1319 struct ring_buffer
*buffer
;
1320 unsigned long flags
;
1322 if (tracing_disabled
)
1325 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1326 if (--global_trace
.stop_count
) {
1327 if (global_trace
.stop_count
< 0) {
1328 /* Someone screwed up their debugging */
1330 global_trace
.stop_count
= 0;
1335 /* Prevent the buffers from switching */
1336 arch_spin_lock(&global_trace
.max_lock
);
1338 buffer
= global_trace
.trace_buffer
.buffer
;
1340 ring_buffer_record_enable(buffer
);
1342 #ifdef CONFIG_TRACER_MAX_TRACE
1343 buffer
= global_trace
.max_buffer
.buffer
;
1345 ring_buffer_record_enable(buffer
);
1348 arch_spin_unlock(&global_trace
.max_lock
);
1352 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1355 static void tracing_start_tr(struct trace_array
*tr
)
1357 struct ring_buffer
*buffer
;
1358 unsigned long flags
;
1360 if (tracing_disabled
)
1363 /* If global, we need to also start the max tracer */
1364 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1365 return tracing_start();
1367 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1369 if (--tr
->stop_count
) {
1370 if (tr
->stop_count
< 0) {
1371 /* Someone screwed up their debugging */
1378 buffer
= tr
->trace_buffer
.buffer
;
1380 ring_buffer_record_enable(buffer
);
1383 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1387 * tracing_stop - quick stop of the tracer
1389 * Light weight way to stop tracing. Use in conjunction with
1392 void tracing_stop(void)
1394 struct ring_buffer
*buffer
;
1395 unsigned long flags
;
1398 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1399 if (global_trace
.stop_count
++)
1402 /* Prevent the buffers from switching */
1403 arch_spin_lock(&global_trace
.max_lock
);
1405 buffer
= global_trace
.trace_buffer
.buffer
;
1407 ring_buffer_record_disable(buffer
);
1409 #ifdef CONFIG_TRACER_MAX_TRACE
1410 buffer
= global_trace
.max_buffer
.buffer
;
1412 ring_buffer_record_disable(buffer
);
1415 arch_spin_unlock(&global_trace
.max_lock
);
1418 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1421 static void tracing_stop_tr(struct trace_array
*tr
)
1423 struct ring_buffer
*buffer
;
1424 unsigned long flags
;
1426 /* If global, we need to also stop the max tracer */
1427 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1428 return tracing_stop();
1430 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1431 if (tr
->stop_count
++)
1434 buffer
= tr
->trace_buffer
.buffer
;
1436 ring_buffer_record_disable(buffer
);
1439 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1442 void trace_stop_cmdline_recording(void);
1444 static void trace_save_cmdline(struct task_struct
*tsk
)
1448 if (!tsk
->pid
|| unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1452 * It's not the end of the world if we don't get
1453 * the lock, but we also don't want to spin
1454 * nor do we want to disable interrupts,
1455 * so if we miss here, then better luck next time.
1457 if (!arch_spin_trylock(&trace_cmdline_lock
))
1460 idx
= map_pid_to_cmdline
[tsk
->pid
];
1461 if (idx
== NO_CMDLINE_MAP
) {
1462 idx
= (cmdline_idx
+ 1) % SAVED_CMDLINES
;
1465 * Check whether the cmdline buffer at idx has a pid
1466 * mapped. We are going to overwrite that entry so we
1467 * need to clear the map_pid_to_cmdline. Otherwise we
1468 * would read the new comm for the old pid.
1470 pid
= map_cmdline_to_pid
[idx
];
1471 if (pid
!= NO_CMDLINE_MAP
)
1472 map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1474 map_cmdline_to_pid
[idx
] = tsk
->pid
;
1475 map_pid_to_cmdline
[tsk
->pid
] = idx
;
1480 memcpy(&saved_cmdlines
[idx
], tsk
->comm
, TASK_COMM_LEN
);
1482 arch_spin_unlock(&trace_cmdline_lock
);
1485 void trace_find_cmdline(int pid
, char comm
[])
1490 strcpy(comm
, "<idle>");
1494 if (WARN_ON_ONCE(pid
< 0)) {
1495 strcpy(comm
, "<XXX>");
1499 if (pid
> PID_MAX_DEFAULT
) {
1500 strcpy(comm
, "<...>");
1505 arch_spin_lock(&trace_cmdline_lock
);
1506 map
= map_pid_to_cmdline
[pid
];
1507 if (map
!= NO_CMDLINE_MAP
)
1508 strcpy(comm
, saved_cmdlines
[map
]);
1510 strcpy(comm
, "<...>");
1512 arch_spin_unlock(&trace_cmdline_lock
);
1516 void tracing_record_cmdline(struct task_struct
*tsk
)
1518 if (atomic_read(&trace_record_cmdline_disabled
) || !tracing_is_on())
1521 if (!__this_cpu_read(trace_cmdline_save
))
1524 __this_cpu_write(trace_cmdline_save
, false);
1526 trace_save_cmdline(tsk
);
1530 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
1533 struct task_struct
*tsk
= current
;
1535 entry
->preempt_count
= pc
& 0xff;
1536 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
1538 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1539 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
1541 TRACE_FLAG_IRQS_NOSUPPORT
|
1543 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
1544 ((pc
& SOFTIRQ_MASK
) ? TRACE_FLAG_SOFTIRQ
: 0) |
1545 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
1546 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
1548 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
1550 struct ring_buffer_event
*
1551 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
1554 unsigned long flags
, int pc
)
1556 struct ring_buffer_event
*event
;
1558 event
= ring_buffer_lock_reserve(buffer
, len
);
1559 if (event
!= NULL
) {
1560 struct trace_entry
*ent
= ring_buffer_event_data(event
);
1562 tracing_generic_entry_update(ent
, flags
, pc
);
1570 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
1572 __this_cpu_write(trace_cmdline_save
, true);
1573 ring_buffer_unlock_commit(buffer
, event
);
1577 __trace_buffer_unlock_commit(struct ring_buffer
*buffer
,
1578 struct ring_buffer_event
*event
,
1579 unsigned long flags
, int pc
)
1581 __buffer_unlock_commit(buffer
, event
);
1583 ftrace_trace_stack(buffer
, flags
, 6, pc
);
1584 ftrace_trace_userstack(buffer
, flags
, pc
);
1587 void trace_buffer_unlock_commit(struct ring_buffer
*buffer
,
1588 struct ring_buffer_event
*event
,
1589 unsigned long flags
, int pc
)
1591 __trace_buffer_unlock_commit(buffer
, event
, flags
, pc
);
1593 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit
);
1595 static struct ring_buffer
*temp_buffer
;
1597 struct ring_buffer_event
*
1598 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1599 struct ftrace_event_file
*ftrace_file
,
1600 int type
, unsigned long len
,
1601 unsigned long flags
, int pc
)
1603 struct ring_buffer_event
*entry
;
1605 *current_rb
= ftrace_file
->tr
->trace_buffer
.buffer
;
1606 entry
= trace_buffer_lock_reserve(*current_rb
,
1607 type
, len
, flags
, pc
);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
1614 if (!entry
&& ftrace_file
->flags
& FTRACE_EVENT_FL_TRIGGER_COND
) {
1615 *current_rb
= temp_buffer
;
1616 entry
= trace_buffer_lock_reserve(*current_rb
,
1617 type
, len
, flags
, pc
);
1621 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
1623 struct ring_buffer_event
*
1624 trace_current_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1625 int type
, unsigned long len
,
1626 unsigned long flags
, int pc
)
1628 *current_rb
= global_trace
.trace_buffer
.buffer
;
1629 return trace_buffer_lock_reserve(*current_rb
,
1630 type
, len
, flags
, pc
);
1632 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve
);
1634 void trace_current_buffer_unlock_commit(struct ring_buffer
*buffer
,
1635 struct ring_buffer_event
*event
,
1636 unsigned long flags
, int pc
)
1638 __trace_buffer_unlock_commit(buffer
, event
, flags
, pc
);
1640 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit
);
1642 void trace_buffer_unlock_commit_regs(struct ring_buffer
*buffer
,
1643 struct ring_buffer_event
*event
,
1644 unsigned long flags
, int pc
,
1645 struct pt_regs
*regs
)
1647 __buffer_unlock_commit(buffer
, event
);
1649 ftrace_trace_stack_regs(buffer
, flags
, 0, pc
, regs
);
1650 ftrace_trace_userstack(buffer
, flags
, pc
);
1652 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs
);
1654 void trace_current_buffer_discard_commit(struct ring_buffer
*buffer
,
1655 struct ring_buffer_event
*event
)
1657 ring_buffer_discard_commit(buffer
, event
);
1659 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit
);
1662 trace_function(struct trace_array
*tr
,
1663 unsigned long ip
, unsigned long parent_ip
, unsigned long flags
,
1666 struct ftrace_event_call
*call
= &event_function
;
1667 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
1668 struct ring_buffer_event
*event
;
1669 struct ftrace_entry
*entry
;
1671 /* If we are reading the ring buffer, don't trace */
1672 if (unlikely(__this_cpu_read(ftrace_cpu_disabled
)))
1675 event
= trace_buffer_lock_reserve(buffer
, TRACE_FN
, sizeof(*entry
),
1679 entry
= ring_buffer_event_data(event
);
1681 entry
->parent_ip
= parent_ip
;
1683 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1684 __buffer_unlock_commit(buffer
, event
);
1687 #ifdef CONFIG_STACKTRACE
1689 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1690 struct ftrace_stack
{
1691 unsigned long calls
[FTRACE_STACK_MAX_ENTRIES
];
1694 static DEFINE_PER_CPU(struct ftrace_stack
, ftrace_stack
);
1695 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
1697 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
1698 unsigned long flags
,
1699 int skip
, int pc
, struct pt_regs
*regs
)
1701 struct ftrace_event_call
*call
= &event_kernel_stack
;
1702 struct ring_buffer_event
*event
;
1703 struct stack_entry
*entry
;
1704 struct stack_trace trace
;
1706 int size
= FTRACE_STACK_ENTRIES
;
1708 trace
.nr_entries
= 0;
1712 * Since events can happen in NMIs there's no safe way to
1713 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1714 * or NMI comes in, it will just have to use the default
1715 * FTRACE_STACK_SIZE.
1717 preempt_disable_notrace();
1719 use_stack
= __this_cpu_inc_return(ftrace_stack_reserve
);
1721 * We don't need any atomic variables, just a barrier.
1722 * If an interrupt comes in, we don't care, because it would
1723 * have exited and put the counter back to what we want.
1724 * We just need a barrier to keep gcc from moving things
1728 if (use_stack
== 1) {
1729 trace
.entries
= this_cpu_ptr(ftrace_stack
.calls
);
1730 trace
.max_entries
= FTRACE_STACK_MAX_ENTRIES
;
1733 save_stack_trace_regs(regs
, &trace
);
1735 save_stack_trace(&trace
);
1737 if (trace
.nr_entries
> size
)
1738 size
= trace
.nr_entries
;
1740 /* From now on, use_stack is a boolean */
1743 size
*= sizeof(unsigned long);
1745 event
= trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
1746 sizeof(*entry
) + size
, flags
, pc
);
1749 entry
= ring_buffer_event_data(event
);
1751 memset(&entry
->caller
, 0, size
);
1754 memcpy(&entry
->caller
, trace
.entries
,
1755 trace
.nr_entries
* sizeof(unsigned long));
1757 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1758 trace
.entries
= entry
->caller
;
1760 save_stack_trace_regs(regs
, &trace
);
1762 save_stack_trace(&trace
);
1765 entry
->size
= trace
.nr_entries
;
1767 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1768 __buffer_unlock_commit(buffer
, event
);
1771 /* Again, don't let gcc optimize things here */
1773 __this_cpu_dec(ftrace_stack_reserve
);
1774 preempt_enable_notrace();
1778 void ftrace_trace_stack_regs(struct ring_buffer
*buffer
, unsigned long flags
,
1779 int skip
, int pc
, struct pt_regs
*regs
)
1781 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1784 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
1787 void ftrace_trace_stack(struct ring_buffer
*buffer
, unsigned long flags
,
1790 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1793 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
1796 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
1799 __ftrace_trace_stack(tr
->trace_buffer
.buffer
, flags
, skip
, pc
, NULL
);
1803 * trace_dump_stack - record a stack back trace in the trace buffer
1804 * @skip: Number of functions to skip (helper handlers)
1806 void trace_dump_stack(int skip
)
1808 unsigned long flags
;
1810 if (tracing_disabled
|| tracing_selftest_running
)
1813 local_save_flags(flags
);
1816 * Skip 3 more, seems to get us at the caller of
1820 __ftrace_trace_stack(global_trace
.trace_buffer
.buffer
,
1821 flags
, skip
, preempt_count(), NULL
);
1824 static DEFINE_PER_CPU(int, user_stack_count
);
1827 ftrace_trace_userstack(struct ring_buffer
*buffer
, unsigned long flags
, int pc
)
1829 struct ftrace_event_call
*call
= &event_user_stack
;
1830 struct ring_buffer_event
*event
;
1831 struct userstack_entry
*entry
;
1832 struct stack_trace trace
;
1834 if (!(trace_flags
& TRACE_ITER_USERSTACKTRACE
))
	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
1841 if (unlikely(in_nmi()))
1845 * prevent recursion, since the user stack tracing may
1846 * trigger other kernel events.
1849 if (__this_cpu_read(user_stack_count
))
1852 __this_cpu_inc(user_stack_count
);
1854 event
= trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
1855 sizeof(*entry
), flags
, pc
);
1857 goto out_drop_count
;
1858 entry
= ring_buffer_event_data(event
);
1860 entry
->tgid
= current
->tgid
;
1861 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
1863 trace
.nr_entries
= 0;
1864 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1866 trace
.entries
= entry
->caller
;
1868 save_stack_trace_user(&trace
);
1869 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1870 __buffer_unlock_commit(buffer
, event
);
1873 __this_cpu_dec(user_stack_count
);
1879 static void __trace_userstack(struct trace_array
*tr
, unsigned long flags
)
1881 ftrace_trace_userstack(tr
, flags
, preempt_count());
1885 #endif /* CONFIG_STACKTRACE */
1887 /* created for use with alloc_percpu */
1888 struct trace_buffer_struct
{
1889 char buffer
[TRACE_BUF_SIZE
];
1892 static struct trace_buffer_struct
*trace_percpu_buffer
;
1893 static struct trace_buffer_struct
*trace_percpu_sirq_buffer
;
1894 static struct trace_buffer_struct
*trace_percpu_irq_buffer
;
1895 static struct trace_buffer_struct
*trace_percpu_nmi_buffer
;
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
1904 static char *get_trace_buf(void)
1906 struct trace_buffer_struct
*percpu_buffer
;
1909 * If we have allocated per cpu buffers, then we do not
1910 * need to do any locking.
1913 percpu_buffer
= trace_percpu_nmi_buffer
;
1915 percpu_buffer
= trace_percpu_irq_buffer
;
1916 else if (in_softirq())
1917 percpu_buffer
= trace_percpu_sirq_buffer
;
1919 percpu_buffer
= trace_percpu_buffer
;
1924 return this_cpu_ptr(&percpu_buffer
->buffer
[0]);
1927 static int alloc_percpu_trace_buffer(void)
1929 struct trace_buffer_struct
*buffers
;
1930 struct trace_buffer_struct
*sirq_buffers
;
1931 struct trace_buffer_struct
*irq_buffers
;
1932 struct trace_buffer_struct
*nmi_buffers
;
1934 buffers
= alloc_percpu(struct trace_buffer_struct
);
1938 sirq_buffers
= alloc_percpu(struct trace_buffer_struct
);
1942 irq_buffers
= alloc_percpu(struct trace_buffer_struct
);
1946 nmi_buffers
= alloc_percpu(struct trace_buffer_struct
);
1950 trace_percpu_buffer
= buffers
;
1951 trace_percpu_sirq_buffer
= sirq_buffers
;
1952 trace_percpu_irq_buffer
= irq_buffers
;
1953 trace_percpu_nmi_buffer
= nmi_buffers
;
1958 free_percpu(irq_buffers
);
1960 free_percpu(sirq_buffers
);
1962 free_percpu(buffers
);
1964 WARN(1, "Could not allocate percpu trace_printk buffer");
1968 static int buffers_allocated
;
1970 void trace_printk_init_buffers(void)
1972 if (buffers_allocated
)
1975 if (alloc_percpu_trace_buffer())
1978 pr_info("ftrace: Allocated trace_printk buffers\n");
1980 /* Expand the buffers to set size */
1981 tracing_update_buffers();
1983 buffers_allocated
= 1;
1986 * trace_printk_init_buffers() can be called by modules.
1987 * If that happens, then we need to start cmdline recording
1988 * directly here. If the global_trace.buffer is already
1989 * allocated here, then this was called by module code.
1991 if (global_trace
.trace_buffer
.buffer
)
1992 tracing_start_cmdline_record();
1995 void trace_printk_start_comm(void)
1997 /* Start tracing comms if trace printk is set */
1998 if (!buffers_allocated
)
2000 tracing_start_cmdline_record();
2003 static void trace_printk_start_stop_comm(int enabled
)
2005 if (!buffers_allocated
)
2009 tracing_start_cmdline_record();
2011 tracing_stop_cmdline_record();
2015 * trace_vbprintk - write binary msg to tracing buffer
2018 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
2020 struct ftrace_event_call
*call
= &event_bprint
;
2021 struct ring_buffer_event
*event
;
2022 struct ring_buffer
*buffer
;
2023 struct trace_array
*tr
= &global_trace
;
2024 struct bprint_entry
*entry
;
2025 unsigned long flags
;
2027 int len
= 0, size
, pc
;
2029 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
2032 /* Don't pollute graph traces with trace_vprintk internals */
2033 pause_graph_tracing();
2035 pc
= preempt_count();
2036 preempt_disable_notrace();
2038 tbuffer
= get_trace_buf();
2044 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
2046 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
2049 local_save_flags(flags
);
2050 size
= sizeof(*entry
) + sizeof(u32
) * len
;
2051 buffer
= tr
->trace_buffer
.buffer
;
2052 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
2056 entry
= ring_buffer_event_data(event
);
2060 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
2061 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2062 __buffer_unlock_commit(buffer
, event
);
2063 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2067 preempt_enable_notrace();
2068 unpause_graph_tracing();
2072 EXPORT_SYMBOL_GPL(trace_vbprintk
);
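
/*
 * Usage sketch (illustrative): trace_vbprintk() is normally reached through
 * the trace_printk() macro, which records the format pointer plus the binary
 * arguments rather than the formatted string, e.g.:
 *
 *	trace_printk("queue depth %d on cpu %d\n", depth, cpu);
 */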
2075 __trace_array_vprintk(struct ring_buffer
*buffer
,
2076 unsigned long ip
, const char *fmt
, va_list args
)
2078 struct ftrace_event_call
*call
= &event_print
;
2079 struct ring_buffer_event
*event
;
2080 int len
= 0, size
, pc
;
2081 struct print_entry
*entry
;
2082 unsigned long flags
;
2085 if (tracing_disabled
|| tracing_selftest_running
)
2088 /* Don't pollute graph traces with trace_vprintk internals */
2089 pause_graph_tracing();
2091 pc
= preempt_count();
2092 preempt_disable_notrace();
2095 tbuffer
= get_trace_buf();
2101 len
= vsnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2102 if (len
> TRACE_BUF_SIZE
)
2105 local_save_flags(flags
);
2106 size
= sizeof(*entry
) + len
+ 1;
2107 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
2111 entry
= ring_buffer_event_data(event
);
2114 memcpy(&entry
->buf
, tbuffer
, len
);
2115 entry
->buf
[len
] = '\0';
2116 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2117 __buffer_unlock_commit(buffer
, event
);
2118 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2121 preempt_enable_notrace();
2122 unpause_graph_tracing();
2127 int trace_array_vprintk(struct trace_array
*tr
,
2128 unsigned long ip
, const char *fmt
, va_list args
)
2130 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
2133 int trace_array_printk(struct trace_array
*tr
,
2134 unsigned long ip
, const char *fmt
, ...)
2139 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2143 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
2148 int trace_array_printk_buf(struct ring_buffer
*buffer
,
2149 unsigned long ip
, const char *fmt
, ...)
2154 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2158 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
2163 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
2165 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
2167 EXPORT_SYMBOL_GPL(trace_vprintk
);
2169 static void trace_iterator_increment(struct trace_iterator
*iter
)
2171 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
2175 ring_buffer_read(buf_iter
, NULL
);
2178 static struct trace_entry
*
2179 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
2180 unsigned long *lost_events
)
2182 struct ring_buffer_event
*event
;
2183 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
2186 event
= ring_buffer_iter_peek(buf_iter
, ts
);
2188 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
2192 iter
->ent_size
= ring_buffer_event_length(event
);
2193 return ring_buffer_event_data(event
);
2199 static struct trace_entry
*
2200 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
2201 unsigned long *missing_events
, u64
*ent_ts
)
2203 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
2204 struct trace_entry
*ent
, *next
= NULL
;
2205 unsigned long lost_events
= 0, next_lost
= 0;
2206 int cpu_file
= iter
->cpu_file
;
2207 u64 next_ts
= 0, ts
;
	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all cpus; just peek at that cpu directly.
	 */
2216 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
2217 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
2219 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
2221 *ent_cpu
= cpu_file
;
2226 for_each_tracing_cpu(cpu
) {
2228 if (ring_buffer_empty_cpu(buffer
, cpu
))
2231 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
2234 * Pick the entry with the smallest timestamp:
2236 if (ent
&& (!next
|| ts
< next_ts
)) {
2240 next_lost
= lost_events
;
2241 next_size
= iter
->ent_size
;
2245 iter
->ent_size
= next_size
;
2248 *ent_cpu
= next_cpu
;
2254 *missing_events
= next_lost
;
2259 /* Find the next real entry, without updating the iterator itself */
2260 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
2261 int *ent_cpu
, u64
*ent_ts
)
2263 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
2266 /* Find the next real entry, and increment the iterator to the next entry */
2267 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
2269 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
2270 &iter
->lost_events
, &iter
->ts
);
2273 trace_iterator_increment(iter
);
2275 return iter
->ent
? iter
: NULL
;
2278 static void trace_consume(struct trace_iterator
*iter
)
2280 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
2281 &iter
->lost_events
);
2284 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
2286 struct trace_iterator
*iter
= m
->private;
2290 WARN_ON_ONCE(iter
->leftover
);
2294 /* can't go backwards */
2299 ent
= trace_find_next_entry_inc(iter
);
2303 while (ent
&& iter
->idx
< i
)
2304 ent
= trace_find_next_entry_inc(iter
);
2311 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
2313 struct ring_buffer_event
*event
;
2314 struct ring_buffer_iter
*buf_iter
;
2315 unsigned long entries
= 0;
2318 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
2320 buf_iter
= trace_buffer_iter(iter
, cpu
);
2324 ring_buffer_iter_reset(buf_iter
);
2327 * We could have the case with the max latency tracers
2328 * that a reset never took place on a cpu. This is evident
2329 * by the timestamp being before the start of the buffer.
2331 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
2332 if (ts
>= iter
->trace_buffer
->time_start
)
2335 ring_buffer_read(buf_iter
, NULL
);
2338 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
2342 * The current tracer is copied to avoid a global locking
2345 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
2347 struct trace_iterator
*iter
= m
->private;
2348 struct trace_array
*tr
= iter
->tr
;
2349 int cpu_file
= iter
->cpu_file
;
2355 * copy the tracer to avoid using a global lock all around.
2356 * iter->trace is a copy of current_trace, the pointer to the
2357 * name may be used instead of a strcmp(), as iter->trace->name
2358 * will point to the same string as current_trace->name.
2360 mutex_lock(&trace_types_lock
);
2361 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
2362 *iter
->trace
= *tr
->current_trace
;
2363 mutex_unlock(&trace_types_lock
);
2365 #ifdef CONFIG_TRACER_MAX_TRACE
2366 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
2367 return ERR_PTR(-EBUSY
);
2370 if (!iter
->snapshot
)
2371 atomic_inc(&trace_record_cmdline_disabled
);
2373 if (*pos
!= iter
->pos
) {
2378 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
2379 for_each_tracing_cpu(cpu
)
2380 tracing_iter_reset(iter
, cpu
);
2382 tracing_iter_reset(iter
, cpu_file
);
2385 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
2390 * If we overflowed the seq_file before, then we want
2391 * to just reuse the trace_seq buffer again.
2397 p
= s_next(m
, p
, &l
);
2401 trace_event_read_lock();
2402 trace_access_lock(cpu_file
);
2406 static void s_stop(struct seq_file
*m
, void *p
)
2408 struct trace_iterator
*iter
= m
->private;
2410 #ifdef CONFIG_TRACER_MAX_TRACE
2411 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
2415 if (!iter
->snapshot
)
2416 atomic_dec(&trace_record_cmdline_disabled
);
2418 trace_access_unlock(iter
->cpu_file
);
2419 trace_event_read_unlock();
2423 get_total_entries(struct trace_buffer
*buf
,
2424 unsigned long *total
, unsigned long *entries
)
2426 unsigned long count
;
2432 for_each_tracing_cpu(cpu
) {
2433 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
2435 * If this buffer has skipped entries, then we hold all
2436 * entries for the trace and we need to ignore the
2437 * ones before the time stamp.
2439 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
2440 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
2441 /* total is the same as the entries */
2445 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#        _------=> CPU#            \n");
	seq_puts(m, "#       / _-----=> irqs-off        \n");
	seq_puts(m, "#      | / _----=> need-resched    \n");
	seq_puts(m, "#      || / _---=> hardirq/softirq \n");
	seq_puts(m, "#      ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#      |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries, total, buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                     Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                     (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                      is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                     Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                     (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                      is not a '0' or '1')\n");
}
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * ret is 0 if seq_file write succeeded.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
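
/*
 * Common open path for the "trace" and "snapshot" files: allocates
 * the iterator, takes a private copy of the current tracer, and
 * prepares ring buffer iterators for either one CPU or all CPUs.
 */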
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);

	return 0;
}
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local: Per cpu clock but may not be synced across CPUs\n"
	"      global: Synced across CPUs but slows tracing down.\n"
	"     counter: Not a clock, but just an increment\n"
	"      uptime: Jiffy counter from time of boot\n"
	"        perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc: TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
#endif
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *file_buf;
	char *buf;
	char *buf_comm;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_read,
	.llseek		= generic_file_llseek,
};
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
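
/*
 * Resize the main buffer (and, when the current tracer uses the
 * snapshot, the max buffer as well) for one CPU or for all CPUs.
 * Marks the ring buffer as expanded so boot-time minimum sizing is
 * no longer applied.
 */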
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	size_t ret;
	int err;
	int i;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
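
/*
 * Fill iter->seq with as many complete trace lines as fit in the
 * remaining splice budget; returns how much of the budget is left.
 */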
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unmap;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unmap:
	for (i = 0; i < nr_pages; i++){
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	ret = tracing_update_buffers();

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {

	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		if (tr->allocated_snapshot)
		/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
			update_max_tr_single(tr, current, iter->cpu_file);
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
				tracing_reset(&tr->max_buffer, iter->cpu_file);

	mutex_unlock(&trace_types_lock);
static int tracing_snapshot_release(struct inode *inode, struct file *file)
	struct seq_file *m = file->private_data;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
	/* If write only, the seq_file is just a stub */

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
static int snapshot_raw_open(struct inode *inode, struct file *filp)
	struct ftrace_buffer_info *info;

	ret = tracing_buffers_open(inode, filp);

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

#endif /* CONFIG_TRACER_SNAPSHOT */
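/*
 * Illustrative userspace sketch (not part of trace.c): writing "1" to the
 * snapshot file reaches tracing_snapshot_write() above, which swaps the live
 * buffer with the max/snapshot buffer; the snapshot can then be read back
 * from the same file.  It assumes debugfs is mounted at /sys/kernel/debug and
 * that CONFIG_TRACER_SNAPSHOT is enabled.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int take_snapshot(void)
{
	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);

	if (fd < 0) {
		perror("open snapshot");
		return -1;
	}
	/* "1" allocates the snapshot buffer if needed and swaps it in. */
	if (write(fd, "1", 1) < 0)
		perror("write snapshot");
	close(fd);
	return 0;
}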
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)

	if (trace_array_get(tr) < 0)

	info = kzalloc(sizeof(*info), GFP_KERNEL);
		trace_array_put(tr);

	mutex_lock(&trace_types_lock);

	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;

	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
		trace_array_put(tr);
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);

tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {

		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)

	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
	trace_access_unlock(iter->cpu_file);

	if (trace_empty(iter)) {
		if ((filp->f_flags & O_NONBLOCK)) {
		mutex_unlock(&trace_types_lock);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {

	size = PAGE_SIZE - info->read;

	ret = copy_to_user(ubuf, info->spare + info->read, size);

	mutex_unlock(&trace_types_lock);
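/*
 * Illustrative userspace sketch (not part of trace.c): reading
 * per_cpu/cpu0/trace_pipe_raw returns raw ring-buffer pages through
 * tracing_buffers_read() above.  It assumes debugfs at /sys/kernel/debug,
 * that CPU 0 exists, and a typical 4 KiB page size; a real consumer would
 * parse the page format, this only counts bytes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open trace_pipe_raw");
		return 1;
	}
	/* O_NONBLOCK: return instead of sleeping when the buffer is empty. */
	while ((n = read(fd, page, sizeof(page))) > 0)
		printf("read %zd raw bytes\n", n);
	close(fd);
	return 0;
}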
static int tracing_buffers_release(struct inode *inode, struct file *file)
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);

	mutex_unlock(&trace_types_lock);
	struct ring_buffer	*buffer;

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ring_buffer_free_read_page(ref->buffer, ref->page);

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,

 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	ring_buffer_free_read_page(ref->buffer, ref->page);

	spd->partial[i].private = 0;
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	struct buffer_ref *ref;
	int entries, size, i;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {

	if (splice_grow_spd(pipe, &spd)) {

	if (*ppos & (PAGE_SIZE - 1)) {

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {

	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);

		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
			ring_buffer_free_read_page(ref->buffer, ref->page);

		 * zero out any left over data, this is going to
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	trace_access_unlock(iter->cpu_file);

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
		mutex_unlock(&trace_types_lock);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);

	mutex_unlock(&trace_types_lock);
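/*
 * Illustrative userspace sketch (not part of trace.c): splice() on
 * trace_pipe_raw is served by tracing_buffers_splice_read() above, which
 * hands whole ring-buffer pages to the pipe without copying them through
 * userspace.  It assumes debugfs at /sys/kernel/debug, CPU 0, and 4 KiB
 * pages; the output file name is only an example.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	ssize_t n;
	int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	/* Move one page at a time: ring buffer -> pipe -> output file. */
	while ((n = splice(in, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK)) > 0)
		splice(pfd[0], NULL, out, NULL, n, 0);
	close(in);
	close(out);
	return 0;
}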
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
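/*
 * Illustrative userspace sketch (not part of trace.c): the per-CPU "stats"
 * file is produced by tracing_stats_read() above.  It assumes debugfs at
 * /sys/kernel/debug and that CPU 0 exists.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", O_RDONLY);

	if (fd < 0) {
		perror("open stats");
		return 1;
	}
	/* Prints entries, overrun, bytes, timestamps, etc. for CPU 0. */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}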
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)

tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)

ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
	unsigned long *count = (long *)data;

ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	seq_printf(m, ":unlimited\n");

	seq_printf(m, ":count=%ld\n", count);

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,

ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;

	/* hash funcs only work with set_ftrace_filter */

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);

	number = strsep(&param, ":");

	if (!strlen(number))

	 * We use the callback data field (which is a pointer)
	ret = kstrtoul(number, 0, (unsigned long *)&count);

	ret = register_ftrace_function_probe(glob, ops, count);

		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.func		= ftrace_trace_snapshot_callback,

static __init int register_snapshot_cmd(void)
	return register_ftrace_command(&ftrace_snapshot_cmd);

static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
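/*
 * Illustrative userspace sketch (not part of trace.c): the "snapshot"
 * command registered above is used through set_ftrace_filter with the
 * "<function>:snapshot[:count]" syntax; the callback then takes a snapshot
 * when that function is hit.  It assumes debugfs at /sys/kernel/debug and
 * CONFIG_DYNAMIC_FTRACE; "schedule" is only an example function name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int snapshot_on_function(const char *spec)
{
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return -1;
	}
	if (write(fd, spec, strlen(spec)) < 0)
		perror("write set_ftrace_filter");
	close(fd);
	return 0;
}

/* Example: snapshot_on_function("schedule:snapshot:1"); */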
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
	if (!debugfs_initialized())

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

		pr_warn_once("Could not create debugfs directory 'tracing'\n");

struct dentry *tracing_init_dentry(void)
	return tracing_init_dentry_tr(&global_trace);

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
	struct dentry *d_tracer;

		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);

tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
	struct trace_option_dentry *topt = filp->private_data;

	if (topt->flags->val & topt->opt->bit)

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct trace_option_dentry *topt = filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	if (val != 0 && val != 1)

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
		mutex_unlock(&trace_types_lock);

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
	long index = (long)filp->private_data;

	if (trace_flags & (1 << index))

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	if (val != 0 && val != 1)

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
struct dentry *trace_create_file(const char *name,
				 struct dentry *parent,
				 const struct file_operations *fops)
	ret = debugfs_create_file(name, mode, parent, data, fops);
		pr_warning("Could not create debugfs '%s' entry\n", name);

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry_tr(tr);

	tr->options = debugfs_create_dir("options", d_tracer);
		pr_warning("Could not create debugfs directory 'options'\n");
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);

	topt->flags = flags;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;

	flags = tracer->flags;

	if (!flags || !flags->opts)

	for (cnt = 0; opts[cnt].name; cnt++)

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,

destroy_trace_option_files(struct trace_option_dentry *topts)
	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);

static __init void create_trace_options_dir(struct trace_array *tr)
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
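/*
 * Illustrative userspace sketch (not part of trace.c): each file under
 * tracing/options/ created above toggles one bit of trace_flags through
 * trace_options_core_write().  It assumes debugfs at /sys/kernel/debug;
 * "overwrite" is one of the standard option names, used here only as an
 * example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_trace_option(const char *opt, int on)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/options/%s", opt);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open option");
		return -1;
	}
	/* The write handler accepts only "0" or "1". */
	if (write(fd, on ? "1" : "0", 1) < 0)
		perror("write option");
	close(fd);
	return 0;
}

/* Example: set_trace_option("overwrite", 1); */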
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);

	mutex_lock(&trace_types_lock);

		tracer_tracing_on(tr);
		if (tr->current_trace->start)
			tr->current_trace->start(tr);

		tracer_tracing_off(tr);
		if (tr->current_trace->stop)
			tr->current_trace->stop(tr);

	mutex_unlock(&trace_types_lock);

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
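/*
 * Illustrative userspace sketch (not part of trace.c): writing "0" or "1"
 * to tracing_on lands in rb_simple_write() above, which turns the ring
 * buffer off or on without changing the current tracer.  It assumes debugfs
 * is mounted at /sys/kernel/debug.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int tracing_switch(int on)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_on");
		return -1;
	}
	if (write(fd, on ? "1" : "0", 1) < 0)
		perror("write tracing_on");
	close(fd);
	return 0;
}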
struct dentry *trace_instance_dir;

init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);

	buf->data = alloc_percpu(struct trace_array_cpu);
		ring_buffer_free(buf->buffer);

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

static int allocate_trace_buffers(struct trace_array *tr, int size)
	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);

	tr->allocated_snapshot = allocate_snapshot;

	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	allocate_snapshot = false;
static int new_instance_create(const char *name)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);

	tr->name = kstrdup(name, GFP_KERNEL);

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)

	tr->dir = debugfs_create_dir(name, trace_instance_dir);

	ret = event_trace_add_tracer(tr->dir, tr);
		debugfs_remove_recursive(tr->dir);

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_cpumask_var(tr->tracing_cpumask);

	mutex_unlock(&trace_types_lock);

static int instance_delete(const char *name)
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	mutex_unlock(&trace_types_lock);
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
	struct dentry *parent;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))

	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
	struct dentry *parent;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the instance_delete() will determine the
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,

static __init void create_trace_instances(struct dentry *d_tracer)
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
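/*
 * Illustrative userspace sketch (not part of trace.c): creating a directory
 * under tracing/instances/ triggers instance_mkdir() -> new_instance_create()
 * above, giving that instance its own buffers and control files; rmdir()
 * tears it down via instance_delete().  It assumes debugfs at
 * /sys/kernel/debug; "demo" is an example instance name.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing/instances/demo";

	if (mkdir(dir, 0755) < 0) {
		perror("mkdir instance");
		return 1;
	}
	/* The new instance now has its own trace, trace_pipe, options, ... */
	if (rmdir(dir) < 0)
		perror("rmdir instance");
	return 0;
}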
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
static __init int tracer_init_debugfs(void)
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */

static int trace_die_handler(struct notifier_block *self,
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,

 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
#define TRACE_MAX_PRINT		1000

 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
#define KERN_TRACE		KERN_EMERG

trace_printk_seq(struct trace_seq *s)
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);
void trace_init_global_iter(struct trace_iterator *iter)
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);

	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		iter.cpu_file = raw_smp_processor_id();
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");

	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.

	while (!trace_empty(&iter)) {

		printk(KERN_TRACE "---------------------------------\n");

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);

		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);

		printk(KERN_TRACE " (ftrace buffer empty)\n");

	printk(KERN_TRACE "---------------------------------\n");

	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	atomic_dec(&dump_running);
	local_irq_restore(flags);

EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		goto out_free_temp_buffer;

	if (global_trace.buffer_disabled)

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
			pr_warning("Trace clock %s not defined, going back to default\n",

	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);

	register_snapshot_cmd();

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
__init static int clear_boot_tracer(void)
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	if (!default_bootup_tracer)

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);