/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
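/*
 * For example (illustrative sketch only): an array holding two saved
 * maps would be laid out as
 *
 *	[ head | map 0 | map 1 | tail ]
 *
 * with head.length == 2, and tail.next pointing at the head item of
 * the next saved array (or NULL if it is the last one).
 */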
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

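/*
 * For instance, booting with "ftrace=function" on the kernel command
 * line selects the function tracer as early as possible (example).
 */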
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

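/*
 * e.g. "ftrace_dump_on_oops" alone on the command line dumps the
 * buffers of all CPUs on an oops, while "ftrace_dump_on_oops=orig_cpu"
 * dumps only the buffer of the CPU that triggered it (example of the
 * parser above).
 */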
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

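/*
 * The +500 rounds to the nearest microsecond, e.g.
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2 (worked example).
 */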
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)


/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

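/*
 * Typical usage (illustrative sketch): take a reference while the
 * instance is in use and drop it when done, so the trace_array cannot
 * go away underneath the user:
 *
 *	if (trace_array_get(tr) == 0) {
 *		... use tr ...
 *		trace_array_put(tr);
 *	}
 */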
int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it is much appreciated not to
 * have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

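/*
 * A consuming reader would typically bracket its reads like this
 * (illustrative sketch only):
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 */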
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

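/*
 * Callers normally reach this through the trace_puts() macro, e.g.
 * trace_puts("reached here\n"); for string literals that macro may
 * dispatch to the lighter-weight __trace_bputs() below instead (sketch
 * of intended usage; see the trace_puts() definition for the exact
 * dispatch).
 */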
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
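/*
 * Illustrative use (sketch, assumes the snapshot buffer was allocated
 * beforehand, e.g. with tracing_snapshot_alloc()):
 *
 *	if (unlikely(error_condition))
 *		tracing_snapshot();
 *
 * Tracing continues afterwards; the swapped-out buffer can be read
 * from the "snapshot" file in tracefs.
 */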

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

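/*
 * A common debugging pattern (illustrative): stop recording the moment
 * a bad state is detected so the events leading up to it are preserved:
 *
 *	if (unlikely(bad_state))
 *		tracing_off();
 */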
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

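/*
 * memparse() accepts size suffixes, so e.g. "trace_buf_size=1M" on the
 * kernel command line requests a one-megabyte per-cpu buffer (example).
 */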
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

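/*
 * The parameter is given in microseconds, e.g. "tracing_thresh=100"
 * stores 100000 in tracing_thresh, which is kept in nanoseconds
 * (worked example of the *1000 above).
 */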
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

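/*
 * For instance (sketch): if TRACE_FLAGS contained
 *	C(PRINT_PARENT, "print-parent"), C(SYM_OFFSET, "sym-offset")
 * the definition above would expand it to
 *	"print-parent", "sym-offset",
 * keeping the strings in the same order as the TRACE_ITER_* bits.
 */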
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

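/*
 * The clock is selected by name, e.g. booting with "trace_clock=global"
 * (see set_trace_boot_clock() above) or writing a name to the tracefs
 * "trace_clock" file (illustrative):
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 */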
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

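/*
 * For example (illustrative): if userspace writes "func_a func_b" to a
 * file using this helper, the first call fills parser->buffer with
 * "func_a" and a later call continues with "func_b".
 */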
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

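/*
 * A minimal tracer registers itself roughly like this (illustrative
 * sketch; the field set is reduced and the names are hypothetical):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	register_tracer(&example_tracer);
 */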
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;
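/*
 * Example (illustrative): after "bash" with pid 1234 is saved in
 * slot 5 of the buffer,
 *	map_pid_to_cmdline[1234] == 5
 *	map_cmdline_to_pid[5]    == 1234
 * and the comm itself lives at saved_cmdlines + 5 * TASK_COMM_LEN.
 */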

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

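/*
 * For example (illustrative): an event emitted from hard interrupt
 * context with interrupts disabled ends up with both
 * TRACE_FLAG_IRQS_OFF and TRACE_FLAG_HARDIRQ set in entry->flags.
 */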
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

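/*
 * Writers follow a reserve/commit pattern, as __trace_puts() above does
 * (illustrative sketch):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT,
 *					  sizeof(*entry), irq_flags, pc);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		... fill in entry ...
 *		__buffer_unlock_commit(buffer, event);
 *	}
 */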
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

ef5580d0 1741struct ring_buffer_event *
e77405ad
SR
1742trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743 int type, unsigned long len,
ef5580d0
SR
1744 unsigned long flags, int pc)
1745{
12883efb 1746 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1747 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1748 type, len, flags, pc);
1749}
94487d6d 1750EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1751
b7f0c959
SRRH
1752void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1753 struct ring_buffer *buffer,
0d5c6e1c
SR
1754 struct ring_buffer_event *event,
1755 unsigned long flags, int pc,
1756 struct pt_regs *regs)
1fd8df2c 1757{
7ffbd48d 1758 __buffer_unlock_commit(buffer, event);
1fd8df2c 1759
7717c6be 1760 ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1fd8df2c
MH
1761 ftrace_trace_userstack(buffer, flags, pc);
1762}
0d5c6e1c 1763EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1764
e77405ad
SR
1765void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1766 struct ring_buffer_event *event)
77d9f465 1767{
e77405ad 1768 ring_buffer_discard_commit(buffer, event);
ef5580d0 1769}
12acd473 1770EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_ENTRIES.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
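
/*
 * Worked example of the ftrace_stack_reserve scheme above (explanatory
 * note, not functional code): process context calls
 * __ftrace_trace_stack() and the per-cpu counter goes 0 -> 1, so that
 * call owns the large per-cpu ftrace_stack. An NMI arriving before the
 * counter drops increments it 1 -> 2, takes the "else" branch, and
 * unwinds directly into its own (smaller) ring buffer event instead.
 * No locks or atomics are needed because the count only nests on a
 * single CPU with preemption disabled.
 */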

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, which seems to get us to the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
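
/*
 * Usage sketch (assumed caller, not from this file): a driver debugging
 * an unexpected code path can record where it was reached from with
 *
 *	trace_dump_stack(0);
 *
 * A positive @skip drops that many additional callers from the top of
 * the trace, on top of the three internal frames skipped above.
 */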

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
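
/*
 * Note on the error path above (explanatory, not functional): the goto
 * "staircase" frees, in reverse order, exactly the allocations that
 * succeeded before the failing one. Each later failure label falls
 * through the earlier ones, so a failure allocating nmi_buffers frees
 * the irq, sirq and normal buffers in turn, and nothing is ever leaked
 * or double-freed.
 */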

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: The address of the caller
 * @fmt: The format string to write to the buffer
 * @args: Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
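
/*
 * Usage sketch (assumed module code, not from this file): a
 * trace_printk() call whose format takes arguments is normally routed
 * to this binary path, which stores only the format pointer plus the
 * raw argument words:
 *
 *	trace_printk("sector %llu took %d us\n", sector, usecs);
 *
 * The string is rendered lazily when the trace file is read, keeping
 * the record small and the write path fast.
 */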

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
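
/*
 * Usage sketch (hypothetical caller, not from this file): code holding
 * a private trace_array instance can write into that instance's buffer
 * rather than the global one:
 *
 *	trace_array_printk(my_tr, _THIS_IP_,
 *			   "state %d -> %d\n", old, new);
 *
 * "my_tr" is assumed to come from an earlier instance lookup.
 */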

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
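
/*
 * Explanatory note (not functional): the loop above is a k-way merge by
 * timestamp. Each call peeks at the head entry of every CPU's buffer
 * and returns the oldest, so producing one merged entry costs
 * O(nr_cpus) peeks. For example, with head timestamps
 * {cpu0: 105, cpu1: 98, cpu2: 300}, the cpu1 entry is returned and only
 * cpu1's iterator advances on the following trace_find_next_entry_inc()
 * call.
 */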

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
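
/*
 * Explanatory note on the seq_file protocol used above (not
 * functional): each read() of the trace file becomes roughly
 *
 *	p = s_start(m, &pos);
 *	while (p && buffer_not_full) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * which is why s_start() must cope with resuming at an arbitrary *pos
 * and with leftover output from a previous, overflowed read.
 */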

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
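
/*
 * Worked example (explanatory): with one CPU holding 900 entries and an
 * overrun count of 100, the header below reads
 * "entries-in-buffer/entries-written: 900/1000", i.e. *entries is what
 * is still in the buffer and *total additionally counts what the ring
 * buffer has overwritten.
 */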

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                           || / _--=> preempt-depth\n"
		    "#                          ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
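
/*
 * Explanatory note (not functional): the dispatch order above is fixed.
 * A tracer's own print_line() callback wins first; then the msg-only
 * printk shortcuts; then the user-selected bin/hex/raw encodings; and
 * only then the default human-readable format. The encodings are trace
 * options toggled from userspace, e.g. (path assumed to be the usual
 * debugfs mount):
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/hex
 */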

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
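
/*
 * Worked example for tracing_get_cpu() (explanatory): per-cpu trace
 * files stash cpu + 1 in i_cdev when created, so cpu 0 is stored as 1
 * and decodes back to 0 above, while a NULL i_cdev (the top-level trace
 * file) decodes to RING_BUFFER_ALL_CPUS. The +1 bias keeps cpu 0
 * distinguishable from "no value".
 */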

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
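
/*
 * Usage sketch (shell, explanatory; path assumed to be the usual
 * debugfs mount): reading this file prints the mask of CPUs being
 * traced as a hex bitmap via the %*pb format, e.g. on an 8-CPU box with
 * all CPUs enabled:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	ff
 */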

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
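
/*
 * Usage sketch (shell, explanatory): writing a hex mask restricts which
 * CPUs record into the ring buffer. For example, to trace only CPUs 0
 * and 1:
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * Bits set in the old mask but clear in the new one have their per-cpu
 * recording disabled above, and vice versa.
 */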

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
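
/*
 * Example output (explanatory): reading trace_options lists every flag,
 * prefixed with "no" when it is clear, e.g.:
 *
 *	print-parent
 *	nosym-offset
 *	nosym-addr
 *	noverbose
 *	...
 *
 * Tracer-specific options from the current tracer are appended after
 * the core ones.
 */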

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
3554
2b6080f2 3555int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3556{
3557 /* do nothing if flag is already set */
983f938a 3558 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3559 return 0;
3560
3561 /* Give the tracer a chance to approve the change */
2b6080f2 3562 if (tr->current_trace->flag_changed)
bf6065b5 3563 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3564 return -EINVAL;
af4617bd
SR
3565
3566 if (enabled)
983f938a 3567 tr->trace_flags |= mask;
af4617bd 3568 else
983f938a 3569 tr->trace_flags &= ~mask;
e870e9a1
LZ
3570
3571 if (mask == TRACE_ITER_RECORD_CMD)
3572 trace_event_enable_cmd_record(enabled);
750912fa 3573
80902822 3574 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3575 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3576#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3577 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3578#endif
3579 }
81698831 3580
b9f9108c 3581 if (mask == TRACE_ITER_PRINTK) {
81698831 3582 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3583 trace_printk_control(enabled);
3584 }
613f04a0
SRRH
3585
3586 return 0;
af4617bd
SR
3587}
3588
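/*
 * Illustrative sketch (not part of the original file): a flag flip goes
 * through set_tracer_flag(), e.g. turning off ring buffer overwrite so
 * the oldest events are kept when the buffer fills:
 *
 *	ret = set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
 *
 * trace_keep_overwrite() above is the ->flag_changed hook a tracer can
 * install to veto exactly this transition while it is enabled.
 */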
2b6080f2 3589static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3590{
8d18eaaf 3591 char *cmp;
bc0c38d1 3592 int neg = 0;
613f04a0 3593 int ret = -ENODEV;
bc0c38d1 3594 int i;
a4d1e688 3595 size_t orig_len = strlen(option);
bc0c38d1 3596
7bcfaf54 3597 cmp = strstrip(option);
bc0c38d1 3598
8d18eaaf 3599 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3600 neg = 1;
3601 cmp += 2;
3602 }
3603
69d34da2
SRRH
3604 mutex_lock(&trace_types_lock);
3605
bc0c38d1 3606 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3607 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3608 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3609 break;
3610 }
3611 }
adf9f195
FW
3612
3613 /* If no option could be set, test the specific tracer options */
69d34da2 3614 if (!trace_options[i])
8c1a49ae 3615 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3616
3617 mutex_unlock(&trace_types_lock);
bc0c38d1 3618
a4d1e688
JW
3619 /*
3620 * If the first trailing whitespace is replaced with '\0' by strstrip,
3621 * turn it back into a space.
3622 */
3623 if (orig_len > strlen(option))
3624 option[strlen(option)] = ' ';
3625
7bcfaf54
SR
3626 return ret;
3627}
3628
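/*
 * Illustrative note (not part of the original file): because of the
 * "no" prefix handling above, every flag name doubles as its own
 * inverse when written to trace_options:
 *
 *	# echo stacktrace > /sys/kernel/debug/tracing/trace_options
 *	# echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 *
 * Names not found in trace_options[] fall through to the current
 * tracer's private options via set_tracer_option().
 */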
a4d1e688
JW
3629static void __init apply_trace_boot_options(void)
3630{
3631 char *buf = trace_boot_options_buf;
3632 char *option;
3633
3634 while (true) {
3635 option = strsep(&buf, ",");
3636
3637 if (!option)
3638 break;
a4d1e688 3639
43ed3843
SRRH
3640 if (*option)
3641 trace_set_options(&global_trace, option);
a4d1e688
JW
3642
3643 /* Put back the comma to allow this to be called again */
3644 if (buf)
3645 *(buf - 1) = ',';
3646 }
3647}
3648
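/*
 * Illustrative note (not part of the original file): this consumes the
 * comma-separated list handed in on the kernel command line, e.g.
 * (hypothetical boot setting):
 *
 *	trace_options=sym-addr,noprint-parent
 *
 * strsep() writes '\0' over each comma; the loop restores the comma so
 * the boot buffer can be walked again later if needed.
 */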
7bcfaf54
SR
3649static ssize_t
3650tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651 size_t cnt, loff_t *ppos)
3652{
2b6080f2
SR
3653 struct seq_file *m = filp->private_data;
3654 struct trace_array *tr = m->private;
7bcfaf54 3655 char buf[64];
613f04a0 3656 int ret;
7bcfaf54
SR
3657
3658 if (cnt >= sizeof(buf))
3659 return -EINVAL;
3660
3661 if (copy_from_user(&buf, ubuf, cnt))
3662 return -EFAULT;
3663
a8dd2176
SR
3664 buf[cnt] = 0;
3665
2b6080f2 3666 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3667 if (ret < 0)
3668 return ret;
7bcfaf54 3669
cf8517cf 3670 *ppos += cnt;
bc0c38d1
SR
3671
3672 return cnt;
3673}
3674
fdb372ed
LZ
3675static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676{
7b85af63 3677 struct trace_array *tr = inode->i_private;
f77d09a3 3678 int ret;
7b85af63 3679
fdb372ed
LZ
3680 if (tracing_disabled)
3681 return -ENODEV;
2b6080f2 3682
7b85af63
SRRH
3683 if (trace_array_get(tr) < 0)
3684 return -ENODEV;
3685
f77d09a3
AL
3686 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687 if (ret < 0)
3688 trace_array_put(tr);
3689
3690 return ret;
fdb372ed
LZ
3691}
3692
5e2336a0 3693static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3694 .open = tracing_trace_options_open,
3695 .read = seq_read,
3696 .llseek = seq_lseek,
7b85af63 3697 .release = tracing_single_release_tr,
ee6bce52 3698 .write = tracing_trace_options_write,
bc0c38d1
SR
3699};
3700
7bd2f24c
IM
3701static const char readme_msg[] =
3702 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3703 "# echo 0 > tracing_on : quick way to disable tracing\n"
3704 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3705 " Important files:\n"
3706 " trace\t\t\t- The static contents of the buffer\n"
3707 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3708 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3709 " current_tracer\t- function and latency tracers\n"
3710 " available_tracers\t- list of configured tracers for current_tracer\n"
3711 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3712 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3713 " trace_clock\t\t-change the clock used to order events\n"
3714 " local: Per cpu clock but may not be synced across CPUs\n"
3715 " global: Synced across CPUs but slows tracing down.\n"
3716 " counter: Not a clock, but just an increment\n"
3717 " uptime: Jiffy counter from time of boot\n"
3718 " perf: Same clock that perf events use\n"
3719#ifdef CONFIG_X86_64
3720 " x86-tsc: TSC cycle counter\n"
3721#endif
3722 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3723 " tracing_cpumask\t- Limit which CPUs to trace\n"
3724 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3725 "\t\t\t Remove sub-buffer with rmdir\n"
3726 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3727 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3728 "\t\t\t option name\n"
939c7a4f 3729 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3730#ifdef CONFIG_DYNAMIC_FTRACE
3731 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3732 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3733 "\t\t\t functions\n"
3734 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735 "\t modules: Can select a group via module\n"
3736 "\t Format: :mod:<module-name>\n"
3737 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3738 "\t triggers: a command to perform when function is hit\n"
3739 "\t Format: <function>:<trigger>[:count]\n"
3740 "\t trigger: traceon, traceoff\n"
3741 "\t\t enable_event:<system>:<event>\n"
3742 "\t\t disable_event:<system>:<event>\n"
22f45649 3743#ifdef CONFIG_STACKTRACE
71485c45 3744 "\t\t stacktrace\n"
22f45649
SRRH
3745#endif
3746#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3747 "\t\t snapshot\n"
22f45649 3748#endif
17a280ea
SRRH
3749 "\t\t dump\n"
3750 "\t\t cpudump\n"
71485c45
SRRH
3751 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3752 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3753 "\t The first one will disable tracing every time do_fault is hit\n"
3754 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3755 "\t The first time do trap is hit and it disables tracing, the\n"
3756 "\t counter will decrement to 2. If tracing is already disabled,\n"
3757 "\t the counter will not decrement. It only decrements when the\n"
3758 "\t trigger did work\n"
3759 "\t To remove trigger without count:\n"
3760 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3761 "\t To remove trigger with a count:\n"
3762 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3763 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3764 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3765 "\t modules: Can select a group via module command :mod:\n"
3766 "\t Does not accept triggers\n"
22f45649
SRRH
3767#endif /* CONFIG_DYNAMIC_FTRACE */
3768#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3769 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3770 "\t\t (function)\n"
22f45649
SRRH
3771#endif
3772#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3773 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3774 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3775 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3776#endif
3777#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3778 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3779 "\t\t\t snapshot buffer. Read the contents for more\n"
3780 "\t\t\t information\n"
22f45649 3781#endif
991821c8 3782#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3783 " stack_trace\t\t- Shows the max stack trace when active\n"
3784 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3785 "\t\t\t Write into this file to reset the max size (trigger a\n"
3786 "\t\t\t new trace)\n"
22f45649 3787#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3788 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3789 "\t\t\t traces\n"
22f45649 3790#endif
991821c8 3791#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3792 " events/\t\t- Directory containing all trace event subsystems:\n"
3793 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3794 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3795 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3796 "\t\t\t events\n"
26f25564 3797 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3798 " events/<system>/<event>/\t- Directory containing control files for\n"
3799 "\t\t\t <event>:\n"
26f25564
TZ
3800 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3801 " filter\t\t- If set, only events passing filter are traced\n"
3802 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3803 "\t Format: <trigger>[:count][if <filter>]\n"
3804 "\t trigger: traceon, traceoff\n"
3805 "\t enable_event:<system>:<event>\n"
3806 "\t disable_event:<system>:<event>\n"
26f25564 3807#ifdef CONFIG_STACKTRACE
71485c45 3808 "\t\t stacktrace\n"
26f25564
TZ
3809#endif
3810#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3811 "\t\t snapshot\n"
26f25564 3812#endif
71485c45
SRRH
3813 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3814 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3815 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3816 "\t events/block/block_unplug/trigger\n"
3817 "\t The first disables tracing every time block_unplug is hit.\n"
3818 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3819 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3820 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3821 "\t Like function triggers, the counter is only decremented if it\n"
3822 "\t enabled or disabled tracing.\n"
3823 "\t To remove a trigger without a count:\n"
3824 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3825 "\t To remove a trigger with a count:\n"
3826 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3827 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3828;
3829
3830static ssize_t
3831tracing_readme_read(struct file *filp, char __user *ubuf,
3832 size_t cnt, loff_t *ppos)
3833{
3834 return simple_read_from_buffer(ubuf, cnt, ppos,
3835 readme_msg, strlen(readme_msg));
3836}
3837
5e2336a0 3838static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3839 .open = tracing_open_generic,
3840 .read = tracing_readme_read,
b444786f 3841 .llseek = generic_file_llseek,
7bd2f24c
IM
3842};
3843
42584c81
YY
3844static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3845{
3846 unsigned int *ptr = v;
69abe6a5 3847
42584c81
YY
3848 if (*pos || m->count)
3849 ptr++;
69abe6a5 3850
42584c81 3851 (*pos)++;
69abe6a5 3852
939c7a4f
YY
3853 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3854 ptr++) {
42584c81
YY
3855 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3856 continue;
69abe6a5 3857
42584c81
YY
3858 return ptr;
3859 }
69abe6a5 3860
42584c81
YY
3861 return NULL;
3862}
3863
3864static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3865{
3866 void *v;
3867 loff_t l = 0;
69abe6a5 3868
4c27e756
SRRH
3869 preempt_disable();
3870 arch_spin_lock(&trace_cmdline_lock);
3871
939c7a4f 3872 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3873 while (l <= *pos) {
3874 v = saved_cmdlines_next(m, v, &l);
3875 if (!v)
3876 return NULL;
69abe6a5
AP
3877 }
3878
42584c81
YY
3879 return v;
3880}
3881
3882static void saved_cmdlines_stop(struct seq_file *m, void *v)
3883{
4c27e756
SRRH
3884 arch_spin_unlock(&trace_cmdline_lock);
3885 preempt_enable();
42584c81 3886}
69abe6a5 3887
42584c81
YY
3888static int saved_cmdlines_show(struct seq_file *m, void *v)
3889{
3890 char buf[TASK_COMM_LEN];
3891 unsigned int *pid = v;
69abe6a5 3892
4c27e756 3893 __trace_find_cmdline(*pid, buf);
42584c81
YY
3894 seq_printf(m, "%d %s\n", *pid, buf);
3895 return 0;
3896}
3897
3898static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3899 .start = saved_cmdlines_start,
3900 .next = saved_cmdlines_next,
3901 .stop = saved_cmdlines_stop,
3902 .show = saved_cmdlines_show,
3903};
3904
3905static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906{
3907 if (tracing_disabled)
3908 return -ENODEV;
3909
3910 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3911}
3912
3913static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3914 .open = tracing_saved_cmdlines_open,
3915 .read = seq_read,
3916 .llseek = seq_lseek,
3917 .release = seq_release,
69abe6a5
AP
3918};
3919
939c7a4f
YY
3920static ssize_t
3921tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
3924 char buf[64];
3925 int r;
3926
3927 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3928 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3929 arch_spin_unlock(&trace_cmdline_lock);
3930
3931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932}
3933
3934static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3935{
3936 kfree(s->saved_cmdlines);
3937 kfree(s->map_cmdline_to_pid);
3938 kfree(s);
3939}
3940
3941static int tracing_resize_saved_cmdlines(unsigned int val)
3942{
3943 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3944
a6af8fbf 3945 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3946 if (!s)
3947 return -ENOMEM;
3948
3949 if (allocate_cmdlines_buffer(val, s) < 0) {
3950 kfree(s);
3951 return -ENOMEM;
3952 }
3953
3954 arch_spin_lock(&trace_cmdline_lock);
3955 savedcmd_temp = savedcmd;
3956 savedcmd = s;
3957 arch_spin_unlock(&trace_cmdline_lock);
3958 free_saved_cmdlines_buffer(savedcmd_temp);
3959
3960 return 0;
3961}
3962
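/*
 * Illustrative note (not part of the original file): the resize above
 * builds a complete replacement buffer first, swaps the savedcmd
 * pointer under trace_cmdline_lock, and only then frees the old one,
 * so lookups never see a half-initialized mapping.  From userspace:
 *
 *	# echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 */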
3963static ssize_t
3964tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3965 size_t cnt, loff_t *ppos)
3966{
3967 unsigned long val;
3968 int ret;
3969
3970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3971 if (ret)
3972 return ret;
3973
3974 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
3975 if (!val || val > PID_MAX_DEFAULT)
3976 return -EINVAL;
3977
3978 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3979 if (ret < 0)
3980 return ret;
3981
3982 *ppos += cnt;
3983
3984 return cnt;
3985}
3986
3987static const struct file_operations tracing_saved_cmdlines_size_fops = {
3988 .open = tracing_open_generic,
3989 .read = tracing_saved_cmdlines_size_read,
3990 .write = tracing_saved_cmdlines_size_write,
3991};
3992
9828413d
SRRH
3993#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3994static union trace_enum_map_item *
3995update_enum_map(union trace_enum_map_item *ptr)
3996{
3997 if (!ptr->map.enum_string) {
3998 if (ptr->tail.next) {
3999 ptr = ptr->tail.next;
4000 /* Set ptr to the next real item (skip head) */
4001 ptr++;
4002 } else
4003 return NULL;
4004 }
4005 return ptr;
4006}
4007
4008static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4009{
4010 union trace_enum_map_item *ptr = v;
4011
4012 /*
4013 * Paranoid! If ptr points to end, we don't want to increment past it.
4014 * This really should never happen.
4015 */
4016 ptr = update_enum_map(ptr);
4017 if (WARN_ON_ONCE(!ptr))
4018 return NULL;
4019
4020 ptr++;
4021
4022 (*pos)++;
4023
4024 ptr = update_enum_map(ptr);
4025
4026 return ptr;
4027}
4028
4029static void *enum_map_start(struct seq_file *m, loff_t *pos)
4030{
4031 union trace_enum_map_item *v;
4032 loff_t l = 0;
4033
4034 mutex_lock(&trace_enum_mutex);
4035
4036 v = trace_enum_maps;
4037 if (v)
4038 v++;
4039
4040 while (v && l < *pos) {
4041 v = enum_map_next(m, v, &l);
4042 }
4043
4044 return v;
4045}
4046
4047static void enum_map_stop(struct seq_file *m, void *v)
4048{
4049 mutex_unlock(&trace_enum_mutex);
4050}
4051
4052static int enum_map_show(struct seq_file *m, void *v)
4053{
4054 union trace_enum_map_item *ptr = v;
4055
4056 seq_printf(m, "%s %ld (%s)\n",
4057 ptr->map.enum_string, ptr->map.enum_value,
4058 ptr->map.system);
4059
4060 return 0;
4061}
4062
4063static const struct seq_operations tracing_enum_map_seq_ops = {
4064 .start = enum_map_start,
4065 .next = enum_map_next,
4066 .stop = enum_map_stop,
4067 .show = enum_map_show,
4068};
4069
4070static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071{
4072 if (tracing_disabled)
4073 return -ENODEV;
4074
4075 return seq_open(filp, &tracing_enum_map_seq_ops);
4076}
4077
4078static const struct file_operations tracing_enum_map_fops = {
4079 .open = tracing_enum_map_open,
4080 .read = seq_read,
4081 .llseek = seq_lseek,
4082 .release = seq_release,
4083};
4084
4085static inline union trace_enum_map_item *
4086trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4087{
4088 /* Return tail of array given the head */
4089 return ptr + ptr->head.length + 1;
4090}
4091
4092static void
4093trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094 int len)
4095{
4096 struct trace_enum_map **stop;
4097 struct trace_enum_map **map;
4098 union trace_enum_map_item *map_array;
4099 union trace_enum_map_item *ptr;
4100
4101 stop = start + len;
4102
4103 /*
4104 * The trace_enum_maps contains the map plus a head and tail item,
4105 * where the head holds the module and length of array, and the
4106 * tail holds a pointer to the next list.
4107 */
4108 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109 if (!map_array) {
a395d6a7 4110 pr_warn("Unable to allocate trace enum mapping\n");
9828413d
SRRH
4111 return;
4112 }
4113
4114 mutex_lock(&trace_enum_mutex);
4115
4116 if (!trace_enum_maps)
4117 trace_enum_maps = map_array;
4118 else {
4119 ptr = trace_enum_maps;
4120 for (;;) {
4121 ptr = trace_enum_jmp_to_tail(ptr);
4122 if (!ptr->tail.next)
4123 break;
4124 ptr = ptr->tail.next;
4125
4126 }
4127 ptr->tail.next = map_array;
4128 }
4129 map_array->head.mod = mod;
4130 map_array->head.length = len;
4131 map_array++;
4132
4133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134 map_array->map = **map;
4135 map_array++;
4136 }
4137 memset(map_array, 0, sizeof(*map_array));
4138
4139 mutex_unlock(&trace_enum_mutex);
4140}
4141
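/*
 * Illustrative sketch (not part of the original file) of the map_array
 * layout built above, for a module contributing three maps:
 *
 *	[ head: mod, length=3 ][ map 0 ][ map 1 ][ map 2 ][ tail: next ]
 *
 * trace_enum_jmp_to_tail() relies on this shape: ptr + length + 1
 * lands on the tail item, whose 'next' chains to the following array.
 */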
4142static void trace_create_enum_file(struct dentry *d_tracer)
4143{
4144 trace_create_file("enum_map", 0444, d_tracer,
4145 NULL, &tracing_enum_map_fops);
4146}
4147
4148#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4149static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4150static inline void trace_insert_enum_map_file(struct module *mod,
4151 struct trace_enum_map **start, int len) { }
4152#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4153
4154static void trace_insert_enum_map(struct module *mod,
4155 struct trace_enum_map **start, int len)
0c564a53
SRRH
4156{
4157 struct trace_enum_map **map;
0c564a53
SRRH
4158
4159 if (len <= 0)
4160 return;
4161
4162 map = start;
4163
4164 trace_event_enum_update(map, len);
9828413d
SRRH
4165
4166 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4167}
4168
bc0c38d1
SR
4169static ssize_t
4170tracing_set_trace_read(struct file *filp, char __user *ubuf,
4171 size_t cnt, loff_t *ppos)
4172{
2b6080f2 4173 struct trace_array *tr = filp->private_data;
ee6c2c1b 4174 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4175 int r;
4176
4177 mutex_lock(&trace_types_lock);
2b6080f2 4178 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4179 mutex_unlock(&trace_types_lock);
4180
4bf39a94 4181 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4182}
4183
b6f11df2
ACM
4184int tracer_init(struct tracer *t, struct trace_array *tr)
4185{
12883efb 4186 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4187 return t->init(tr);
4188}
4189
12883efb 4190static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4191{
4192 int cpu;
737223fb 4193
438ced17 4194 for_each_tracing_cpu(cpu)
12883efb 4195 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4196}
4197
12883efb 4198#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4199/* resize @trace_buf's buffer to the size of @size_buf's entries */
12883efb
SRRH
4200static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4201 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4202{
4203 int cpu, ret = 0;
4204
4205 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4206 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4207 ret = ring_buffer_resize(trace_buf->buffer,
4208 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4209 if (ret < 0)
4210 break;
12883efb
SRRH
4211 per_cpu_ptr(trace_buf->data, cpu)->entries =
4212 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4213 }
4214 } else {
12883efb
SRRH
4215 ret = ring_buffer_resize(trace_buf->buffer,
4216 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4217 if (ret == 0)
12883efb
SRRH
4218 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4219 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4220 }
4221
4222 return ret;
4223}
12883efb 4224#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4225
2b6080f2
SR
4226static int __tracing_resize_ring_buffer(struct trace_array *tr,
4227 unsigned long size, int cpu)
73c5162a
SR
4228{
4229 int ret;
4230
4231 /*
4232 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4233 * we use the size that was given, and we can forget about
4234 * expanding it later.
73c5162a 4235 */
55034cd6 4236 ring_buffer_expanded = true;
73c5162a 4237
b382ede6 4238 /* May be called before buffers are initialized */
12883efb 4239 if (!tr->trace_buffer.buffer)
b382ede6
SR
4240 return 0;
4241
12883efb 4242 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4243 if (ret < 0)
4244 return ret;
4245
12883efb 4246#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4247 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4248 !tr->current_trace->use_max_tr)
ef710e10
KM
4249 goto out;
4250
12883efb 4251 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4252 if (ret < 0) {
12883efb
SRRH
4253 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4254 &tr->trace_buffer, cpu);
73c5162a 4255 if (r < 0) {
a123c52b
SR
4256 /*
4257 * AARGH! We are left with different
4258 * size max buffer!!!!
4259 * The max buffer is our "snapshot" buffer.
4260 * When a tracer needs a snapshot (one of the
4261 * latency tracers), it swaps the max buffer
4262 * with the saved snap shot. We succeeded to
4263 * update the size of the main buffer, but failed to
4264 * update the size of the max buffer. But when we tried
4265 * to reset the main buffer to the original size, we
4266 * failed there too. This is very unlikely to
4267 * happen, but if it does, warn and kill all
4268 * tracing.
4269 */
73c5162a
SR
4270 WARN_ON(1);
4271 tracing_disabled = 1;
4272 }
4273 return ret;
4274 }
4275
438ced17 4276 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4277 set_buffer_entries(&tr->max_buffer, size);
438ced17 4278 else
12883efb 4279 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4280
ef710e10 4281 out:
12883efb
SRRH
4282#endif /* CONFIG_TRACER_MAX_TRACE */
4283
438ced17 4284 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4285 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4286 else
12883efb 4287 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4288
4289 return ret;
4290}
4291
2b6080f2
SR
4292static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293 unsigned long size, int cpu_id)
4f271a2a 4294{
83f40318 4295 int ret = size;
4f271a2a
VN
4296
4297 mutex_lock(&trace_types_lock);
4298
438ced17
VN
4299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4300 /* make sure, this cpu is enabled in the mask */
4301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302 ret = -EINVAL;
4303 goto out;
4304 }
4305 }
4f271a2a 4306
2b6080f2 4307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4308 if (ret < 0)
4309 ret = -ENOMEM;
4310
438ced17 4311out:
4f271a2a
VN
4312 mutex_unlock(&trace_types_lock);
4313
4314 return ret;
4315}
4316
ef710e10 4317
1852fcce
SR
4318/**
4319 * tracing_update_buffers - used by tracing facility to expand ring buffers
4320 *
4321 * To save memory when tracing is configured in but never used, the
4322 * ring buffers are initially set to a minimum size. Once a user
4323 * starts to use the tracing facility, they need to grow to their
4324 * default size.
4325 *
4326 * This function is to be called when a tracer is about to be used.
4327 */
4328int tracing_update_buffers(void)
4329{
4330 int ret = 0;
4331
1027fcb2 4332 mutex_lock(&trace_types_lock);
1852fcce 4333 if (!ring_buffer_expanded)
2b6080f2 4334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4335 RING_BUFFER_ALL_CPUS);
1027fcb2 4336 mutex_unlock(&trace_types_lock);
1852fcce
SR
4337
4338 return ret;
4339}
4340
577b785f
SR
4341struct trace_option_dentry;
4342
37aea98b 4343static void
2b6080f2 4344create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4345
6b450d25
SRRH
4346/*
4347 * Used to clear out the tracer before deletion of an instance.
4348 * Must have trace_types_lock held.
4349 */
4350static void tracing_set_nop(struct trace_array *tr)
4351{
4352 if (tr->current_trace == &nop_trace)
4353 return;
4354
50512ab5 4355 tr->current_trace->enabled--;
6b450d25
SRRH
4356
4357 if (tr->current_trace->reset)
4358 tr->current_trace->reset(tr);
4359
4360 tr->current_trace = &nop_trace;
4361}
4362
41d9c0be 4363static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4364{
09d23a1d
SRRH
4365 /* Only enable if the directory has been created already. */
4366 if (!tr->dir)
4367 return;
4368
37aea98b 4369 create_trace_option_files(tr, t);
09d23a1d
SRRH
4370}
4371
4372static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4373{
bc0c38d1 4374 struct tracer *t;
12883efb 4375#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4376 bool had_max_tr;
12883efb 4377#endif
d9e54076 4378 int ret = 0;
bc0c38d1 4379
1027fcb2
SR
4380 mutex_lock(&trace_types_lock);
4381
73c5162a 4382 if (!ring_buffer_expanded) {
2b6080f2 4383 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4384 RING_BUFFER_ALL_CPUS);
73c5162a 4385 if (ret < 0)
59f586db 4386 goto out;
73c5162a
SR
4387 ret = 0;
4388 }
4389
bc0c38d1
SR
4390 for (t = trace_types; t; t = t->next) {
4391 if (strcmp(t->name, buf) == 0)
4392 break;
4393 }
c2931e05
FW
4394 if (!t) {
4395 ret = -EINVAL;
4396 goto out;
4397 }
2b6080f2 4398 if (t == tr->current_trace)
bc0c38d1
SR
4399 goto out;
4400
607e2ea1
SRRH
4401 /* Some tracers are only allowed for the top level buffer */
4402 if (!trace_ok_for_array(t, tr)) {
4403 ret = -EINVAL;
4404 goto out;
4405 }
4406
cf6ab6d9
SRRH
4407 /* If trace pipe files are being read, we can't change the tracer */
4408 if (tr->current_trace->ref) {
4409 ret = -EBUSY;
4410 goto out;
4411 }
4412
9f029e83 4413 trace_branch_disable();
613f04a0 4414
50512ab5 4415 tr->current_trace->enabled--;
613f04a0 4416
2b6080f2
SR
4417 if (tr->current_trace->reset)
4418 tr->current_trace->reset(tr);
34600f0e 4419
12883efb 4420 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4421 tr->current_trace = &nop_trace;
34600f0e 4422
45ad21ca
SRRH
4423#ifdef CONFIG_TRACER_MAX_TRACE
4424 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4425
4426 if (had_max_tr && !t->use_max_tr) {
4427 /*
4428 * We need to make sure that the update_max_tr sees that
4429 * current_trace changed to nop_trace to keep it from
4430 * swapping the buffers after we resize it.
4431 * update_max_tr() is called with interrupts disabled,
4432 * so a synchronize_sched() is sufficient.
4433 */
4434 synchronize_sched();
3209cff4 4435 free_snapshot(tr);
ef710e10 4436 }
12883efb 4437#endif
12883efb
SRRH
4438
4439#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4440 if (t->use_max_tr && !had_max_tr) {
3209cff4 4441 ret = alloc_snapshot(tr);
d60da506
HT
4442 if (ret < 0)
4443 goto out;
ef710e10 4444 }
12883efb 4445#endif
577b785f 4446
1c80025a 4447 if (t->init) {
b6f11df2 4448 ret = tracer_init(t, tr);
1c80025a
FW
4449 if (ret)
4450 goto out;
4451 }
bc0c38d1 4452
2b6080f2 4453 tr->current_trace = t;
50512ab5 4454 tr->current_trace->enabled++;
9f029e83 4455 trace_branch_enable(tr);
bc0c38d1
SR
4456 out:
4457 mutex_unlock(&trace_types_lock);
4458
d9e54076
PZ
4459 return ret;
4460}
4461
4462static ssize_t
4463tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4464 size_t cnt, loff_t *ppos)
4465{
607e2ea1 4466 struct trace_array *tr = filp->private_data;
ee6c2c1b 4467 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4468 int i;
4469 size_t ret;
e6e7a65a
FW
4470 int err;
4471
4472 ret = cnt;
d9e54076 4473
ee6c2c1b
LZ
4474 if (cnt > MAX_TRACER_SIZE)
4475 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4476
4477 if (copy_from_user(&buf, ubuf, cnt))
4478 return -EFAULT;
4479
4480 buf[cnt] = 0;
4481
4482 /* strip ending whitespace. */
4483 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4484 buf[i] = 0;
4485
607e2ea1 4486 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4487 if (err)
4488 return err;
d9e54076 4489
cf8517cf 4490 *ppos += ret;
bc0c38d1 4491
c2931e05 4492 return ret;
bc0c38d1
SR
4493}
4494
4495static ssize_t
6508fa76
SF
4496tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497 size_t cnt, loff_t *ppos)
bc0c38d1 4498{
bc0c38d1
SR
4499 char buf[64];
4500 int r;
4501
cffae437 4502 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4503 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4504 if (r > sizeof(buf))
4505 r = sizeof(buf);
4bf39a94 4506 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4507}
4508
4509static ssize_t
6508fa76
SF
4510tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511 size_t cnt, loff_t *ppos)
bc0c38d1 4512{
5e39841c 4513 unsigned long val;
c6caeeb1 4514 int ret;
bc0c38d1 4515
22fe9b54
PH
4516 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517 if (ret)
c6caeeb1 4518 return ret;
bc0c38d1
SR
4519
4520 *ptr = val * 1000;
4521
4522 return cnt;
4523}
4524
6508fa76
SF
4525static ssize_t
4526tracing_thresh_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4528{
4529 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_thresh_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4535{
4536 struct trace_array *tr = filp->private_data;
4537 int ret;
4538
4539 mutex_lock(&trace_types_lock);
4540 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4541 if (ret < 0)
4542 goto out;
4543
4544 if (tr->current_trace->update_thresh) {
4545 ret = tr->current_trace->update_thresh(tr);
4546 if (ret < 0)
4547 goto out;
4548 }
4549
4550 ret = cnt;
4551out:
4552 mutex_unlock(&trace_types_lock);
4553
4554 return ret;
4555}
4556
e428abbb
CG
4557#ifdef CONFIG_TRACER_MAX_TRACE
4558
6508fa76
SF
4559static ssize_t
4560tracing_max_lat_read(struct file *filp, char __user *ubuf,
4561 size_t cnt, loff_t *ppos)
4562{
4563 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4564}
4565
4566static ssize_t
4567tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4568 size_t cnt, loff_t *ppos)
4569{
4570 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4571}
4572
e428abbb
CG
4573#endif
4574
b3806b43
SR
4575static int tracing_open_pipe(struct inode *inode, struct file *filp)
4576{
15544209 4577 struct trace_array *tr = inode->i_private;
b3806b43 4578 struct trace_iterator *iter;
b04cc6b1 4579 int ret = 0;
b3806b43
SR
4580
4581 if (tracing_disabled)
4582 return -ENODEV;
4583
7b85af63
SRRH
4584 if (trace_array_get(tr) < 0)
4585 return -ENODEV;
4586
b04cc6b1
FW
4587 mutex_lock(&trace_types_lock);
4588
b3806b43
SR
4589 /* create a buffer to store the information to pass to userspace */
4590 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4591 if (!iter) {
4592 ret = -ENOMEM;
f77d09a3 4593 __trace_array_put(tr);
b04cc6b1
FW
4594 goto out;
4595 }
b3806b43 4596
3a161d99 4597 trace_seq_init(&iter->seq);
d716ff71 4598 iter->trace = tr->current_trace;
d7350c3f 4599
4462344e 4600 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4601 ret = -ENOMEM;
d7350c3f 4602 goto fail;
4462344e
RR
4603 }
4604
a309720c 4605 /* trace pipe does not show start of buffer */
4462344e 4606 cpumask_setall(iter->started);
a309720c 4607
983f938a 4608 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4609 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4610
8be0709f 4611 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4612 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4613 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4614
15544209
ON
4615 iter->tr = tr;
4616 iter->trace_buffer = &tr->trace_buffer;
4617 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4618 mutex_init(&iter->mutex);
b3806b43
SR
4619 filp->private_data = iter;
4620
107bad8b
SR
4621 if (iter->trace->pipe_open)
4622 iter->trace->pipe_open(iter);
107bad8b 4623
b444786f 4624 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4625
4626 tr->current_trace->ref++;
b04cc6b1
FW
4627out:
4628 mutex_unlock(&trace_types_lock);
4629 return ret;
d7350c3f
FW
4630
4631fail:
4632 kfree(iter->trace);
4633 kfree(iter);
7b85af63 4634 __trace_array_put(tr);
d7350c3f
FW
4635 mutex_unlock(&trace_types_lock);
4636 return ret;
b3806b43
SR
4637}
4638
4639static int tracing_release_pipe(struct inode *inode, struct file *file)
4640{
4641 struct trace_iterator *iter = file->private_data;
15544209 4642 struct trace_array *tr = inode->i_private;
b3806b43 4643
b04cc6b1
FW
4644 mutex_lock(&trace_types_lock);
4645
cf6ab6d9
SRRH
4646 tr->current_trace->ref--;
4647
29bf4a5e 4648 if (iter->trace->pipe_close)
c521efd1
SR
4649 iter->trace->pipe_close(iter);
4650
b04cc6b1
FW
4651 mutex_unlock(&trace_types_lock);
4652
4462344e 4653 free_cpumask_var(iter->started);
d7350c3f 4654 mutex_destroy(&iter->mutex);
b3806b43 4655 kfree(iter);
b3806b43 4656
7b85af63
SRRH
4657 trace_array_put(tr);
4658
b3806b43
SR
4659 return 0;
4660}
4661
2a2cc8f7 4662static unsigned int
cc60cdc9 4663trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4664{
983f938a
SRRH
4665 struct trace_array *tr = iter->tr;
4666
15693458
SRRH
4667 /* Iterators are static, they should be filled or empty */
4668 if (trace_buffer_iter(iter, iter->cpu_file))
4669 return POLLIN | POLLRDNORM;
2a2cc8f7 4670
983f938a 4671 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4672 /*
4673 * Always select as readable when in blocking mode
4674 */
4675 return POLLIN | POLLRDNORM;
15693458 4676 else
12883efb 4677 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4678 filp, poll_table);
2a2cc8f7 4679}
2a2cc8f7 4680
cc60cdc9
SR
4681static unsigned int
4682tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4683{
4684 struct trace_iterator *iter = filp->private_data;
4685
4686 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4687}
4688
d716ff71 4689/* Must be called with iter->mutex held. */
ff98781b 4690static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4691{
4692 struct trace_iterator *iter = filp->private_data;
8b8b3683 4693 int ret;
b3806b43 4694
b3806b43 4695 while (trace_empty(iter)) {
2dc8f095 4696
107bad8b 4697 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4698 return -EAGAIN;
107bad8b 4699 }
2dc8f095 4700
b3806b43 4701 /*
250bfd3d 4702 * We block until we read something and tracing is disabled.
b3806b43
SR
4703 * We still block if tracing is disabled, but we have never
4704 * read anything. This allows a user to cat this file, and
4705 * then enable tracing. But after we have read something,
4706 * we give an EOF when tracing is again disabled.
4707 *
4708 * iter->pos will be 0 if we haven't read anything.
4709 */
10246fa3 4710 if (!tracing_is_on() && iter->pos)
b3806b43 4711 break;
f4874261
SRRH
4712
4713 mutex_unlock(&iter->mutex);
4714
e30f53aa 4715 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4716
4717 mutex_lock(&iter->mutex);
4718
8b8b3683
SRRH
4719 if (ret)
4720 return ret;
b3806b43
SR
4721 }
4722
ff98781b
EGM
4723 return 1;
4724}
4725
4726/*
4727 * Consumer reader.
4728 */
4729static ssize_t
4730tracing_read_pipe(struct file *filp, char __user *ubuf,
4731 size_t cnt, loff_t *ppos)
4732{
4733 struct trace_iterator *iter = filp->private_data;
4734 ssize_t sret;
4735
4736 /* return any leftover data */
4737 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4738 if (sret != -EBUSY)
4739 return sret;
4740
f9520750 4741 trace_seq_init(&iter->seq);
ff98781b 4742
d7350c3f
FW
4743 /*
4744 * Avoid more than one consumer on a single file descriptor.
4745 * This is just a matter of trace coherency; the ring buffer
4746 * itself is protected.
4747 */
4748 mutex_lock(&iter->mutex);
ff98781b
EGM
4749 if (iter->trace->read) {
4750 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4751 if (sret)
4752 goto out;
4753 }
4754
4755waitagain:
4756 sret = tracing_wait_pipe(filp);
4757 if (sret <= 0)
4758 goto out;
4759
b3806b43 4760 /* stop when tracing is finished */
ff98781b
EGM
4761 if (trace_empty(iter)) {
4762 sret = 0;
107bad8b 4763 goto out;
ff98781b 4764 }
b3806b43
SR
4765
4766 if (cnt >= PAGE_SIZE)
4767 cnt = PAGE_SIZE - 1;
4768
53d0aa77 4769 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4770 memset(&iter->seq, 0,
4771 sizeof(struct trace_iterator) -
4772 offsetof(struct trace_iterator, seq));
ed5467da 4773 cpumask_clear(iter->started);
4823ed7e 4774 iter->pos = -1;
b3806b43 4775
4f535968 4776 trace_event_read_lock();
7e53bd42 4777 trace_access_lock(iter->cpu_file);
955b61e5 4778 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4779 enum print_line_t ret;
5ac48378 4780 int save_len = iter->seq.seq.len;
088b1e42 4781
f9896bf3 4782 ret = print_trace_line(iter);
2c4f035f 4783 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4784 /* don't print partial lines */
5ac48378 4785 iter->seq.seq.len = save_len;
b3806b43 4786 break;
088b1e42 4787 }
b91facc3
FW
4788 if (ret != TRACE_TYPE_NO_CONSUME)
4789 trace_consume(iter);
b3806b43 4790
5ac48378 4791 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4792 break;
ee5e51f5
JO
4793
4794 /*
4795 * Setting the full flag means we reached the trace_seq buffer
4796 * size and we should leave by partial output condition above.
4797 * One of the trace_seq_* functions is not used properly.
4798 */
4799 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4800 iter->ent->type);
b3806b43 4801 }
7e53bd42 4802 trace_access_unlock(iter->cpu_file);
4f535968 4803 trace_event_read_unlock();
b3806b43 4804
b3806b43 4805 /* Now copy what we have to the user */
6c6c2796 4806 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4807 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4808 trace_seq_init(&iter->seq);
9ff4b974
PP
4809
4810 /*
25985edc 4811 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4812 * entries, go back to wait for more entries.
4813 */
6c6c2796 4814 if (sret == -EBUSY)
9ff4b974 4815 goto waitagain;
b3806b43 4816
107bad8b 4817out:
d7350c3f 4818 mutex_unlock(&iter->mutex);
107bad8b 4819
6c6c2796 4820 return sret;
b3806b43
SR
4821}
4822
3c56819b
EGM
4823static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4824 unsigned int idx)
4825{
4826 __free_page(spd->pages[idx]);
4827}
4828
28dfef8f 4829static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4830 .can_merge = 0,
34cd4998 4831 .confirm = generic_pipe_buf_confirm,
92fdd98c 4832 .release = generic_pipe_buf_release,
34cd4998
SR
4833 .steal = generic_pipe_buf_steal,
4834 .get = generic_pipe_buf_get,
3c56819b
EGM
4835};
4836
34cd4998 4837static size_t
fa7c7f6e 4838tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4839{
4840 size_t count;
74f06bb7 4841 int save_len;
34cd4998
SR
4842 int ret;
4843
4844 /* Seq buffer is page-sized, exactly what we need. */
4845 for (;;) {
74f06bb7 4846 save_len = iter->seq.seq.len;
34cd4998 4847 ret = print_trace_line(iter);
74f06bb7
SRRH
4848
4849 if (trace_seq_has_overflowed(&iter->seq)) {
4850 iter->seq.seq.len = save_len;
34cd4998
SR
4851 break;
4852 }
74f06bb7
SRRH
4853
4854 /*
4855 * This should not be hit, because it should only
4856 * be set if the iter->seq overflowed. But check it
4857 * anyway to be safe.
4858 */
34cd4998 4859 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4860 iter->seq.seq.len = save_len;
4861 break;
4862 }
4863
5ac48378 4864 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4865 if (rem < count) {
4866 rem = 0;
4867 iter->seq.seq.len = save_len;
34cd4998
SR
4868 break;
4869 }
4870
74e7ff8c
LJ
4871 if (ret != TRACE_TYPE_NO_CONSUME)
4872 trace_consume(iter);
34cd4998 4873 rem -= count;
955b61e5 4874 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4875 rem = 0;
4876 iter->ent = NULL;
4877 break;
4878 }
4879 }
4880
4881 return rem;
4882}
4883
3c56819b
EGM
4884static ssize_t tracing_splice_read_pipe(struct file *filp,
4885 loff_t *ppos,
4886 struct pipe_inode_info *pipe,
4887 size_t len,
4888 unsigned int flags)
4889{
35f3d14d
JA
4890 struct page *pages_def[PIPE_DEF_BUFFERS];
4891 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4892 struct trace_iterator *iter = filp->private_data;
4893 struct splice_pipe_desc spd = {
35f3d14d
JA
4894 .pages = pages_def,
4895 .partial = partial_def,
34cd4998 4896 .nr_pages = 0, /* This gets updated below. */
047fe360 4897 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4898 .flags = flags,
4899 .ops = &tracing_pipe_buf_ops,
4900 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4901 };
4902 ssize_t ret;
34cd4998 4903 size_t rem;
3c56819b
EGM
4904 unsigned int i;
4905
35f3d14d
JA
4906 if (splice_grow_spd(pipe, &spd))
4907 return -ENOMEM;
4908
d7350c3f 4909 mutex_lock(&iter->mutex);
3c56819b
EGM
4910
4911 if (iter->trace->splice_read) {
4912 ret = iter->trace->splice_read(iter, filp,
4913 ppos, pipe, len, flags);
4914 if (ret)
34cd4998 4915 goto out_err;
3c56819b
EGM
4916 }
4917
4918 ret = tracing_wait_pipe(filp);
4919 if (ret <= 0)
34cd4998 4920 goto out_err;
3c56819b 4921
955b61e5 4922 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4923 ret = -EFAULT;
34cd4998 4924 goto out_err;
3c56819b
EGM
4925 }
4926
4f535968 4927 trace_event_read_lock();
7e53bd42 4928 trace_access_lock(iter->cpu_file);
4f535968 4929
3c56819b 4930 /* Fill as many pages as possible. */
a786c06d 4931 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4932 spd.pages[i] = alloc_page(GFP_KERNEL);
4933 if (!spd.pages[i])
34cd4998 4934 break;
3c56819b 4935
fa7c7f6e 4936 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4937
4938 /* Copy the data into the page, so we can start over. */
4939 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4940 page_address(spd.pages[i]),
5ac48378 4941 trace_seq_used(&iter->seq));
3c56819b 4942 if (ret < 0) {
35f3d14d 4943 __free_page(spd.pages[i]);
3c56819b
EGM
4944 break;
4945 }
35f3d14d 4946 spd.partial[i].offset = 0;
5ac48378 4947 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4948
f9520750 4949 trace_seq_init(&iter->seq);
3c56819b
EGM
4950 }
4951
7e53bd42 4952 trace_access_unlock(iter->cpu_file);
4f535968 4953 trace_event_read_unlock();
d7350c3f 4954 mutex_unlock(&iter->mutex);
3c56819b
EGM
4955
4956 spd.nr_pages = i;
4957
a29054d9
SRRH
4958 if (i)
4959 ret = splice_to_pipe(pipe, &spd);
4960 else
4961 ret = 0;
35f3d14d 4962out:
047fe360 4963 splice_shrink_spd(&spd);
35f3d14d 4964 return ret;
3c56819b 4965
34cd4998 4966out_err:
d7350c3f 4967 mutex_unlock(&iter->mutex);
35f3d14d 4968 goto out;
3c56819b
EGM
4969}
4970
a98a3c3f
SR
4971static ssize_t
4972tracing_entries_read(struct file *filp, char __user *ubuf,
4973 size_t cnt, loff_t *ppos)
4974{
0bc392ee
ON
4975 struct inode *inode = file_inode(filp);
4976 struct trace_array *tr = inode->i_private;
4977 int cpu = tracing_get_cpu(inode);
438ced17
VN
4978 char buf[64];
4979 int r = 0;
4980 ssize_t ret;
a98a3c3f 4981
db526ca3 4982 mutex_lock(&trace_types_lock);
438ced17 4983
0bc392ee 4984 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4985 int cpu, buf_size_same;
4986 unsigned long size;
4987
4988 size = 0;
4989 buf_size_same = 1;
4990 /* check if all cpu sizes are same */
4991 for_each_tracing_cpu(cpu) {
4992 /* fill in the size from first enabled cpu */
4993 if (size == 0)
12883efb
SRRH
4994 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4995 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4996 buf_size_same = 0;
4997 break;
4998 }
4999 }
5000
5001 if (buf_size_same) {
5002 if (!ring_buffer_expanded)
5003 r = sprintf(buf, "%lu (expanded: %lu)\n",
5004 size >> 10,
5005 trace_buf_size >> 10);
5006 else
5007 r = sprintf(buf, "%lu\n", size >> 10);
5008 } else
5009 r = sprintf(buf, "X\n");
5010 } else
0bc392ee 5011 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5012
db526ca3
SR
5013 mutex_unlock(&trace_types_lock);
5014
438ced17
VN
5015 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5016 return ret;
a98a3c3f
SR
5017}
5018
5019static ssize_t
5020tracing_entries_write(struct file *filp, const char __user *ubuf,
5021 size_t cnt, loff_t *ppos)
5022{
0bc392ee
ON
5023 struct inode *inode = file_inode(filp);
5024 struct trace_array *tr = inode->i_private;
a98a3c3f 5025 unsigned long val;
4f271a2a 5026 int ret;
a98a3c3f 5027
22fe9b54
PH
5028 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5029 if (ret)
c6caeeb1 5030 return ret;
a98a3c3f
SR
5031
5032 /* must have at least 1 entry */
5033 if (!val)
5034 return -EINVAL;
5035
1696b2b0
SR
5036 /* value is in KB */
5037 val <<= 10;
0bc392ee 5038 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5039 if (ret < 0)
5040 return ret;
a98a3c3f 5041
cf8517cf 5042 *ppos += cnt;
a98a3c3f 5043
4f271a2a
VN
5044 return cnt;
5045}
bf5e6519 5046
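/*
 * Illustrative note (not part of the original file): buffer_size_kb
 * takes kilobytes, hence the 'val <<= 10' above.  Resizing every
 * per-cpu buffer to 4 MB from userspace:
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * Writing the per_cpu/cpuN/buffer_size_kb file instead resizes only
 * that CPU's buffer, selected via tracing_get_cpu(inode) above.
 */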
f81ab074
VN
5047static ssize_t
5048tracing_total_entries_read(struct file *filp, char __user *ubuf,
5049 size_t cnt, loff_t *ppos)
5050{
5051 struct trace_array *tr = filp->private_data;
5052 char buf[64];
5053 int r, cpu;
5054 unsigned long size = 0, expanded_size = 0;
5055
5056 mutex_lock(&trace_types_lock);
5057 for_each_tracing_cpu(cpu) {
12883efb 5058 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5059 if (!ring_buffer_expanded)
5060 expanded_size += trace_buf_size >> 10;
5061 }
5062 if (ring_buffer_expanded)
5063 r = sprintf(buf, "%lu\n", size);
5064 else
5065 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5066 mutex_unlock(&trace_types_lock);
5067
5068 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5069}
5070
4f271a2a
VN
5071static ssize_t
5072tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5073 size_t cnt, loff_t *ppos)
5074{
5075 /*
5076 * There is no need to read what the user has written; this function
5077 * only exists so that an "echo" into this file does not fail.
5078 */
5079
5080 *ppos += cnt;
a98a3c3f
SR
5081
5082 return cnt;
5083}
5084
4f271a2a
VN
5085static int
5086tracing_free_buffer_release(struct inode *inode, struct file *filp)
5087{
2b6080f2
SR
5088 struct trace_array *tr = inode->i_private;
5089
cf30cf67 5090 /* disable tracing ? */
983f938a 5091 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5092 tracer_tracing_off(tr);
4f271a2a 5093 /* resize the ring buffer to 0 */
2b6080f2 5094 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5095
7b85af63
SRRH
5096 trace_array_put(tr);
5097
4f271a2a
VN
5098 return 0;
5099}
5100
5bf9a1ee
PP
5101static ssize_t
5102tracing_mark_write(struct file *filp, const char __user *ubuf,
5103 size_t cnt, loff_t *fpos)
5104{
d696b58c 5105 unsigned long addr = (unsigned long)ubuf;
2d71619c 5106 struct trace_array *tr = filp->private_data;
d696b58c
SR
5107 struct ring_buffer_event *event;
5108 struct ring_buffer *buffer;
5109 struct print_entry *entry;
5110 unsigned long irq_flags;
5111 struct page *pages[2];
6edb2a8a 5112 void *map_page[2];
d696b58c
SR
5113 int nr_pages = 1;
5114 ssize_t written;
d696b58c
SR
5115 int offset;
5116 int size;
5117 int len;
5118 int ret;
6edb2a8a 5119 int i;
5bf9a1ee 5120
c76f0694 5121 if (tracing_disabled)
5bf9a1ee
PP
5122 return -EINVAL;
5123
983f938a 5124 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5125 return -EINVAL;
5126
5bf9a1ee
PP
5127 if (cnt > TRACE_BUF_SIZE)
5128 cnt = TRACE_BUF_SIZE;
5129
d696b58c
SR
5130 /*
5131 * Userspace is injecting traces into the kernel trace buffer.
5132 * We want to be as non intrusive as possible.
5133 * To do so, we do not want to allocate any special buffers
5134 * or take any locks, but instead write the userspace data
5135 * straight into the ring buffer.
5136 *
5137 * First we need to pin the userspace buffer into memory,
5138 * which it most likely already is, because userspace just referenced it.
5139 * But there's no guarantee that it is. By using get_user_pages_fast()
5140 * and kmap_atomic/kunmap_atomic() we can get access to the
5141 * pages directly. We then write the data directly into the
5142 * ring buffer.
5143 */
5144 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5145
d696b58c
SR
5146 /* check if we cross pages */
5147 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5148 nr_pages = 2;
5149
5150 offset = addr & (PAGE_SIZE - 1);
5151 addr &= PAGE_MASK;
5152
5153 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5154 if (ret < nr_pages) {
5155 while (--ret >= 0)
5156 put_page(pages[ret]);
5157 written = -EFAULT;
5158 goto out;
5bf9a1ee 5159 }
d696b58c 5160
6edb2a8a
SR
5161 for (i = 0; i < nr_pages; i++)
5162 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5163
5164 local_save_flags(irq_flags);
5165 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5166 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5167 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5168 irq_flags, preempt_count());
5169 if (!event) {
5170 /* Ring buffer disabled, return as if not open for write */
5171 written = -EBADF;
5172 goto out_unlock;
5bf9a1ee 5173 }
d696b58c
SR
5174
5175 entry = ring_buffer_event_data(event);
5176 entry->ip = _THIS_IP_;
5177
5178 if (nr_pages == 2) {
5179 len = PAGE_SIZE - offset;
6edb2a8a
SR
5180 memcpy(&entry->buf, map_page[0] + offset, len);
5181 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5182 } else
6edb2a8a 5183 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5184
d696b58c
SR
5185 if (entry->buf[cnt - 1] != '\n') {
5186 entry->buf[cnt] = '\n';
5187 entry->buf[cnt + 1] = '\0';
5188 } else
5189 entry->buf[cnt] = '\0';
5190
7ffbd48d 5191 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5192
d696b58c 5193 written = cnt;
5bf9a1ee 5194
d696b58c 5195 *fpos += written;
1aa54bca 5196
d696b58c 5197 out_unlock:
7215853e 5198 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5199 kunmap_atomic(map_page[i]);
5200 put_page(pages[i]);
5201 }
d696b58c 5202 out:
1aa54bca 5203 return written;
5bf9a1ee
PP
5204}
5205
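/*
 * Illustrative note (not part of the original file): trace_marker lets
 * userspace annotate the trace without a special API:
 *
 *	# echo "hit checkpoint A" > /sys/kernel/debug/tracing/trace_marker
 *
 * The write becomes a TRACE_PRINT entry; the page pinning above lets
 * the user data be copied straight into the ring buffer with no extra
 * allocation, even when the string straddles two pages.
 */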
13f16d20 5206static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5207{
2b6080f2 5208 struct trace_array *tr = m->private;
5079f326
Z
5209 int i;
5210
5211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5212 seq_printf(m,
5079f326 5213 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5215 i == tr->clock_id ? "]" : "");
13f16d20 5216 seq_putc(m, '\n');
5079f326 5217
13f16d20 5218 return 0;
5079f326
Z
5219}
5220
e1e232ca 5221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5222{
5079f326
Z
5223 int i;
5224
5079f326
Z
5225 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5226 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5227 break;
5228 }
5229 if (i == ARRAY_SIZE(trace_clocks))
5230 return -EINVAL;
5231
5079f326
Z
5232 mutex_lock(&trace_types_lock);
5233
2b6080f2
SR
5234 tr->clock_id = i;
5235
12883efb 5236 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5237
60303ed3
DS
5238 /*
5239 * New clock may not be consistent with the previous clock.
5240 * Reset the buffer so that it doesn't have incomparable timestamps.
5241 */
9457158b 5242 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5243
5244#ifdef CONFIG_TRACER_MAX_TRACE
5245 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5246 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5247 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5248#endif
60303ed3 5249
5079f326
Z
5250 mutex_unlock(&trace_types_lock);
5251
e1e232ca
SR
5252 return 0;
5253}
5254
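/*
 * Illustrative note (not part of the original file): switching clocks
 * from userspace (output abridged):
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * The buffers are reset above because timestamps taken with the old
 * clock are not comparable with those taken with the new one.
 */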
5255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5256 size_t cnt, loff_t *fpos)
5257{
5258 struct seq_file *m = filp->private_data;
5259 struct trace_array *tr = m->private;
5260 char buf[64];
5261 const char *clockstr;
5262 int ret;
5263
5264 if (cnt >= sizeof(buf))
5265 return -EINVAL;
5266
5267 if (copy_from_user(&buf, ubuf, cnt))
5268 return -EFAULT;
5269
5270 buf[cnt] = 0;
5271
5272 clockstr = strstrip(buf);
5273
5274 ret = tracing_set_clock(tr, clockstr);
5275 if (ret)
5276 return ret;
5277
5079f326
Z
5278 *fpos += cnt;
5279
5280 return cnt;
5281}
5282
13f16d20
LZ
5283static int tracing_clock_open(struct inode *inode, struct file *file)
5284{
7b85af63
SRRH
5285 struct trace_array *tr = inode->i_private;
5286 int ret;
5287
13f16d20
LZ
5288 if (tracing_disabled)
5289 return -ENODEV;
2b6080f2 5290
7b85af63
SRRH
5291 if (trace_array_get(tr))
5292 return -ENODEV;
5293
5294 ret = single_open(file, tracing_clock_show, inode->i_private);
5295 if (ret < 0)
5296 trace_array_put(tr);
5297
5298 return ret;
13f16d20
LZ
5299}
5300
6de58e62
SRRH
5301struct ftrace_buffer_info {
5302 struct trace_iterator iter;
5303 void *spare;
5304 unsigned int read;
5305};
5306
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
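
/*
 * Summary of the switch above, as seen from the "snapshot" file:
 *   echo 0 > snapshot  - free the snapshot buffer (all-CPUs file only)
 *   echo 1 > snapshot  - allocate the buffer if needed and take a snapshot
 *   echo 2 > snapshot  - (or any other value) clear the snapshot contents
 *                        without freeing the buffer
 */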

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
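
/*
 * Ring-buffer pages handed to splice below are shared, not copied: each
 * page carries a buffer_ref reference count, the get/release callbacks
 * above adjust it, and the page is handed back to the ring buffer via
 * ring_buffer_free_read_page() only when the count drops to zero.
 */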

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			return ret;

		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			return -EAGAIN;

		ret = wait_on_pipe(iter, true);
		if (ret)
			return ret;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
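
/*
 * Example "per_cpu/cpuN/stats" output produced above (values illustrative):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5672
 *   oldest event ts:  2296.498091
 *   now ts:  2307.643146
 *   dropped events: 0
 *   read events: 129
 */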

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
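
/*
 * The "snapshot" function command registered below is driven through
 * set_ftrace_filter, e.g. (function name illustrative; any function
 * listed in available_filter_functions may be used):
 *
 *   echo 'do_fork:snapshot' > set_ftrace_filter     - snapshot on every hit
 *   echo 'do_fork:snapshot:5' > set_ftrace_filter   - only the first 5 hits
 *   echo '!do_fork:snapshot' > set_ftrace_filter    - remove the probe
 */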

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}
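
/*
 * Storing cpu + 1 (rather than cpu) in i_cdev keeps CPU 0 distinguishable
 * from the NULL "no per-cpu file" case; tracing_get_cpu() reverses the
 * encoding and treats a NULL i_cdev as RING_BUFFER_ALL_CPUS.
 */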

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

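/*
 * These fops back the per-tracer files under "options/"; they accept
 * only "0" or "1", e.g.:
 *
 *   echo 1 > options/<option>    - enable the tracer option
 *   echo 0 > options/<option>    - disable it
 *
 * Reading the file returns the current value.
 */
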
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
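
/*
 * Worked example: for flag bit 5, the file's private data points at
 * &tr->trace_flags_index[5], which stores the value 5.  Subtracting
 * that value from the pointer lands on &tr->trace_flags_index[0],
 * and container_of() then recovers the enclosing trace_array.
 */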

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
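
/*
 * These fops back the per-instance "tracing_on" file:
 *   echo 0 > tracing_on   - stop recording into the ring buffer
 *   echo 1 > tracing_on   - resume recording
 * The buffer contents are preserved across the off/on transition.
 */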

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
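
/*
 * Note: unless "alloc_snapshot" was given on the kernel command line,
 * the max/snapshot buffer above is created with a token size of one
 * page and is only expanded when a snapshot is actually requested.
 */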

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
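
/*
 * Instances are managed from user space with mkdir/rmdir in tracefs:
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 * instance_mkdir() above and instance_rmdir() below are the callbacks
 * tracefs invokes for those operations.
 */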

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);
}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

3928a8a2 7203__init static int tracer_alloc_buffers(void)
bc0c38d1 7204{
73c5162a 7205 int ring_buf_size;
9e01c1b7 7206 int ret = -ENOMEM;
4c11d7ae 7207
b5e87c05
SRRH
7208 /*
7209 * Make sure we don't accidently add more trace options
7210 * than we have bits for.
7211 */
9a38a885 7212 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 7213
	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}
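	/* On failure we unwind below; tracing_disabled stays set, so tracing never starts. */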

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
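
/*
 * Illustrative sketch (not upstream code): tracer_alloc_buffers() above
 * follows the kernel's standard goto-unwind idiom, where each error
 * label frees only what was acquired before the failing step.  The
 * function and variable names below are hypothetical; kmalloc()/kfree()
 * come from <linux/slab.h>.
 */
static inline int example_goto_unwind_init(void)
{
	void *first, *second;
	int ret = -ENOMEM;

	first = kmalloc(64, GFP_KERNEL);
	if (!first)
		goto out;

	second = kmalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	/* Success: like the trace buffers above, both allocations live on. */
	return 0;

out_free_first:
	kfree(first);
out:
	return ret;
}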

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
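
/*
 * trace_init() is invoked early in start_kernel(), well before initcalls
 * run, so the ring buffer and event infrastructure are ready during boot.
 */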

__init static int clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer name lives in an init section,
	 * which is about to be freed. This function runs at
	 * late_initcall time; if the boot tracer was never registered,
	 * clear the pointer to prevent later registrations from
	 * accessing the soon-to-be-freed buffer.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);
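
/*
 * Initcall ordering note: fs_initcall() runs before late_initcall(), so
 * tracer_init_tracefs() has created the tracefs files before
 * clear_boot_tracer() checks for an unregistered boot tracer.
 */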