kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

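/*
 * Usage sketch (not specific to this file): the tracers below are driven
 * from user space through the tracing debugfs interface, typically at
 * /sys/kernel/debug/tracing.  Assuming an irqsoff-enabled kernel, a
 * minimal session looks roughly like:
 *
 *	echo irqsoff > current_tracer	# or preemptoff / preemptirqsoff
 *	echo 1 > tracing_on
 *	...				# run the workload of interest
 *	echo 0 > tracing_on
 *	cat trace			# longest critical section seen so far
 *	cat tracing_max_latency		# its length, in microseconds
 */
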
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
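/*
 * Toggle the display-graph option: stop the tracer in its current mode,
 * clear the per-cpu tracing state and the recorded maximum, then restart
 * it with (or without) the function graph tracer.
 */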
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

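/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled and, if this beats the current maximum (or
 * exceeds tracing_thresh when a threshold is set), record it as the new
 * worst case under max_trace_lock.
 */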
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

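/*
 * Mark the beginning of a critical section on this CPU: take a timestamp,
 * remember where the section started and flag the CPU as being traced so
 * the function tracer callbacks above record entries for it.
 */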
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

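/*
 * Mark the end of a critical section on this CPU and hand the measurement
 * to check_critical_timing() to decide whether it is a new maximum.
 */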
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to pause tracing (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

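/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_on/off()
 * entry points and calls back into time_hardirqs_on/off() below; without
 * it, this file provides the trace_hardirqs_*() implementations itself
 * (the "Stubs" branch further down).
 */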
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

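/*
 * trace_preempt_on/off() are called from the preempt_count() bookkeeping
 * in the scheduler core.  The !irq_trace() check keeps the preempt side
 * from double-accounting a section that the irqsoff side is already
 * tracking (relevant for the combined preemptirqsoff tracer).
 */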
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

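/*
 * Register or unregister the function (or function graph) tracer callback
 * used to fill in the trace between the start and end of a critical
 * section, and set tracer_enabled accordingly.
 */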
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

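/*
 * Common initialization for the irqsoff, preemptoff and preemptirqsoff
 * tracers: force the latency format and ring-buffer overwrite mode,
 * reset the recorded maximum, and start tracing.
 */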
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph());

	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
	.flag_changed	= trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);