/*
 * h/w branch tracer for x86 based on bts
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/ds.h>

#include "trace.h"
#include "trace_output.h"
#define SIZEOF_BTS (1 << 13)	/* 8 KB BTS buffer per cpu */
/*
 * The tracer mutex protects the below per-cpu tracer array.
 * It needs to be held to:
 * - start tracing on all cpus
 * - stop tracing on all cpus
 * - start tracing on a single hotplug cpu
 * - stop tracing on a single hotplug cpu
 * - read the trace from all cpus
 * - read the trace from a single cpu
 */
static DEFINE_MUTEX(bts_tracer_mutex);
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())
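
/*
 * Note that both macros resolve the calling cpu's slot and must
 * therefore only be used with preemption disabled. All users below
 * satisfy this: they run either inside an on_each_cpu() /
 * smp_call_function_single() IPI or with interrupts disabled.
 */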
static int __read_mostly trace_hw_branches_enabled;
/*
 * Start tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_mutex must be locked.
 */
static void bts_trace_start_cpu(void *arg)
{
	if (this_tracer)
		ds_release_bts(this_tracer);

	this_tracer =
		ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
			       /* ovfl = */ NULL, /* th = */ (size_t)-1,
			       BTS_KERNEL);
	if (IS_ERR(this_tracer)) {
		this_tracer = NULL;
		return;
	}
}
static void bts_trace_start(struct trace_array *tr)
{
	mutex_lock(&bts_tracer_mutex);

	on_each_cpu(bts_trace_start_cpu, NULL, 1);
	trace_hw_branches_enabled = 1;

	mutex_unlock(&bts_tracer_mutex);
}
/*
 * Stop tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_mutex must be locked.
 */
static void bts_trace_stop_cpu(void *arg)
{
	if (this_tracer) {
		ds_release_bts(this_tracer);
		this_tracer = NULL;
	}
}
static void bts_trace_stop(struct trace_array *tr)
{
	mutex_lock(&bts_tracer_mutex);

	trace_hw_branches_enabled = 0;
	on_each_cpu(bts_trace_stop_cpu, NULL, 1);

	mutex_unlock(&bts_tracer_mutex);
}
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	mutex_lock(&bts_tracer_mutex);

	if (!trace_hw_branches_enabled)
		goto out;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
		break;
	}

 out:
	mutex_unlock(&bts_tracer_mutex);
	return NOTIFY_DONE;
}
static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
	.notifier_call = bts_hotcpu_handler
};
static int bts_trace_init(struct trace_array *tr)
{
	register_hotcpu_notifier(&bts_hotcpu_notifier);
	tracing_reset_online_cpus(tr);
	bts_trace_start(tr);

	return 0;
}
static void bts_trace_reset(struct trace_array *tr)
{
	bts_trace_stop(tr);
	unregister_hotcpu_notifier(&bts_hotcpu_notifier);
}
static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m,
		 "# CPU#        FROM                   TO         FUNCTION\n");
}
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *seq = &iter->seq;
	struct hw_branch_entry *it;

	trace_assign_type(it, entry);

	if (entry->type == TRACE_HW_BRANCHES) {
		if (trace_seq_printf(seq, "%4d  ", entry->cpu) &&
		    trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
				     it->from, it->to) &&
		    (!it->from ||
		     seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
		    trace_seq_printf(seq, "\n"))
			return TRACE_TYPE_HANDLED;
		return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_UNHANDLED;
}
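
/*
 * A rendered line looks roughly like this (illustrative only; the
 * addresses and symbol are made up, the layout follows the format
 * strings above):
 *
 *    0  0xffffffff8021af8c -> 0xffffffff8021bf40 schedule
 */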
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
{
	struct ring_buffer_event *event;
	struct hw_branch_entry *entry;
	unsigned long irq1, irq2;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();

	/* The per-cpu disabled counter doubles as a recursion guard:
	   bail out if this cpu is already inside the tracer. */
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->ent.cpu = cpu;
	entry->from = from;
	entry->to = to;
	ring_buffer_unlock_commit(tr->buffer, event, irq2);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}
static void trace_bts_at(struct trace_array *tr,
			 const struct bts_trace *trace, void *at)
{
	struct bts_struct bts;
	int err = 0;

	WARN_ON_ONCE(!trace->read);
	if (!trace->read)
		return;

	err = trace->read(this_tracer, at, &bts);
	if (err < 0)
		return;

	switch (bts.qualifier) {
	case BTS_BRANCH:
		trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
		break;
	}
}
/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: bts_tracer_mutex must be locked
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *) arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (!this_tracer)
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	/* Oldest entries first: drain from the current write position
	   to the end of the circular buffer, then from the beginning
	   back up to the write position. */
	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

 out:
	ds_resume_bts(this_tracer);
}
static void trace_bts_prepare(struct trace_iterator *iter)
{
	mutex_lock(&bts_tracer_mutex);

	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	mutex_unlock(&bts_tracer_mutex);
}
struct tracer bts_tracer __read_mostly =
{
	.name		= "hw-branch-tracer",
	.init		= bts_trace_init,
	.reset		= bts_trace_reset,
	.print_header	= bts_trace_print_header,
	.print_line	= bts_trace_print_line,
	.start		= bts_trace_start,
	.stop		= bts_trace_stop,
	.open		= trace_bts_prepare
};
__init static int init_bts_trace(void)
{
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);
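
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug; the
 * mount point may differ):
 *
 *   echo hw-branch-tracer > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 *
 * Selecting the tracer invokes bts_trace_init() and starts branch
 * recording on all cpus; opening the trace file goes through
 * trace_bts_prepare(), which drains each cpu's BTS buffer into the
 * ftrace ring buffer before the entries are rendered by
 * bts_trace_print_line().
 */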