Commit | Line | Data |
---|---|---|
f984b51e PP |
1 | /* |
2 | * Memory mapped I/O tracing | |
3 | * | |
4 | * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi> | |
5 | */ | |
6 | ||
7 | #define DEBUG 1 | |
8 | ||
9 | #include <linux/kernel.h> | |
10 | #include <linux/mmiotrace.h> | |
13829537 | 11 | #include <linux/pci.h> |
f984b51e PP |
12 | |
13 | #include "trace.h" | |
14 | ||
/*
 * Iterator state for emitting the "PCIDEV" header lines at the start of
 * a trace-pipe read; walks all PCI devices one device per read() call.
 */
struct header_iter {
	struct pci_dev *dev;	/* next device to print; ref held via pci_get_device() */
};

/* The trace_array this tracer logs into; NULL while the tracer is inactive. */
static struct trace_array *mmio_trace_array;

/* Set once lost events have been reported, so we warn only one time. */
static bool overrun_detected;
f984b51e | 21 | |
bd8ac686 PP |
22 | static void mmio_reset_data(struct trace_array *tr) |
23 | { | |
24 | int cpu; | |
25 | ||
2039238b | 26 | overrun_detected = false; |
bd8ac686 PP |
27 | tr->time_start = ftrace_now(tr->cpu); |
28 | ||
29 | for_each_online_cpu(cpu) | |
3928a8a2 | 30 | tracing_reset(tr, cpu); |
bd8ac686 | 31 | } |
f984b51e PP |
32 | |
33 | static void mmio_trace_init(struct trace_array *tr) | |
34 | { | |
35 | pr_debug("in %s\n", __func__); | |
36 | mmio_trace_array = tr; | |
bd8ac686 PP |
37 | if (tr->ctrl) { |
38 | mmio_reset_data(tr); | |
f984b51e | 39 | enable_mmiotrace(); |
bd8ac686 | 40 | } |
f984b51e PP |
41 | } |
42 | ||
43 | static void mmio_trace_reset(struct trace_array *tr) | |
44 | { | |
45 | pr_debug("in %s\n", __func__); | |
46 | if (tr->ctrl) | |
47 | disable_mmiotrace(); | |
bd8ac686 PP |
48 | mmio_reset_data(tr); |
49 | mmio_trace_array = NULL; | |
f984b51e PP |
50 | } |
51 | ||
52 | static void mmio_trace_ctrl_update(struct trace_array *tr) | |
53 | { | |
54 | pr_debug("in %s\n", __func__); | |
bd8ac686 PP |
55 | if (tr->ctrl) { |
56 | mmio_reset_data(tr); | |
f984b51e | 57 | enable_mmiotrace(); |
bd8ac686 | 58 | } else { |
f984b51e | 59 | disable_mmiotrace(); |
bd8ac686 PP |
60 | } |
61 | } | |
62 | ||
/*
 * Emit one "PCIDEV" header line describing @dev: bus number/devfn,
 * vendor/device ids, irq, then the seven resource base addresses (with
 * the region flags or'd into the low bits) followed by the seven
 * resource sizes, and finally the bound driver's name (blank if none).
 * Returns the summed trace_seq_printf() results.
 */
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
		dev->bus->number, dev->devfn,
		dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {	/* presumably 6 BARs + expansion ROM — confirm */
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	/* Second pass: sizes; zero for resources with an empty range. */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}
97 | ||
d0a7e8ca PP |
98 | static void destroy_header_iter(struct header_iter *hiter) |
99 | { | |
100 | if (!hiter) | |
101 | return; | |
102 | pci_dev_put(hiter->dev); | |
103 | kfree(hiter); | |
104 | } | |
105 | ||
106 | static void mmio_pipe_open(struct trace_iterator *iter) | |
bd8ac686 | 107 | { |
d0a7e8ca | 108 | struct header_iter *hiter; |
bd8ac686 | 109 | struct trace_seq *s = &iter->seq; |
13829537 PP |
110 | |
111 | trace_seq_printf(s, "VERSION 20070824\n"); | |
112 | ||
d0a7e8ca PP |
113 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); |
114 | if (!hiter) | |
115 | return; | |
116 | ||
117 | hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); | |
118 | iter->private = hiter; | |
119 | } | |
120 | ||
121 | /* XXX: This is not called when the pipe is closed! */ | |
122 | static void mmio_close(struct trace_iterator *iter) | |
123 | { | |
124 | struct header_iter *hiter = iter->private; | |
125 | destroy_header_iter(hiter); | |
126 | iter->private = NULL; | |
127 | } | |
128 | ||
/*
 * Return the number of events lost since the last call and clear the
 * counters.  The per-cpu accounting below is compiled out (the old
 * iter->overrun[] array went away in the ring buffer rework), so this
 * currently always returns 0.
 */
static unsigned long count_overruns(struct trace_iterator *iter)
{
	int cpu;
	unsigned long cnt = 0;
/* FIXME: */
#if 0
	for_each_online_cpu(cpu) {
		cnt += iter->overrun[cpu];
		iter->overrun[cpu] = 0;
	}
#endif
	(void)cpu;	/* silence unused warning while the loop is #if 0'd */
	return cnt;
}
143 | ||
/*
 * Custom pipe read.  Priority order per call:
 *  1. report lost events ("MARK ... Lost N events.") if any overruns;
 *  2. emit one PCI device header line and advance the device walk;
 *  3. once the walk is done, return 0 — presumably the tracing core
 *     then falls back to normal event output (verify against caller).
 */
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	/* pci_get_device() drops the old ref and takes one on the next dev. */
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		/* Device walk finished: tear down the header iterator. */
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}
177 | ||
07f4e4f7 | 178 | static enum print_line_t mmio_print_rw(struct trace_iterator *iter) |
bd8ac686 PP |
179 | { |
180 | struct trace_entry *entry = iter->ent; | |
7104f300 SR |
181 | struct trace_mmiotrace_rw *field; |
182 | struct mmiotrace_rw *rw; | |
bd8ac686 | 183 | struct trace_seq *s = &iter->seq; |
3928a8a2 | 184 | unsigned long long t = ns2usecs(iter->ts); |
bd8ac686 PP |
185 | unsigned long usec_rem = do_div(t, 1000000ULL); |
186 | unsigned secs = (unsigned long)t; | |
187 | int ret = 1; | |
188 | ||
7104f300 SR |
189 | trace_assign_type(field, entry); |
190 | rw = &field->rw; | |
191 | ||
777e208d | 192 | switch (rw->opcode) { |
bd8ac686 PP |
193 | case MMIO_READ: |
194 | ret = trace_seq_printf(s, | |
dee310d0 PP |
195 | "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
196 | rw->width, secs, usec_rem, rw->map_id, | |
197 | (unsigned long long)rw->phys, | |
736ca61f | 198 | rw->value, rw->pc, 0); |
bd8ac686 PP |
199 | break; |
200 | case MMIO_WRITE: | |
201 | ret = trace_seq_printf(s, | |
dee310d0 PP |
202 | "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
203 | rw->width, secs, usec_rem, rw->map_id, | |
204 | (unsigned long long)rw->phys, | |
736ca61f | 205 | rw->value, rw->pc, 0); |
bd8ac686 PP |
206 | break; |
207 | case MMIO_UNKNOWN_OP: | |
208 | ret = trace_seq_printf(s, | |
dee310d0 PP |
209 | "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n", |
210 | secs, usec_rem, rw->map_id, | |
211 | (unsigned long long)rw->phys, | |
bd8ac686 | 212 | (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, |
736ca61f | 213 | (rw->value >> 0) & 0xff, rw->pc, 0); |
bd8ac686 PP |
214 | break; |
215 | default: | |
216 | ret = trace_seq_printf(s, "rw what?\n"); | |
217 | break; | |
218 | } | |
219 | if (ret) | |
07f4e4f7 FW |
220 | return TRACE_TYPE_HANDLED; |
221 | return TRACE_TYPE_PARTIAL_LINE; | |
bd8ac686 PP |
222 | } |
223 | ||
07f4e4f7 | 224 | static enum print_line_t mmio_print_map(struct trace_iterator *iter) |
bd8ac686 PP |
225 | { |
226 | struct trace_entry *entry = iter->ent; | |
7104f300 SR |
227 | struct trace_mmiotrace_map *field; |
228 | struct mmiotrace_map *m; | |
bd8ac686 | 229 | struct trace_seq *s = &iter->seq; |
3928a8a2 | 230 | unsigned long long t = ns2usecs(iter->ts); |
bd8ac686 PP |
231 | unsigned long usec_rem = do_div(t, 1000000ULL); |
232 | unsigned secs = (unsigned long)t; | |
07f4e4f7 | 233 | int ret; |
bd8ac686 | 234 | |
7104f300 SR |
235 | trace_assign_type(field, entry); |
236 | m = &field->map; | |
237 | ||
777e208d | 238 | switch (m->opcode) { |
bd8ac686 PP |
239 | case MMIO_PROBE: |
240 | ret = trace_seq_printf(s, | |
dee310d0 PP |
241 | "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
242 | secs, usec_rem, m->map_id, | |
243 | (unsigned long long)m->phys, m->virt, m->len, | |
e0fd5c2f | 244 | 0UL, 0); |
bd8ac686 PP |
245 | break; |
246 | case MMIO_UNPROBE: | |
247 | ret = trace_seq_printf(s, | |
248 | "UNMAP %lu.%06lu %d 0x%lx %d\n", | |
e0fd5c2f | 249 | secs, usec_rem, m->map_id, 0UL, 0); |
bd8ac686 PP |
250 | break; |
251 | default: | |
252 | ret = trace_seq_printf(s, "map what?\n"); | |
253 | break; | |
254 | } | |
255 | if (ret) | |
07f4e4f7 FW |
256 | return TRACE_TYPE_HANDLED; |
257 | return TRACE_TYPE_PARTIAL_LINE; | |
bd8ac686 PP |
258 | } |
259 | ||
07f4e4f7 | 260 | static enum print_line_t mmio_print_mark(struct trace_iterator *iter) |
fc5e27ae PP |
261 | { |
262 | struct trace_entry *entry = iter->ent; | |
777e208d SR |
263 | struct print_entry *print = (struct print_entry *)entry; |
264 | const char *msg = print->buf; | |
fc5e27ae | 265 | struct trace_seq *s = &iter->seq; |
3928a8a2 | 266 | unsigned long long t = ns2usecs(iter->ts); |
fc5e27ae PP |
267 | unsigned long usec_rem = do_div(t, 1000000ULL); |
268 | unsigned secs = (unsigned long)t; | |
269 | int ret; | |
270 | ||
271 | /* The trailing newline must be in the message. */ | |
272 | ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); | |
273 | if (!ret) | |
07f4e4f7 | 274 | return TRACE_TYPE_PARTIAL_LINE; |
fc5e27ae | 275 | |
777e208d | 276 | if (entry->flags & TRACE_FLAG_CONT) |
fc5e27ae PP |
277 | trace_seq_print_cont(s, iter); |
278 | ||
07f4e4f7 | 279 | return TRACE_TYPE_HANDLED; |
fc5e27ae PP |
280 | } |
281 | ||
07f4e4f7 | 282 | static enum print_line_t mmio_print_line(struct trace_iterator *iter) |
bd8ac686 PP |
283 | { |
284 | switch (iter->ent->type) { | |
285 | case TRACE_MMIO_RW: | |
286 | return mmio_print_rw(iter); | |
287 | case TRACE_MMIO_MAP: | |
288 | return mmio_print_map(iter); | |
fc5e27ae PP |
289 | case TRACE_PRINT: |
290 | return mmio_print_mark(iter); | |
bd8ac686 | 291 | default: |
07f4e4f7 | 292 | return TRACE_TYPE_HANDLED; /* ignore unknown entries */ |
bd8ac686 | 293 | } |
f984b51e PP |
294 | } |
295 | ||
/* Tracer registration record: hooks mmiotrace into the tracing core. */
static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,		/* XXX: not called on pipe close, see above */
	.read		= mmio_read,
	.ctrl_update	= mmio_trace_ctrl_update,
	.print_line	= mmio_print_line,
};

/* Register the tracer at device_initcall time. */
__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
313 | ||
/*
 * Reserve a ring buffer slot, fill in a TRACE_MMIO_RW entry with a copy
 * of @rw, commit it, and wake any readers blocked on the trace pipe.
 * The event is silently dropped if the buffer reservation fails.
 */
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0);
	entry->ent.type = TRACE_MMIO_RW;
	entry->rw = *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}
334 | ||
/*
 * Public entry point: log one MMIO read/write event.
 * NOTE(review): unlike mmio_trace_mapping(), this indexes
 * tr->data[smp_processor_id()] without disabling preemption —
 * presumably callers already run with preemption off (e.g. from the
 * kmmio fault path); confirm before calling from preemptible context.
 */
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}
f984b51e | 341 | |
/*
 * Reserve a ring buffer slot, fill in a TRACE_MMIO_MAP entry with a copy
 * of @map, commit it, and wake any readers blocked on the trace pipe.
 * The event is silently dropped if the buffer reservation fails.
 */
static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0);
	entry->ent.type = TRACE_MMIO_MAP;
	entry->map = *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}
362 | ||
/*
 * Public entry point: log an MMIO map/unmap event.  Preemption is
 * disabled so the per-cpu trace_array_cpu lookup and the buffer write
 * happen on the same cpu.
 */
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
9e57fb35 PP |
373 | |
/* Record a formatted user marker string into the trace as a print entry. */
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}