/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>

#include <asm-generic/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

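/*
 * Per-CPU BTS state: the perf AUX output handle for the in-flight event,
 * a copy of the original debug store (DS) fields so they can be restored
 * in bts_event_del(), and a flag telling the PMI handler whether this
 * counter is running.
 */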
struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				started;
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

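/*
 * Each BTS record is 24 bytes: three 64-bit fields holding the branch-from
 * address, the branch-to address, and flags.  A bts_phys describes one
 * physically contiguous chunk of the AUX buffer: the first page of the
 * chunk, its usable size, its byte offset within the AUX buffer, and the
 * displacement needed to keep records aligned to BTS_RECORD_SIZE.
 */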
struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		lost;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[0];
};

struct pmu bts_pmu;

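/* usable bytes in a high-order AUX page; page_private() holds the order */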
static size_t buf_size(struct page *page)
{
	return 1 << (PAGE_SHIFT + page_private(page));
}

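/*
 * Set up the AUX buffer for BTS: walk the page array, treating each
 * high-order allocation as one physically contiguous chunk, and record
 * per-chunk offset/size/displacement so the DS hardware can be pointed
 * at one chunk at a time.
 */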
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical
	 * buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

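	/*
	 * Carve the AUX buffer into chunks.  'pad' is the tail of the
	 * previous chunk that is too small for a whole record; the next
	 * chunk's displacement skips enough bytes so that offsets into
	 * the AUX buffer stay aligned to BTS_RECORD_SIZE.
	 */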
	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}

static void bts_buffer_free_aux(void *data)
{
	kfree(data);
}

static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

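/*
 * Program the DS area for the current chunk: buffer base, write index,
 * absolute maximum and PMI threshold.  In snapshot mode the threshold is
 * placed past the absolute maximum so the hardware wraps without ever
 * raising an interrupt.
 */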
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
		? ds->bts_buffer_base + thresh
		: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

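/* zero out the tail of the current chunk so stale bytes are never exported */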
static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

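/*
 * A non-snapshot buffer counts as full once less than one whole record's
 * worth of space remains in the output handle.
 */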
static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
{
	if (buf->snapshot)
		return false;

	if (local_read(&buf->data_size) >= bts->handle.size ||
	    bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
		return true;

	return false;
}

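/*
 * Fold the hardware write pointer (ds->bts_index) back into buf->head and
 * account the newly written bytes; hitting the absolute maximum means the
 * hardware stopped writing, which is reported as lost data.
 */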
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			local_inc(&buf->lost);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}
}

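/*
 * Build the BTS enable bits from the event's exclude_* attributes, point
 * the DS area at the current chunk and switch tracing on.
 */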
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf || bts_buffer_is_full(buf, bts))
		return;

	event->hw.itrace_started = 1;
	event->hw.state = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS
	 */
	wmb();

	intel_pmu_enable_bts(config);
}

static void bts_event_start(struct perf_event *event, int flags)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	__bts_event_start(event);

	/* PMI handler: this counter is running and likely generating PMIs */
	ACCESS_ONCE(bts->started) = 1;
}

static void __bts_event_stop(struct perf_event *event)
{
	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
}

static void bts_event_stop(struct perf_event *event, int flags)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* PMI handler: don't restart this counter */
	ACCESS_ONCE(bts->started) = 0;

	__bts_event_stop(event);

	if (flags & PERF_EF_UPDATE)
		bts_update(bts);
}

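/*
 * Entry points for the core x86 PMU code to pause and resume BTS around
 * sections where it must not run, such as the PMI handler itself.
 */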
void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	if (bts->handle.event && bts->started)
		__bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event);
}

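/*
 * Recompute the writable window [head, end) after the output handle has
 * changed: if the current chunk is nearly exhausted, pad it out, skip the
 * inter-chunk gap and advance to the next chunk; in any case, don't let
 * 'end' run far past the consumer's wakeup watermark.
 */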
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
	if (WARN_ON_ONCE(head != local_read(&buf->head)))
		return -EINVAL;

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

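/*
 * PMI handler: flush what the hardware has written so far, close the
 * current output handle and reopen it on fresh AUX space.  Returns
 * non-zero if the interrupt was consumed here.
 */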
int intel_bts_interrupt(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err;

	if (!event || !bts->started)
		return 0;

	buf = perf_get_aux(&bts->handle);
	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (!buf || buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return 0;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
			    !!local_xchg(&buf->lost, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return 1;

	err = bts_buffer_reset(buf, &bts->handle);
	if (err)
		perf_aux_output_end(&bts->handle, 0, false);

	return 1;
}

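/*
 * Stop the event, publish whatever was collected (in snapshot mode the
 * whole buffer is exposed) and restore the DS fields saved at add time.
 */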
static void bts_event_del(struct perf_event *event, int mode)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);

	bts_event_stop(event, PERF_EF_UPDATE);

	if (buf) {
		if (buf->snapshot)
			bts->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
				    !!local_xchg(&buf->lost, 0));
	}

	cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
	cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
	cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
	cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
}

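/*
 * There is only one BTS facility per CPU, so refuse to add the event if
 * the fixed BTS counter or the per-CPU handle is already in use; save the
 * original DS fields so bts_event_del() can restore them.
 */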
static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_buffer *buf;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		return -EINVAL;

	ret = bts_buffer_reset(buf, &bts->handle);
	if (ret) {
		perf_aux_output_end(&bts->handle, 0, false);
		return ret;
	}

	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED) {
			bts_event_del(event, 0);
			return -EBUSY;
		}
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 *
	 * Note that the default paranoia setting permits unprivileged
	 * users to profile the kernel.
	 */
	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

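/*
 * BTS needs the 64-bit DS area (DTES64) and a PMU that exposes BTS.  The
 * PMU is registered with PERF_PMU_CAP_AUX_NO_SG because the DS hardware
 * writes into physically contiguous chunks rather than a scatter-gather
 * list, and with PERF_PMU_CAP_ITRACE since BTS produces an instruction
 * trace.
 */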
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);