/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 *
 * tsk_pinned[n] counts the tasks having n + 1 pinned breakpoints on
 * this cpu, so the first non-empty bucket from the top gives the max.
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

static int task_bp_pinned(struct task_struct *tsk)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The breakpoint currently being registered is not yet linked
	 * into this list at the open() callback time.
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu);
		else
			slots->pinned += task_bp_pinned(tsk);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu);
		else
			nr += task_bp_pinned(tsk);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint
 * table: move the task one bucket up or down in the tsk_pinned histogram.
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk);

	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}

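/*
 * Example: suppose a task already has two pinned breakpoints on this cpu
 * and registers a third. task_bp_pinned() returns 2, so tsk_pinned[2] is
 * incremented (one more task owning three breakpoints) and tsk_pinned[1]
 * is decremented (one less task owning two).
 */
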
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register free (or the flexible counters will
 *          never get scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

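/*
 * For example, assuming HBP_NUM == 4 (the x86 debug register count): if
 * fetch_bp_busy_slots() reports 3 pinned slots in use and at least one
 * flexible counter, then 3 + 1 == 4 == HBP_NUM and reserve_bp_slot()
 * refuses the new pinned breakpoint with -ENOSPC, so that the flexible
 * counters keep the one register they need to get scheduled at all.
 */
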
void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}


int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	/*
	 * Ptrace breakpoints can be temporary perf events only
	 * meant to reserve a slot. In this case, they are created disabled
	 * and we don't want to check the params right now (as we put a
	 * null addr). But perf tools create events as disabled and we want
	 * to check the params for them.
	 * This is a quick hack that will be removed soon, once we remove
	 * the tmp breakpoints from ptrace.
	 */
	if (!bp->attr.disabled || !bp->overflow_handler)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

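/*
 * Example (sketch only, error handling elided; 'addr', 'tsk' and the
 * 'my_triggered' perf_overflow_handler_t callback are placeholders):
 * watch 4 bytes of a task's address space for writes.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.size		= sizeof(attr),
 *		.bp_addr	= addr,
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_len		= HW_BREAKPOINT_LEN_4,
 *	};
 *	struct perf_event *bp;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */
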
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	int old_type = bp->attr.bp_type;
	int old_len = bp->attr.bp_len;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

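/*
 * Example (sketch; 'attr' carries the breakpoint's current attributes and
 * 'new_addr' is a placeholder): move an existing breakpoint to another
 * address.
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */
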
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

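/*
 * Example (sketch; 'some_kernel_var' and 'my_triggered' are placeholders):
 * watch a kernel variable for writes on every cpu, then tear the whole
 * set down.
 *
 *	struct perf_event **bps;
 *
 *	attr.bp_addr = (unsigned long) &some_kernel_var;
 *	bps = register_wide_hw_breakpoint(&attr, my_triggered);
 *	if (IS_ERR(bps))
 *		return PTR_ERR(bps);
 *	...
 *	unregister_wide_hw_breakpoint(bps);
 */
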
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};