/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS		8

struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HW_COUNTERS];
	unsigned long		used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

static const int intel_perfmon_event_map[] =
{
	[PERF_COUNT_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

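/*
 * The map values above follow the architectural EVENTSEL encoding:
 * event select in bits 7:0, unit mask in bits 15:8. E.g. 0x412e is
 * event 0x2e with umask 0x41 (LLC Misses) and 0x4f2e is event 0x2e
 * with umask 0x4f (LLC References).
 */
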
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * The elapsed delta is accumulated into counter->count.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

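/*
 * Worked example of the 32-bit clipping above: if prev_raw_count is
 * 0xfffffff0 and the counter wraps to a raw read of 0x00000010, then
 * (s32)0x10 - (s32)0xfffffff0 = 0x20, i.e. a delta of 32 events is
 * accumulated despite the wraparound.
 */
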
/*
 * Set up the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
		if (hw_event->nmi)
			hwc->nmi = 1;
	}

	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;

	hwc->irq_period		= hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
		hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event types provide the config directly in the event structure
	 */
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
	} else {
		if (hw_event->type >= max_intel_perfmon_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= intel_perfmon_event_map[hw_event->type];
	}
	counter->wakeup_pending = 0;

	return 0;
}

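/*
 * For scale: with the maximum period of 2^31-1, a counter that counts
 * cycles on e.g. a 3 GHz CPU overflows roughly every 0.7 seconds.
 */
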
void hw_perf_enable_all(void)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}

u64 hw_perf_save_disable(void)
{
	u64 ctrl;

	if (unlikely(!perf_counters_initialized))
		return 0;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);

	return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

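/*
 * Callers are expected to pair this with hw_perf_restore(), e.g.:
 *
 *	u64 ctrl = hw_perf_save_disable();
 *	... reprogram counters with the PMU quiesced ...
 *	hw_perf_restore(ctrl);
 */
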
void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline void
__x86_perf_counter_disable(struct perf_counter *counter,
			   struct hw_perf_counter *hwc, unsigned int idx)
{
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
}

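/*
 * Writing -left arms the counter to overflow after exactly 'left'
 * increments: e.g. left == 100000 programs the 32-bit counter to
 * 0xfffe7960, and the overflow interrupt fires 100000 events later.
 */
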
static void
__x86_perf_counter_enable(struct perf_counter *counter,
			  struct hw_perf_counter *hwc, int idx)
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static void x86_perf_counter_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

	__x86_perf_counter_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;

	__hw_perf_counter_set_period(counter, hwc, idx);
	__x86_perf_counter_enable(counter, hwc, idx);
}

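/*
 * cpuc->used tracks which of this CPU's PMC slots are claimed: the
 * atomic test_and_set_bit() first retries the counter's previous slot
 * before falling back to a bitmap search for a free one.
 */
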
void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
	int cpu, idx;

	if (!nr_hw_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	local_irq_enable();
}

static void x86_perf_counter_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__x86_perf_counter_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	u64 pmc_ctrl;

	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
		__x86_perf_counter_enable(counter, hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	/*
	 * Store sibling timestamps (if any):
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
		perf_store_irq_data(sibling, counter->hw_event.type);
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
	}
}

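/*
 * The irqdata buffer thus receives one u64 pair per group member:
 * { hw_event.type, current count }.
 */
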
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status, saved_global;
	struct cpu_hw_counters *cpuc;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

again:
	ack = status;
	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these counters as
		 * wakeup_pending and initiate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off:
	 */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	inc_irq_stat(apic_perf_irqs);
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		if (!counter)
			continue;

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

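/*
 * This runs from the TIF_PERF_COUNTERS work set by the NMI path above:
 * it delivers the wakeups that the NMI handler could not issue itself.
 */
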
void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

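/*
 * The error LVT entry is masked while LVTPC is rewritten, presumably to
 * avoid a spurious error interrupt during the update; it is restored
 * once the perf vector is in place.
 */
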
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > MAX_HW_COUNTERS) {
		nr_hw_counters = MAX_HW_COUNTERS;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, MAX_HW_COUNTERS);
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

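/*
 * For reference: CPUID leaf 0xa returns the architectural perfmon
 * parameters in EAX - version ID in bits 7:0, number of counters in
 * bits 15:8, counter bit width in bits 23:16 and the event mask length
 * in bits 31:24 - which is what the cpuid10_eax bitfields decode.
 */
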
static void x86_perf_counter_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.hw_perf_counter_enable		= x86_perf_counter_enable,
	.hw_perf_counter_disable	= x86_perf_counter_disable,
	.hw_perf_counter_read		= x86_perf_counter_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}