/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/apic.h>

static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
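/*
 * Only IbsRandEn and the max count are user-configurable for fetch
 * sampling; op sampling exposes just the max count here. When the cpu
 * advertises IBS_CAPS_OPCNT, perf_event_ibs_init() additionally allows
 * IBS_OP_CNT_CTL (counting dispatched ops rather than cycles).
 */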
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
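/*
 * Set up the next sample period: clamp what is left of the period to
 * the [min, max] range of the hardware counter, return the value to
 * program via *count, and report via the return value whether the
 * previous period had already overflowed.
 */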
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *count)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < min))
		left = min;

	if (left > max)
		left = max;

	*count = (u64)left;

	return overflow;
}
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
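/*
 * Validate and set up an IBS event: the event must target one of the
 * two IBS PMUs, only config bits covered by config_mask may be set,
 * and the effective sample period must be a multiple of 16 because
 * the low four bits of the hardware max count are not implemented.
 */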
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (!perf_ibs)
		return -ENOENT;

	config = event->attr.config;
	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
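/*
 * Illustrative sketch only (not kernel code): with the PMU registered
 * below as "ibs_op", userspace could request IBS op sampling roughly
 * like this, reading the dynamic PMU type from
 * /sys/bus/event_source/devices/ibs_op/type and using a sample period
 * that is a multiple of 16:
 *
 *	struct perf_event_attr attr = {
 *		.type		= ibs_op_pmu_type,	// value read from sysfs
 *		.size		= sizeof(attr),
 *		.sample_period	= 0x10000,
 *		.sample_type	= PERF_SAMPLE_RAW,
 *	};
 *	// one cpu at a time, system-wide:
 *	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */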
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int ret;

	/* ignore lower 4 bits in min count: */
	ret = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return ret;
}
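/*
 * Note on the encoding: the period produced above is a plain count of
 * ops (or fetches), while the MaxCnt MSR field is specified in units
 * of 16. Callers therefore write (period >> 4) into the control MSR,
 * so e.g. a sample_period of 0x10000 becomes a MaxCnt value of 0x1000.
 */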
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	return (config & IBS_OP_CUR_CNT) >> 32;
}
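/*
 * IbsFetchCnt (bits 31:16 of IBSFETCHCTL) appears to count in units
 * of 16 fetches, which is why the shift above is 12 rather than 16;
 * IbsOpCurCnt sits above bit 32 of IBSOPCTL and counts single ops.
 * Either way the result fits in 20 bits, matching the width that
 * perf_ibs_event_update() passes to perf_event_try_update().
 */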
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 config)
{
	u64 count = perf_ibs->get_count(config);

	while (!perf_event_try_update(event, count, 20)) {
		rdmsrl(event->hw.config_base, config);
		count = perf_ibs->get_count(config);
	}
}
/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &config);
	config = (config >> 4) | perf_ibs->enable_mask;
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 val;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, val);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		val &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, val);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_ibs_event_update(perf_ibs, event, val);
	hwc->state |= PERF_HES_UPTODATE;
}
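/*
 * pmu::add/del callbacks. Only one IBS event of each flavour can be
 * scheduled per cpu at a time; the IBS_ENABLED bit in the per-cpu
 * state guards against over-commit.
 */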
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
static void perf_ibs_read(struct perf_event *event) { }
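/*
 * Two independent PMUs are registered, "ibs_fetch" and "ibs_op". Each
 * is described by a struct perf_ibs carrying its control MSR, the
 * valid/enable/count bit masks and the MSR window to dump for
 * PERF_SAMPLE_RAW.
 */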
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
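/*
 * NMI handler for one IBS flavour: check the valid bit, read out the
 * sample registers, emulate IbsOpCurCnt where the hardware lacks it,
 * hand the sample to perf and re-arm the counter for the next period.
 */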
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, overflow, reenable;
	unsigned int msr;
	u64 *buf, config;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/* Catch spurious interrupts after stopping IBS: */
		if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 0;
		rdmsrl(perf_ibs->msr, *ibs_data.regs);
		return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	/*
	 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), which is not
	 * supported on all cpus. As this triggered an interrupt, we
	 * set the current count to the max count.
	 */
	config = ibs_data.regs[0];
	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
		config &= ~IBS_OP_CUR_CNT;
		config |= (config & IBS_OP_MAX_CNT) << 36;
	}

	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		ibs_data.caps = ibs_caps;
		size = 1;
		offset = 1;
		do {
			rdmsrl(msr + offset, *buf++);
			size++;
			offset = find_next_bit(perf_ibs->offset_mask,
					       perf_ibs->offset_max,
					       offset + 1);
		} while (offset < perf_ibs->offset_max);
		raw.size = sizeof(u32) + sizeof(u64) * size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	regs = *iregs; /* XXX: update ip from ibs sample */

	overflow = perf_ibs_set_period(perf_ibs, hwc, &config);
	reenable = !(overflow && perf_event_overflow(event, &data, &regs));
	config = (config >> 4) | (reenable ? perf_ibs->enable_mask : 0);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);

	return 1;
}
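/*
 * A single NMI may carry samples from both IBS units, so both are
 * polled and the number of handled sources is returned.
 */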
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
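/*
 * Register the fetch and op PMUs, enable dispatched-op counting when
 * the cpu advertises it, and hook up the shared NMI handler.
 */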
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif
/* IBS - apic initialization, for perf and oprofile */
static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
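/* Exported so that other IBS users, e.g. oprofile, can query the caps. */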
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
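/*
 * Program the chosen LVT offset into the IBSCTL register in the
 * northbridge PCI config space of every node and read it back to
 * verify that the write took effect.
 */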
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset; this then updates
 * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -ENOSPC;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}
static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set those up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);