/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFC
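/*
 * Layout of a raw BHRB entry as consumed by power_pmu_bhrb_read() below:
 * bit 0 is the prediction bit (copied into mispred/predicted), bit 1 marks
 * a "target" entry, and the remaining bits (BHRB_EA) hold the effective
 * address.  For example, a raw value of 0xc000000000012346 decodes to the
 * address 0xc000000000012344 with the target bit set.
 */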
struct cpu_hw_events {
	int n_events;
	int n_added;
	int n_limited;
	int disabled;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;

	/* BHRB bits */
	u64				bhrb_filter;	/* BHRB HW branch filter */
	int				bhrb_users;
	void				*bhrb_context;
	struct	perf_branch_stack	bhrb_stack;
	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
void power_pmu_flush_branch_stack(void) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
	return !!(regs->result & 1);
}
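/*
 * Note: regs->result is overloaded by perf_read_regs() below; bit 0 says
 * whether the SIAR should be used for this sample, and bit 1 says the PMU
 * has no usable SIPR/SIHV bits (see regs_no_sipr()).
 */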
/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}
static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}
static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}
static bool regs_no_sipr(struct pt_regs *regs)
{
	return !!(regs->result & 2);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (regs_no_sipr(regs)) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;
	regs->result = 0;

	if (ppmu->flags & PPMU_NO_SIPR)
		regs->result |= 2;

	/*
	 * On power8 if we're in random sampling mode, the SIER is updated.
	 * If we're in continuous sampling mode, we don't have SIPR.
	 */
	if (ppmu->flags & PPMU_HAS_SIER) {
		regs->dar = mfspr(SPRN_SIER);
	}

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!regs_no_sipr(regs) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result |= use_siar;
}
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}
/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}
static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
}
static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	cpuhw->bhrb_users--;
	WARN_ON_ONCE(cpuhw->bhrb_users < 0);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */
		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}
/* Called from ctxsw to prevent one process's branch entries from
 * mingling with another process's entries during context switch.
 */
void power_pmu_flush_branch_stack(void)
{
	if (ppmu->bhrb_nr)
		power_pmu_bhrb_reset();
}
/* Processing BHRB entries */
void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: End of valid BHRB entries */
			break;

		addr = val & BHRB_EA;
		pred = val & BHRB_PREDICTION;

		if (!addr)
			/* invalid entry */
			continue;

		/* Branches are read most recent first (ie. mfbhrb 0 is
		 * the most recent branch).
		 * There are two types of valid entries:
		 * 1) a target entry which is the to address of a
		 *    computed goto like a blr,bctr,btar.  The next
		 *    entry read from the bhrb will be the branch
		 *    corresponding to this target (ie. the actual
		 *    blr/bctr/btar instruction).
		 * 2) a from address which is an actual branch.  If a
		 *    target entry precedes this, then this is the
		 *    matching branch for that target.  If this is not
		 *    following a target entry, then this is a branch
		 *    where the target is given as an immediate field
		 *    in the instruction (ie. an i or b form branch).
		 *    In this case we need to read the instruction from
		 *    memory to determine the target/to address.
		 */
		if (val & BHRB_TARGET) {
			/* Target branches use two entries
			 * (ie. computed gotos/XL form)
			 */
			cpuhw->bhrb_entries[u_index].to = addr;
			cpuhw->bhrb_entries[u_index].mispred = pred;
			cpuhw->bhrb_entries[u_index].predicted = ~pred;

			/* Get from address in next entry */
			val = read_bhrb(r_index++);
			addr = val & BHRB_EA;
			if (val & BHRB_TARGET) {
				/* Shouldn't have two targets in a
				 * row.. Reset index and try again */
				r_index--;
				addr = 0;
			}
			cpuhw->bhrb_entries[u_index].from = addr;
		} else {
			/* Branches to immediate field
			 * (ie I or B form)
			 */
			cpuhw->bhrb_entries[u_index].from = addr;
			cpuhw->bhrb_entries[u_index].to = 0;
			cpuhw->bhrb_entries[u_index].mispred = pred;
			cpuhw->bhrb_entries[u_index].predicted = ~pred;
		}
		u_index++;
	}
	cpuhw->bhrb_stack.nr = u_index;
}
#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
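/*
 * A rough sketch of the constraint arithmetic used above, as read from the
 * code rather than from any particular PMU backend: each event's
 * get_constraint() packs a (mask, value) pair into bit-fields of an
 * unsigned long.  Select-style fields simply have to agree between events
 * (the XOR-and-mask tests), while fields named in ppmu->add_fields act as
 * small per-resource counters that are summed as events are added;
 * ppmu->test_adder is chosen so that an over-committed field carries into
 * bits covered by the mask, which makes the same tests fail.
 */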
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
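/*
 * For example, prev = 0xfffffff0 and val = 0x00000010 means the counter
 * genuinely wrapped, so delta = (val - prev) & 0xffffffff = 0x20; whereas
 * prev = 0x00001000 and val = 0x00000f80 is within 256 of a rollback and
 * is reported as a delta of 0.
 */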
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}
*cpuhw
,
761 unsigned long pmc5
, unsigned long pmc6
)
763 struct perf_event
*event
;
767 for (i
= 0; i
< cpuhw
->n_limited
; ++i
) {
768 event
= cpuhw
->limited_counter
[i
];
769 event
->hw
.idx
= cpuhw
->limited_hwidx
[i
];
770 val
= (event
->hw
.idx
== 5) ? pmc5
: pmc6
;
771 prev
= local64_read(&event
->hw
.prev_count
);
772 if (check_and_compute_delta(prev
, val
))
773 local64_set(&event
->hw
.prev_count
, val
);
774 perf_event_update_userpage(event
);
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
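/*
 * Note on the asm above: the "=&r" early-clobber modifiers tell the compiler
 * that pmc5/pmc6 are written before the inputs are finished with, so they
 * cannot share a register with the mmcr0 operand; keeping all three SPR
 * accesses in a single asm statement keeps the freeze and the PMC5/PMC6
 * reads back to back, as the comment above requires.
 */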
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}
	cpuhw->n_added = 0;

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	if (has_branch_stack(event))
		power_pmu_bhrb_enable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */

static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
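/*
 * The generic cache config packs three byte-wide fields:
 * config = (result << 16) | (op << 8) | type.  For example,
 * type = PERF_COUNT_HW_CACHE_L1D, op = PERF_COUNT_HW_CACHE_OP_READ and
 * result = PERF_COUNT_HW_CACHE_RESULT_MISS select the L1D read-miss entry
 * of ppmu->cache_events.
 */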
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	if (has_branch_stack(event)) {
		/* PMU has BHRB enabled */
		if (!(ppmu->flags & PPMU_BHRB))
			return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);

	if (has_branch_stack(event)) {
		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);

		if (cpuhw->bhrb_filter == -1) {
			put_cpu_var(cpu_hw_events);
			return -EOPNOTSUPP;
		}
	}

	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.flush_branch_stack = power_pmu_flush_branch_stack,
};
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = &__get_cpu_var(cpu_hw_events);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		/* no valid instruction pointer */
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}
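/*
 * For example, val = 0x7fffff80 gives 0x80000000 - val = 128, so the PMC is
 * within 256 cycles of overflowing and is treated as the source of the
 * exception, whereas a freshly reset counter such as val = 0x00000010 is not.
 */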
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}
/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if ((!found) && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef CONFIG_PPC64
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}