/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event *events[MIPS_MAX_HWEVENTS];

	/*
	 * The bit (indexed by the counter number) is set when the
	 * counter is in use for an event.
	 */
	unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters: they
	 * may use this field differently, or may not use it at all.
	 */
	unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates that this event may only use
	 * even-numbered counters; CNTR_ODD, only odd-numbered ones.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64 max_period;
	u64 valid_count;
	u64 overflow;
	const char *name;
	int irq;
	u64 (*read_counter)(unsigned int idx);
	void (*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |	\
					M_PERFCTL_KERNEL |	\
					M_PERFCTL_USER |	\
					M_PERFCTL_SUPERVISOR |	\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
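
/*
 * Example control word (derived from the definitions above): counting
 * event 0x05 in user and kernel mode with the overflow interrupt
 * enabled is
 *	M_PERFCTL_EVENT(0x05) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 *	M_PERFCTL_INTERRUPT_ENABLE == 0xa0 | 0x8 | 0x2 | 0x10 == 0xba.
 */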

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

#endif /* CONFIG_MIPS_MT_SMP */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}
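
/*
 * For example (derived from the swizzle above), on VPE 1 the indexes
 * are rotated by two: 0 <-> 2 and 1 <-> 3. Each VPE thus effectively
 * sees its own half of the four core counters.
 */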

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask; the range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind occupies the
		 * counter that an event of the latter kind wants to use,
		 * the "counter allocation" for the latter event fails.
		 * If the two could be dynamically swapped, both would be
		 * satisfied; we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
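
/*
 * Example (matching mipspmu_perf_event_encode() below): an event with
 * cntr_mask == CNTR_EVEN stores 0x5555 in bits 8..23 of event_base,
 * so the loop above may only claim the even-numbered counters.
 */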

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
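
/*
 * Worked example (derived from the code above): with 32-bit counters
 * mipspmu.overflow is 1ULL << 31, so writing (overflow - left) into
 * the counter makes it hit the overflow bit, and hence the interrupt,
 * after exactly 'left' more events.
 */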

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
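
/*
 * The cmpxchg loop above guards against a concurrent update of
 * prev_count between the read of the hardware counter and the
 * bookkeeping; only the winning read/update pair contributes its
 * delta to the event count.
 */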

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be directly accessed across CPUs. Hence if we want to do
 * global control, we need cross-CPU calls. on_each_cpu() can help us,
 * but we cannot make sure this function is called with interrupts
 * enabled. So here we pause local counters and then grab a rwlock and
 * leave the counters on other CPUs alone. If any counter interrupt is
 * raised while we own the write lock, we simply pause local counters
 * on that CPU and spin in the handler. We also know we won't be
 * switched to another CPU after pausing local counters and before
 * grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
			IRQF_PERCPU | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				"performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so
 * they have their own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
	/*
	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
	 * event_id.
	 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
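
/*
 * Encoding example (derived from the layout above): an event with
 * range T (0), cntr_mask CNTR_EVEN and event_id 0x05 encodes to
 * (0 << 24) | 0x555500 | 0x05 == 0x00555505.
 */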

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu.general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu.general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_event(struct cpu_hw_events *cpuc,
			struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	/* Allow mixed event groups, so return 1 to pass validation. */
	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
	int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}
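
/*
 * Probing note (derived from the checks above): each control
 * register's M_PERFCTL_MORE bit says whether another counter follows,
 * so the first control register with the bit clear ends the chain.
 */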

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}
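
/*
 * Note the deliberate switch fall-through above: resetting N counters
 * also clears the control and count registers of every counter below
 * N.
 */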

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and
	 * the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipspmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}
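
/*
 * The HANDLE_COUNTER() cases above fall through deliberately: with N
 * counters the switch enters at "case N" and checks every lower
 * counter as well.
 */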

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
	 ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can use raw events 0-255: 0-127 select events for the even
 * counters and 128-255 for the odd counters; that is, bit 7 indicates
 * the parity. So, for example, when a user wants Event Num 15 on the
 * odd counters (by referring to the user manual), 128 must be added
 * to 15 as the input for the event config, i.e. 143 (0x8F) is used.
 */
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
	if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}
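
	/*
	 * Width probe note (derived from the branch above): if the
	 * M_PERFCTL_WIDE bit is set in perfctrl0, the 64-bit counter
	 * accessors and a 63-bit max_period are used; otherwise the
	 * counters are treated as 32 bits wide.
	 */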

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (shared with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);