/*
 * Linux performance counter support for ARC700 series
 *
 * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
 *
 * This code is inspired by the perf support of various other architectures.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

struct arc_pmu {
	struct pmu pmu;
	unsigned int irq;
	int n_counters;
	u64 max_period;
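	/*
	 * Mapping of generic perf event ids to hardware condition
	 * indexes, filled in at probe time by name matching; -1 means
	 * the event has no hardware counterpart on this core.
	 */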
	int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};
29 | ||
30 | struct arc_pmu_cpu { | |
31 | /* | |
32 | * A 1 bit for an index indicates that the counter is being used for | |
33 | * an event. A 0 means that the counter can be used. | |
34 | */ | |
35 | unsigned long used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)]; | |
36 | ||
37 | /* | |
38 | * The events that are active on the PMU for the given index. | |
39 | */ | |
36481cf7 | 40 | struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS]; |
0dd450fe MJ |
41 | }; |
42 | ||
389e3160 VG |
43 | struct arc_callchain_trace { |
44 | int depth; | |
45 | void *perf_stuff; | |
46 | }; | |
47 | ||
static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}
59 | ||
60 | void | |
61 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |
62 | { | |
63 | struct arc_callchain_trace ctrl = { | |
64 | .depth = 0, | |
65 | .perf_stuff = entry, | |
66 | }; | |
67 | ||
68 | arc_unwind_core(NULL, regs, callchain_trace, &ctrl); | |
69 | } | |
70 | ||
22f6b899 VG |
71 | void |
72 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |
73 | { | |
74 | /* | |
75 | * User stack can't be unwound trivially with kernel dwarf unwinder | |
76 | * So for now just record the user PC | |
77 | */ | |
78 | perf_callchain_store(entry, instruction_pointer(regs)); | |
79 | } | |
80 | ||
03c94fcf | 81 | static struct arc_pmu *arc_pmu; |
e525c37f | 82 | static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); |
03c94fcf | 83 | |
0dd450fe MJ |
84 | /* read counter #idx; note that counter# != event# on ARC! */ |
85 | static uint64_t arc_pmu_read_counter(int idx) | |
86 | { | |
87 | uint32_t tmp; | |
88 | uint64_t result; | |
89 | ||
90 | /* | |
91 | * ARC supports making 'snapshots' of the counters, so we don't | |
92 | * need to care about counters wrapping to 0 underneath our feet | |
93 | */ | |
94 | write_aux_reg(ARC_REG_PCT_INDEX, idx); | |
95 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); | |
96 | write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); | |
97 | result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; | |
98 | result |= read_aux_reg(ARC_REG_PCT_SNAPL); | |
99 | ||
100 | return result; | |
101 | } | |
102 | ||
103 | static void arc_perf_event_update(struct perf_event *event, | |
104 | struct hw_perf_event *hwc, int idx) | |
105 | { | |
1fe8bfa5 AB |
106 | uint64_t prev_raw_count = local64_read(&hwc->prev_count); |
107 | uint64_t new_raw_count = arc_pmu_read_counter(idx); | |
108 | int64_t delta = new_raw_count - prev_raw_count; | |
0dd450fe | 109 | |
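	/*
	 * Counters only count up and are re-programmed before they can
	 * wrap (the period is capped at max_period, half the counter
	 * range), so new_raw_count >= prev_raw_count and delta is
	 * non-negative here.
	 */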
	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for us to re-enter this function anytime.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
118 | ||
119 | static void arc_pmu_read(struct perf_event *event) | |
120 | { | |
121 | arc_perf_event_update(event, &event->hw, event->hw.idx); | |
122 | } | |
123 | ||
124 | static int arc_pmu_cache_event(u64 config) | |
125 | { | |
126 | unsigned int cache_type, cache_op, cache_result; | |
127 | int ret; | |
128 | ||
	cache_type = (config >> 0) & 0xff;
	cache_op = (config >> 8) & 0xff;
	cache_result = (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}
150 | ||
151 | /* initializes hw_perf_event structure if event is supported */ | |
152 | static int arc_pmu_event_init(struct perf_event *event) | |
153 | { | |
0dd450fe MJ |
154 | struct hw_perf_event *hwc = &event->hw; |
155 | int ret; | |
156 | ||
	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %d \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		return 0;

	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
213 | ||
1fe8bfa5 AB |
214 | static int arc_pmu_event_set_period(struct perf_event *event) |
215 | { | |
216 | struct hw_perf_event *hwc = &event->hw; | |
217 | s64 left = local64_read(&hwc->period_left); | |
218 | s64 period = hwc->sample_period; | |
219 | int idx = hwc->idx; | |
220 | int overflow = 0; | |
221 | u64 value; | |
222 | ||
223 | if (unlikely(left <= -period)) { | |
224 | /* left underflowed by more than period. */ | |
225 | left = period; | |
226 | local64_set(&hwc->period_left, left); | |
227 | hwc->last_period = period; | |
228 | overflow = 1; | |
229 | } else if (unlikely(left <= 0)) { | |
230 | /* left underflowed by less than period. */ | |
231 | left += period; | |
232 | local64_set(&hwc->period_left, left); | |
233 | hwc->last_period = period; | |
234 | overflow = 1; | |
235 | } | |
236 | ||
237 | if (left > arc_pmu->max_period) | |
238 | left = arc_pmu->max_period; | |
239 | ||
	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));

	perf_event_update_userpage(event);

	return overflow;
}
254 | ||
0dd450fe MJ |
255 | /* |
256 | * Assigns hardware counter to hardware condition. | |
257 | * Note that there is no separate start/stop mechanism; | |
258 | * stopping is achieved by assigning the 'never' condition | |
259 | */ | |
260 | static void arc_pmu_start(struct perf_event *event, int flags) | |
261 | { | |
262 | struct hw_perf_event *hwc = &event->hw; | |
263 | int idx = hwc->idx; | |
264 | ||
265 | if (WARN_ON_ONCE(idx == -1)) | |
266 | return; | |
267 | ||
268 | if (flags & PERF_EF_RELOAD) | |
1fe8bfa5 AB |
269 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); |
270 | ||
271 | hwc->state = 0; | |
0dd450fe | 272 | |
1fe8bfa5 | 273 | arc_pmu_event_set_period(event); |
0dd450fe | 274 | |
36481cf7 AB |
275 | /* Enable interrupt for this counter */ |
276 | if (is_sampling_event(event)) | |
277 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | |
278 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); | |
279 | ||
0dd450fe | 280 | /* enable ARC pmu here */ |
09074950 VG |
281 | write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ |
282 | write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); /* condition */ | |
0dd450fe MJ |
283 | } |
284 | ||
285 | static void arc_pmu_stop(struct perf_event *event, int flags) | |
286 | { | |
287 | struct hw_perf_event *hwc = &event->hw; | |
288 | int idx = hwc->idx; | |
289 | ||
36481cf7 AB |
290 | /* Disable interrupt for this counter */ |
291 | if (is_sampling_event(event)) { | |
292 | /* | |
293 | * Reset interrupt flag by writing of 1. This is required | |
294 | * to make sure pending interrupt was not left. | |
295 | */ | |
296 | write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); | |
297 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | |
298 | read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); | |
299 | } | |
300 | ||
0dd450fe MJ |
301 | if (!(event->hw.state & PERF_HES_STOPPED)) { |
302 | /* stop ARC pmu here */ | |
303 | write_aux_reg(ARC_REG_PCT_INDEX, idx); | |
304 | ||
305 | /* condition code #0 is always "never" */ | |
306 | write_aux_reg(ARC_REG_PCT_CONFIG, 0); | |
307 | ||
308 | event->hw.state |= PERF_HES_STOPPED; | |
309 | } | |
310 | ||
311 | if ((flags & PERF_EF_UPDATE) && | |
312 | !(event->hw.state & PERF_HES_UPTODATE)) { | |
313 | arc_perf_event_update(event, &event->hw, idx); | |
314 | event->hw.state |= PERF_HES_UPTODATE; | |
315 | } | |
316 | } | |
317 | ||
318 | static void arc_pmu_del(struct perf_event *event, int flags) | |
319 | { | |
e525c37f AB |
320 | struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); |
321 | ||
0dd450fe | 322 | arc_pmu_stop(event, PERF_EF_UPDATE); |
e525c37f | 323 | __clear_bit(event->hw.idx, pmu_cpu->used_mask); |
0dd450fe | 324 | |
e525c37f | 325 | pmu_cpu->act_counter[event->hw.idx] = 0; |
36481cf7 | 326 | |
0dd450fe MJ |
327 | perf_event_update_userpage(event); |
328 | } | |
329 | ||
330 | /* allocate hardware counter and optionally start counting */ | |
331 | static int arc_pmu_add(struct perf_event *event, int flags) | |
332 | { | |
e525c37f | 333 | struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); |
0dd450fe MJ |
334 | struct hw_perf_event *hwc = &event->hw; |
335 | int idx = hwc->idx; | |
336 | ||
	if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
		idx = find_first_zero_bit(pmu_cpu->used_mask,
					  arc_pmu->n_counters);
		if (idx == arc_pmu->n_counters)
			return -EAGAIN;

		__set_bit(idx, pmu_cpu->used_mask);
		hwc->idx = idx;
	}

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      (arc_pmu->max_period >> 32));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
371 | ||
36481cf7 AB |
372 | #ifdef CONFIG_ISA_ARCV2 |
373 | static irqreturn_t arc_pmu_intr(int irq, void *dev) | |
374 | { | |
375 | struct perf_sample_data data; | |
e525c37f | 376 | struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); |
36481cf7 AB |
377 | struct pt_regs *regs; |
378 | int active_ints; | |
379 | int idx; | |
380 | ||
381 | arc_pmu_disable(&arc_pmu->pmu); | |
382 | ||
383 | active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT); | |
384 | ||
385 | regs = get_irq_regs(); | |
386 | ||
387 | for (idx = 0; idx < arc_pmu->n_counters; idx++) { | |
e525c37f | 388 | struct perf_event *event = pmu_cpu->act_counter[idx]; |
36481cf7 AB |
389 | struct hw_perf_event *hwc; |
390 | ||
391 | if (!(active_ints & (1 << idx))) | |
392 | continue; | |
393 | ||
394 | /* Reset interrupt flag by writing of 1 */ | |
395 | write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); | |
396 | ||
397 | /* | |
398 | * On reset of "interrupt active" bit corresponding | |
399 | * "interrupt enable" bit gets automatically reset as well. | |
400 | * Now we need to re-enable interrupt for the counter. | |
401 | */ | |
402 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | |
403 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); | |
404 | ||
405 | hwc = &event->hw; | |
406 | ||
407 | WARN_ON_ONCE(hwc->idx != idx); | |
408 | ||
409 | arc_perf_event_update(event, &event->hw, event->hw.idx); | |
410 | perf_sample_data_init(&data, 0, hwc->last_period); | |
411 | if (!arc_pmu_event_set_period(event)) | |
412 | continue; | |
413 | ||
414 | if (perf_event_overflow(event, &data, regs)) | |
415 | arc_pmu_stop(event, 0); | |
416 | } | |
417 | ||
418 | arc_pmu_enable(&arc_pmu->pmu); | |
419 | ||
420 | return IRQ_HANDLED; | |
421 | } | |
422 | #else | |
423 | ||
424 | static irqreturn_t arc_pmu_intr(int irq, void *dev) | |
425 | { | |
426 | return IRQ_NONE; | |
427 | } | |
428 | ||
429 | #endif /* CONFIG_ISA_ARCV2 */ | |
430 | ||
static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
440 | ||
0dd450fe MJ |
441 | static int arc_pmu_device_probe(struct platform_device *pdev) |
442 | { | |
0dd450fe MJ |
443 | struct arc_reg_pct_build pct_bcr; |
444 | struct arc_reg_cc_build cc_bcr; | |
36481cf7 | 445 | int i, j, has_interrupts; |
1fe8bfa5 | 446 | int counter_size; /* in bits */ |
0dd450fe MJ |
447 | |
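	/*
	 * The CC_NAME aux registers return the condition name as two
	 * raw 32-bit words; the sentinel byte (cc_name.str[8], cleared
	 * below) keeps the 8-character name NUL-terminated for the
	 * strcmp() match.
	 */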
	union cc_name {
		struct {
			uint32_t word0, word1;
			char sentinel;
		} indiv;
		char str[9];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	BUG_ON(!cc_bcr.v);	/* Counters exist but no countable conditions? */

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

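	/*
	 * pct_bcr.s encodes the counter width in 16-bit steps above the
	 * 32-bit base (e.g. s == 1 yields 48-bit counters); max_period
	 * is then half the counter range, leaving headroom to catch an
	 * overflow before the count wraps.
	 */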
0dd450fe | 473 | arc_pmu->n_counters = pct_bcr.c; |
1fe8bfa5 | 474 | counter_size = 32 + (pct_bcr.s << 4); |
36481cf7 | 475 | |
1fe8bfa5 | 476 | arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL; |
0dd450fe | 477 | |
36481cf7 AB |
478 | pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", |
479 | arc_pmu->n_counters, counter_size, cc_bcr.c, | |
480 | has_interrupts ? ", [overflow IRQ support]":""); | |
0dd450fe MJ |
481 | |
482 | cc_name.str[8] = 0; | |
bde80c23 | 483 | for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) |
0dd450fe MJ |
484 | arc_pmu->ev_hw_idx[i] = -1; |
485 | ||
bde80c23 | 486 | /* loop thru all available h/w condition indexes */ |
0dd450fe MJ |
487 | for (j = 0; j < cc_bcr.c; j++) { |
488 | write_aux_reg(ARC_REG_CC_INDEX, j); | |
489 | cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); | |
490 | cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); | |
bde80c23 VG |
491 | |
492 | /* See if it has been mapped to a perf event_id */ | |
0dd450fe MJ |
493 | for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { |
494 | if (arc_pmu_ev_hw_map[i] && | |
495 | !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) && | |
496 | strlen(arc_pmu_ev_hw_map[i])) { | |
bde80c23 VG |
497 | pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", |
498 | i, cc_name.str, j); | |
0dd450fe MJ |
499 | arc_pmu->ev_hw_idx[i] = j; |
500 | } | |
501 | } | |
502 | } | |
503 | ||
504 | arc_pmu->pmu = (struct pmu) { | |
505 | .pmu_enable = arc_pmu_enable, | |
506 | .pmu_disable = arc_pmu_disable, | |
507 | .event_init = arc_pmu_event_init, | |
508 | .add = arc_pmu_add, | |
509 | .del = arc_pmu_del, | |
510 | .start = arc_pmu_start, | |
511 | .stop = arc_pmu_stop, | |
512 | .read = arc_pmu_read, | |
513 | }; | |
514 | ||
36481cf7 AB |
515 | if (has_interrupts) { |
516 | int irq = platform_get_irq(pdev, 0); | |
517 | ||
518 | if (irq < 0) { | |
519 | pr_err("Cannot get IRQ number for the platform\n"); | |
520 | return -ENODEV; | |
521 | } | |
522 | ||
e525c37f AB |
523 | arc_pmu->irq = irq; |
524 | ||
c6317bc7 VG |
525 | /* intc map function ensures irq_set_percpu_devid() called */ |
526 | request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", | |
527 | this_cpu_ptr(&arc_pmu_cpu)); | |
528 | ||
529 | on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); | |
530 | ||
36481cf7 AB |
531 | } else |
532 | arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | |
2cc9e588 | 533 | |
082ae1e1 | 534 | return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); |
0dd450fe MJ |
535 | } |
536 | ||
537 | #ifdef CONFIG_OF | |
538 | static const struct of_device_id arc_pmu_match[] = { | |
30fdd373 | 539 | { .compatible = "snps,arc700-pct" }, |
9b28829d | 540 | { .compatible = "snps,archs-pct" }, |
0dd450fe MJ |
541 | {}, |
542 | }; | |
543 | MODULE_DEVICE_TABLE(of, arc_pmu_match); | |
544 | #endif | |
545 | ||
546 | static struct platform_driver arc_pmu_driver = { | |
547 | .driver = { | |
9b28829d | 548 | .name = "arc-pct", |
0dd450fe MJ |
549 | .of_match_table = of_match_ptr(arc_pmu_match), |
550 | }, | |
551 | .probe = arc_pmu_device_probe, | |
552 | }; | |
553 | ||
554 | module_platform_driver(arc_pmu_driver); | |
555 | ||
556 | MODULE_LICENSE("GPL"); | |
557 | MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>"); | |
558 | MODULE_DESCRIPTION("ARC PMU driver"); |