Commit | Line | Data |
---|---|---|
0f4f0672 JI |
1 | /* |
2 | * linux/arch/arm/include/asm/pmu.h | |
3 | * | |
4 | * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | */ | |
11 | ||
12 | #ifndef __ARM_PMU_H__ | |
13 | #define __ARM_PMU_H__ | |
14 | ||
0e25a5c9 | 15 | #include <linux/interrupt.h> |
0ce47080 | 16 | #include <linux/perf_event.h> |
0e25a5c9 | 17 | |
/*
 * Types of PMUs that can be accessed directly and require mutual
 * exclusion between profiling tools.
 */
enum arm_pmu_type {
	ARM_PMU_DEVICE_CPU = 0,	/* the CPU-core performance counters */
	ARM_NUM_PMU_DEVICES,	/* must remain last: number of PMU types */
};
26 | ||
/**
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @enable_irq: an optional handler which will be called after
 *	request_irq and be used to handle some platform specific
 *	irq enablement
 * @disable_irq: an optional handler which will be called before
 *	free_irq and be used to handle some platform specific
 *	irq disablement
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	void (*enable_irq)(int irq);
	void (*disable_irq)(int irq);
};
47 | ||
#ifdef CONFIG_CPU_HAS_PMU

/**
 * reserve_pmu() - reserve the hardware performance counters
 *
 * Reserve the hardware performance counters in the system for exclusive use.
 * Returns 0 on success or -EBUSY if the lock is already held.
 */
extern int
reserve_pmu(enum arm_pmu_type type);

/**
 * release_pmu() - Relinquish control of the performance counters
 *
 * Release the performance counters and allow someone else to use them.
 */
extern void
release_pmu(enum arm_pmu_type type);

#else /* CONFIG_CPU_HAS_PMU */

#include <linux/err.h>

/*
 * Stubs for CPUs without a PMU: there is nothing to reserve, so
 * reservation always fails with -ENODEV and release is a no-op.
 */
static inline int
reserve_pmu(enum arm_pmu_type type)
{
	return -ENODEV;
}

static inline void
release_pmu(enum arm_pmu_type type) { }

#endif /* CONFIG_CPU_HAS_PMU */
81 | ||
#ifdef CONFIG_HW_PERF_EVENTS

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event **events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long *used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
103 | ||
/*
 * Per-implementation descriptor for an ARM PMU: hardware access callbacks
 * plus the bookkeeping shared by all ARM PMU drivers.  Embeds the generic
 * struct pmu so the core perf layer can drive it (see to_arm_pmu()).
 */
struct arm_pmu {
	struct pmu	pmu;		/* generic perf PMU; must stay first-class for container_of */
	enum arm_pmu_type type;		/* which PMU this is (e.g. ARM_PMU_DEVICE_CPU) */
	cpumask_t	active_irqs;	/* CPUs whose PMU IRQ has been requested */
	char		*name;		/* human-readable PMU name */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);	/* overflow interrupt handler */
	void		(*enable)(struct hw_perf_event *evt, int idx);	/* start counting @evt on counter @idx */
	void		(*disable)(struct hw_perf_event *evt, int idx);	/* stop counting @evt on counter @idx */
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct hw_perf_event *hwc);	/* allocate a free counter index */
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);	/* optional: apply attr-based filtering */
	u32		(*read_counter)(int idx);		/* read raw counter value */
	void		(*write_counter)(int idx, u32 val);	/* write raw counter value */
	void		(*start)(void);		/* start the whole PMU */
	void		(*stop)(void);		/* stop the whole PMU */
	void		(*reset)(void *);	/* reset PMU state (presumably per-CPU; confirm in drivers) */
	int		(*map_event)(struct perf_event *event);	/* map generic event to hardware event code */
	int		num_events;		/* number of hardware counters */
	atomic_t	active_events;		/* events currently using this PMU */
	struct mutex	reserve_mutex;		/* serializes hardware reservation/release */
	u64		max_period;		/* largest period a counter can count before overflow */
	struct platform_device *plat_device;	/* backing platform device (IRQs, platdata) */
	struct pmu_hw_events *(*get_hw_events)(void);	/* fetch the register-set state for this context */
};
129 | ||
/* Convert a generic struct pmu pointer back to its enclosing arm_pmu. */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Register @armpmu with the perf core under @name; init-time only. */
int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);

/* Fold the hardware counter delta for @idx into the event count. */
u64 armpmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx);

/* Program the next sample period into counter @idx for @event. */
int armpmu_event_set_period(struct perf_event *event,
			    struct hw_perf_event *hwc,
			    int idx);

#endif /* CONFIG_HW_PERF_EVENTS */
143 | ||
0f4f0672 | 144 | #endif /* __ARM_PMU_H__ */ |