include/linux/perf/arm_pmu.h
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
};
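
/*
 * Illustrative sketch (assumption, not part of this header): a board file
 * could use @handle_irq to run platform specific code, e.g. poking a board
 * specific wakeup source, before invoking the core PMU handler. All names
 * below are hypothetical.
 *
 *	static irqreturn_t board_pmu_handle_irq(int irq, void *dev,
 *						irq_handler_t pmu_handler)
 *	{
 *		board_wake_from_idle();
 *		return pmu_handler(irq, dev);
 *	}
 *
 *	static struct arm_pmu_platdata board_pmu_platdata = {
 *		.handle_irq = board_pmu_handle_irq,
 *	};
 */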

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
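
/*
 * Typical use of the two helpers above (sketch): a CPU PMU driver marks
 * every entry unsupported and then fills in just the events its hardware
 * implements. The table names and event encodings here are placeholders,
 * not real hardware event numbers.
 *
 *	static const unsigned my_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
 *		[PERF_COUNT_HW_INSTRUCTIONS]	= 0x08,
 *	};
 *
 *	static const unsigned my_cache_map[PERF_COUNT_HW_CACHE_MAX]
 *					  [PERF_COUNT_HW_CACHE_OP_MAX]
 *					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
 *	};
 */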

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;
};
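
/*
 * used_mask is normally managed from a driver's get_event_idx callback.
 * Hedged sketch (simplified; a real driver would also honour fixed purpose
 * counters and its own num_events limit):
 *
 *	static int my_pmu_get_event_idx(struct pmu_hw_events *hw_events,
 *					struct perf_event *event)
 *	{
 *		int idx = find_first_zero_bit(hw_events->used_mask,
 *					      ARMPMU_MAX_HWEVENTS);
 *
 *		if (idx >= ARMPMU_MAX_HWEVENTS)
 *			return -EAGAIN;
 *		set_bit(idx, hw_events->used_mask);
 *		return idx;
 *	}
 */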

enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	cpumask_t	supported_cpus;
	int		*irq_affinity;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct list_head	entry;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
};
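
/*
 * Hedged sketch of a CPU PMU init/probe callback (see pmu_probe_info below)
 * populating the fields above. All my_pmu_* callbacks and values are
 * hypothetical.
 *
 *	static int my_pmu_init(struct arm_pmu *cpu_pmu)
 *	{
 *		cpu_pmu->name		= "my_pmu";
 *		cpu_pmu->handle_irq	= my_pmu_handle_irq;
 *		cpu_pmu->enable		= my_pmu_enable_event;
 *		cpu_pmu->disable	= my_pmu_disable_event;
 *		cpu_pmu->read_counter	= my_pmu_read_counter;
 *		cpu_pmu->write_counter	= my_pmu_write_counter;
 *		cpu_pmu->get_event_idx	= my_pmu_get_event_idx;
 *		cpu_pmu->start		= my_pmu_start;
 *		cpu_pmu->stop		= my_pmu_stop;
 *		cpu_pmu->map_event	= my_pmu_map_event;
 *		cpu_pmu->num_events	= 4;
 *		cpu_pmu->max_period	= (1ULL << 32) - 1;
 *		return 0;
 *	}
 */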

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);

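/*
 * armpmu_map_event() is normally called from a driver's map_event callback
 * with its static map tables. Sketch reusing the hypothetical tables from
 * the comment near PERF_MAP_ALL_UNSUPPORTED above; the raw event mask is a
 * placeholder value.
 *
 *	static int my_pmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &my_perf_map, &my_cache_map,
 *					0xFF);
 *	}
 */
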
struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

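/*
 * Illustrative driver glue (sketch; the names, compatible string, and CPU
 * part chosen are placeholders): a platform driver typically supplies an
 * of_device_id table for DT probing and a pmu_probe_info table keyed on the
 * CPU ID for non-DT probing, then calls arm_pmu_device_probe() from its
 * probe routine.
 *
 *	static const struct of_device_id my_pmu_of_ids[] = {
 *		{ .compatible = "vendor,my-pmu", .data = my_pmu_init },
 *		{ },
 *	};
 *
 *	static const struct pmu_probe_info my_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_pmu_init),
 *		{ },
 *	};
 *
 *	static int my_pmu_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, my_pmu_of_ids,
 *					    my_pmu_probe_table);
 *	}
 */
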
#endif /* CONFIG_ARM_PMU */

#endif /* __ARM_PMU_H__ */