/*
 * arm: perf: kill get_hw_events()
 * arch/arm/include/asm/pmu.h
 */
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
0e25a5c9 15#include <linux/interrupt.h>
0ce47080 16#include <linux/perf_event.h>
0e25a5c9 17
548a86ca
MR
18#include <asm/cputype.h>
19
0e25a5c9
RV
20/*
21 * struct arm_pmu_platdata - ARM PMU platform data
22 *
e0516a64
ML
23 * @handle_irq: an optional handler which will be called from the
24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling
26 * before or after calling it.
7be2958e
JH
27 * @runtime_resume: an optional handler which will be called by the
28 * runtime PM framework following a call to pm_runtime_get().
29 * Note that if pm_runtime_get() is called more than once in
30 * succession this handler will only be called once.
31 * @runtime_suspend: an optional handler which will be called by the
32 * runtime PM framework following a call to pm_runtime_put().
33 * Note that if pm_runtime_get() is called more than once in
34 * succession this handler will only be called following the
35 * final call to pm_runtime_put() that actually disables the
36 * hardware.
0e25a5c9
RV
37 */
38struct arm_pmu_platdata {
39 irqreturn_t (*handle_irq)(int irq, void *dev,
40 irq_handler_t pmu_handler);
7be2958e
JH
41 int (*runtime_resume)(struct device *dev);
42 int (*runtime_suspend)(struct device *dev);
0e25a5c9
RV
43};
44
0ce47080
MR
45#ifdef CONFIG_HW_PERF_EVENTS
46
ac8674dc
MR
47/*
48 * The ARMv7 CPU PMU supports up to 32 event counters.
49 */
50#define ARMPMU_MAX_HWEVENTS 32
51
52#define HW_OP_UNSUPPORTED 0xFFFF
53#define C(_x) PERF_COUNT_HW_CACHE_##_x
54#define CACHE_OP_UNSUPPORTED 0xFFFF
55
1113ff98
MR
56#define PERF_MAP_ALL_UNSUPPORTED \
57 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
58
59#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
60[0 ... C(MAX) - 1] = { \
61 [0 ... C(OP_MAX) - 1] = { \
62 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
63 }, \
64}
65
0ce47080
MR
66/* The events for a given PMU register set. */
67struct pmu_hw_events {
68 /*
69 * The events that are active on the PMU for the given index.
70 */
a4560846 71 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
0ce47080
MR
72
73 /*
74 * A 1 bit for an index indicates that the counter is being used for
75 * an event. A 0 means that the counter can be used.
76 */
a4560846 77 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
0ce47080
MR
78
79 /*
80 * Hardware lock to serialize accesses to PMU registers. Needed for the
81 * read/modify/write sequences.
82 */
83 raw_spinlock_t pmu_lock;
84};
85
86struct arm_pmu {
87 struct pmu pmu;
0ce47080 88 cpumask_t active_irqs;
4295b898 89 char *name;
0ce47080 90 irqreturn_t (*handle_irq)(int irq_num, void *dev);
ed6f2a52
SK
91 void (*enable)(struct perf_event *event);
92 void (*disable)(struct perf_event *event);
0ce47080 93 int (*get_event_idx)(struct pmu_hw_events *hw_events,
ed6f2a52 94 struct perf_event *event);
eab443ef
SB
95 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
96 struct perf_event *event);
0ce47080
MR
97 int (*set_event_filter)(struct hw_perf_event *evt,
98 struct perf_event_attr *attr);
ed6f2a52
SK
99 u32 (*read_counter)(struct perf_event *event);
100 void (*write_counter)(struct perf_event *event, u32 val);
101 void (*start)(struct arm_pmu *);
102 void (*stop)(struct arm_pmu *);
0ce47080 103 void (*reset)(void *);
ed6f2a52
SK
104 int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
105 void (*free_irq)(struct arm_pmu *);
0ce47080
MR
106 int (*map_event)(struct perf_event *event);
107 int num_events;
108 atomic_t active_events;
109 struct mutex reserve_mutex;
110 u64 max_period;
111 struct platform_device *plat_device;
11679250 112 struct pmu_hw_events __percpu *hw_events;
0ce47080
MR
113};
114
115#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
116
6dbc0029
WD
117extern const struct dev_pm_ops armpmu_dev_pm_ops;
118
0305230a 119int armpmu_register(struct arm_pmu *armpmu, int type);
0ce47080 120
ed6f2a52 121u64 armpmu_event_update(struct perf_event *event);
0ce47080 122
ed6f2a52 123int armpmu_event_set_period(struct perf_event *event);
0ce47080 124
6dbc0029
WD
125int armpmu_map_event(struct perf_event *event,
126 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
127 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
128 [PERF_COUNT_HW_CACHE_OP_MAX]
129 [PERF_COUNT_HW_CACHE_RESULT_MAX],
130 u32 raw_event_mask);
131
548a86ca
MR
132struct pmu_probe_info {
133 unsigned int cpuid;
134 unsigned int mask;
135 int (*init)(struct arm_pmu *);
136};
137
138#define PMU_PROBE(_cpuid, _mask, _fn) \
139{ \
140 .cpuid = (_cpuid), \
141 .mask = (_mask), \
142 .init = (_fn), \
143}
144
145#define ARM_PMU_PROBE(_cpuid, _fn) \
146 PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
147
148#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
149
150#define XSCALE_PMU_PROBE(_version, _fn) \
151 PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
152
0ce47080
MR
153#endif /* CONFIG_HW_PERF_EVENTS */
154
0f4f0672 155#endif /* __ARM_PMU_H__ */