arch/x86/events/msr.c
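/*
 * perf support for free-running MSR counters: TSC, APERF/MPERF, PPERF,
 * SMI count, PTSC and IRPERF. These are exposed as an "msr" software
 * PMU whose events can be counted but never sampled.
 */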
#include <linux/perf_event.h>
#include <asm/intel-family.h>

enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,

	PERF_MSR_EVENT_MAX,
};

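/*
 * Per-event availability tests: each returns true when the boot CPU
 * advertises the corresponding feature, i.e. when the MSR is worth probing.
 */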
static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:

	case INTEL_FAM6_ATOM_SILVERMONT1:
	case INTEL_FAM6_ATOM_SILVERMONT2:
	case INTEL_FAM6_ATOM_AIRMONT:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

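/*
 * Probe table: one entry per event, pairing the MSR address with its
 * sysfs event attribute and an optional availability test.
 */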
struct perf_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
	bool	(*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};

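/* Filled in by msr_init() once each MSR has been probed. */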
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	/*
	 * MSR_SMI_COUNT is only 32 bits wide; sign-extend the delta so a
	 * wrap of the low 32 bits is still accounted as a small increment.
	 */
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Under virtualization we cannot tell whether a read-only
		 * MSR is actually present, so probe it with rdmsrl_safe().
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
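Once registered, each event whose MSR probed successfully is listed under
/sys/bus/event_source/devices/msr/events/ and can be counted (never sampled,
given PERF_PMU_CAP_NO_INTERRUPT) with the perf tool, for example:

    perf stat -e msr/tsc/,msr/smi/ -a sleep 1

This is only a sketch: which msr/<event>/ names exist depends on the CPU and
on the probing in msr_init() above.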