perf/x86: Fix uncore PCI fixed counter handling
[deliverable/linux.git] / arch / x86 / kernel / cpu / perf_event_intel_uncore.c
CommitLineData
087bfbb0
YZ
1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
14371cce
YZ
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
899396cf
YZ
9static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
14371cce 11static DEFINE_RAW_SPINLOCK(uncore_box_lock);
087bfbb0
YZ
12
13/* mask of cpus that collect uncore events */
14static cpumask_t uncore_cpu_mask;
15
16/* constraint for the fixed counter */
17static struct event_constraint constraint_fixed =
18 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
6a67943a
YZ
19static struct event_constraint constraint_empty =
20 EVENT_CONSTRAINT(0, 0, 0);
087bfbb0 21
46bdd905
YZ
22#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
23 ((1ULL << (n)) - 1)))
24
fcde10e9 25DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
c1ece48c 26DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
fcde10e9
YZ
27DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
6a67943a 29DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
fcde10e9
YZ
30DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
7c94ee2e
YZ
33DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
6a67943a 38DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
e850f9c3 39DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
6a67943a 40DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
e850f9c3 41DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
6a67943a 42DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
e850f9c3 43DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
6a67943a 44DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
e850f9c3 45DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
4f3f713f
YZ
46DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
fd1ec259
YZ
50DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
7c94ee2e 68
254298c7
YZ
69static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
70{
71 u64 count;
72
73 rdmsrl(event->hw.event_base, count);
74
75 return count;
76}
77
78/*
79 * generic get constraint function for shared match/mask registers.
80 */
81static struct event_constraint *
82uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
83{
84 struct intel_uncore_extra_reg *er;
85 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
86 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
87 unsigned long flags;
88 bool ok = false;
89
90 /*
91 * reg->alloc can be set due to existing state, so for fake box we
92 * need to ignore this, otherwise we might fail to allocate proper
93 * fake state for this extra reg constraint.
94 */
95 if (reg1->idx == EXTRA_REG_NONE ||
96 (!uncore_box_is_fake(box) && reg1->alloc))
97 return NULL;
98
99 er = &box->shared_regs[reg1->idx];
100 raw_spin_lock_irqsave(&er->lock, flags);
101 if (!atomic_read(&er->ref) ||
102 (er->config1 == reg1->config && er->config2 == reg2->config)) {
103 atomic_inc(&er->ref);
104 er->config1 = reg1->config;
105 er->config2 = reg2->config;
106 ok = true;
107 }
108 raw_spin_unlock_irqrestore(&er->lock, flags);
109
110 if (ok) {
111 if (!uncore_box_is_fake(box))
112 reg1->alloc = 1;
113 return NULL;
114 }
115
116 return &constraint_empty;
117}
118
119static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
120{
121 struct intel_uncore_extra_reg *er;
122 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
123
124 /*
125 * Only put constraint if extra reg was actually allocated. Also
126 * takes care of event which do not use an extra shared reg.
127 *
128 * Also, if this is a fake box we shouldn't touch any event state
129 * (reg->alloc) and we don't care about leaving inconsistent box
130 * state either since it will be thrown out.
131 */
132 if (uncore_box_is_fake(box) || !reg1->alloc)
133 return;
134
135 er = &box->shared_regs[reg1->idx];
136 atomic_dec(&er->ref);
137 reg1->alloc = 0;
138}
139
46bdd905
YZ
140static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
141{
142 struct intel_uncore_extra_reg *er;
143 unsigned long flags;
144 u64 config;
145
146 er = &box->shared_regs[idx];
147
148 raw_spin_lock_irqsave(&er->lock, flags);
149 config = er->config;
150 raw_spin_unlock_irqrestore(&er->lock, flags);
151
152 return config;
153}
154
7c94ee2e 155/* Sandy Bridge-EP uncore support */
6a67943a
YZ
156static struct intel_uncore_type snbep_uncore_cbox;
157static struct intel_uncore_type snbep_uncore_pcu;
158
7c94ee2e
YZ
159static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
160{
161 struct pci_dev *pdev = box->pci_dev;
162 int box_ctl = uncore_pci_box_ctl(box);
032c3851 163 u32 config = 0;
7c94ee2e 164
032c3851
YZ
165 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
166 config |= SNBEP_PMON_BOX_CTL_FRZ;
167 pci_write_config_dword(pdev, box_ctl, config);
168 }
7c94ee2e
YZ
169}
170
171static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
172{
173 struct pci_dev *pdev = box->pci_dev;
174 int box_ctl = uncore_pci_box_ctl(box);
032c3851 175 u32 config = 0;
7c94ee2e 176
032c3851
YZ
177 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
178 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
179 pci_write_config_dword(pdev, box_ctl, config);
180 }
7c94ee2e
YZ
181}
182
254298c7 183static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
7c94ee2e
YZ
184{
185 struct pci_dev *pdev = box->pci_dev;
186 struct hw_perf_event *hwc = &event->hw;
187
254298c7 188 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
7c94ee2e
YZ
189}
190
254298c7 191static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
7c94ee2e
YZ
192{
193 struct pci_dev *pdev = box->pci_dev;
194 struct hw_perf_event *hwc = &event->hw;
195
196 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
197}
198
254298c7 199static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
7c94ee2e
YZ
200{
201 struct pci_dev *pdev = box->pci_dev;
202 struct hw_perf_event *hwc = &event->hw;
032c3851 203 u64 count = 0;
7c94ee2e
YZ
204
205 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
206 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
254298c7 207
7c94ee2e
YZ
208 return count;
209}
210
211static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
212{
213 struct pci_dev *pdev = box->pci_dev;
254298c7
YZ
214
215 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
7c94ee2e
YZ
216}
217
218static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
219{
220 u64 config;
221 unsigned msr;
222
223 msr = uncore_msr_box_ctl(box);
224 if (msr) {
225 rdmsrl(msr, config);
226 config |= SNBEP_PMON_BOX_CTL_FRZ;
227 wrmsrl(msr, config);
7c94ee2e
YZ
228 }
229}
230
231static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
232{
233 u64 config;
234 unsigned msr;
235
236 msr = uncore_msr_box_ctl(box);
237 if (msr) {
238 rdmsrl(msr, config);
239 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
240 wrmsrl(msr, config);
7c94ee2e
YZ
241 }
242}
243
254298c7 244static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
7c94ee2e
YZ
245{
246 struct hw_perf_event *hwc = &event->hw;
6a67943a
YZ
247 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
248
249 if (reg1->idx != EXTRA_REG_NONE)
46bdd905 250 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
7c94ee2e
YZ
251
252 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
253}
254
255static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
256 struct perf_event *event)
257{
258 struct hw_perf_event *hwc = &event->hw;
259
260 wrmsrl(hwc->config_base, hwc->config);
261}
262
7c94ee2e
YZ
263static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
264{
265 unsigned msr = uncore_msr_box_ctl(box);
254298c7 266
7c94ee2e
YZ
267 if (msr)
268 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
269}
270
271static struct attribute *snbep_uncore_formats_attr[] = {
272 &format_attr_event.attr,
273 &format_attr_umask.attr,
274 &format_attr_edge.attr,
275 &format_attr_inv.attr,
276 &format_attr_thresh8.attr,
277 NULL,
278};
279
280static struct attribute *snbep_uncore_ubox_formats_attr[] = {
281 &format_attr_event.attr,
282 &format_attr_umask.attr,
283 &format_attr_edge.attr,
284 &format_attr_inv.attr,
285 &format_attr_thresh5.attr,
286 NULL,
287};
288
6a67943a
YZ
289static struct attribute *snbep_uncore_cbox_formats_attr[] = {
290 &format_attr_event.attr,
291 &format_attr_umask.attr,
292 &format_attr_edge.attr,
293 &format_attr_tid_en.attr,
294 &format_attr_inv.attr,
295 &format_attr_thresh8.attr,
296 &format_attr_filter_tid.attr,
297 &format_attr_filter_nid.attr,
298 &format_attr_filter_state.attr,
299 &format_attr_filter_opc.attr,
300 NULL,
301};
302
7c94ee2e 303static struct attribute *snbep_uncore_pcu_formats_attr[] = {
77b339bc 304 &format_attr_event_ext.attr,
7c94ee2e
YZ
305 &format_attr_occ_sel.attr,
306 &format_attr_edge.attr,
307 &format_attr_inv.attr,
308 &format_attr_thresh5.attr,
309 &format_attr_occ_invert.attr,
310 &format_attr_occ_edge.attr,
4f3f713f
YZ
311 &format_attr_filter_band0.attr,
312 &format_attr_filter_band1.attr,
313 &format_attr_filter_band2.attr,
314 &format_attr_filter_band3.attr,
7c94ee2e
YZ
315 NULL,
316};
317
c1ece48c
YZ
318static struct attribute *snbep_uncore_qpi_formats_attr[] = {
319 &format_attr_event_ext.attr,
320 &format_attr_umask.attr,
321 &format_attr_edge.attr,
322 &format_attr_inv.attr,
323 &format_attr_thresh8.attr,
fd1ec259
YZ
324 &format_attr_match_rds.attr,
325 &format_attr_match_rnid30.attr,
326 &format_attr_match_rnid4.attr,
327 &format_attr_match_dnid.attr,
328 &format_attr_match_mc.attr,
329 &format_attr_match_opc.attr,
330 &format_attr_match_vnw.attr,
331 &format_attr_match0.attr,
332 &format_attr_match1.attr,
333 &format_attr_mask_rds.attr,
334 &format_attr_mask_rnid30.attr,
335 &format_attr_mask_rnid4.attr,
336 &format_attr_mask_dnid.attr,
337 &format_attr_mask_mc.attr,
338 &format_attr_mask_opc.attr,
339 &format_attr_mask_vnw.attr,
340 &format_attr_mask0.attr,
341 &format_attr_mask1.attr,
c1ece48c
YZ
342 NULL,
343};
344
7c94ee2e 345static struct uncore_event_desc snbep_uncore_imc_events[] = {
eca26c99 346 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
2992c542
PZ
347 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
348 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
7c94ee2e
YZ
349 { /* end: all zeroes */ },
350};
351
352static struct uncore_event_desc snbep_uncore_qpi_events[] = {
2992c542
PZ
353 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
354 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
c9601247
VW
355 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
356 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
7c94ee2e
YZ
357 { /* end: all zeroes */ },
358};
359
360static struct attribute_group snbep_uncore_format_group = {
361 .name = "format",
362 .attrs = snbep_uncore_formats_attr,
363};
364
365static struct attribute_group snbep_uncore_ubox_format_group = {
366 .name = "format",
367 .attrs = snbep_uncore_ubox_formats_attr,
368};
369
6a67943a
YZ
370static struct attribute_group snbep_uncore_cbox_format_group = {
371 .name = "format",
372 .attrs = snbep_uncore_cbox_formats_attr,
373};
374
7c94ee2e
YZ
375static struct attribute_group snbep_uncore_pcu_format_group = {
376 .name = "format",
377 .attrs = snbep_uncore_pcu_formats_attr,
378};
379
c1ece48c
YZ
380static struct attribute_group snbep_uncore_qpi_format_group = {
381 .name = "format",
382 .attrs = snbep_uncore_qpi_formats_attr,
383};
384
46bdd905
YZ
385#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
386 .init_box = snbep_uncore_msr_init_box, \
387 .disable_box = snbep_uncore_msr_disable_box, \
388 .enable_box = snbep_uncore_msr_enable_box, \
389 .disable_event = snbep_uncore_msr_disable_event, \
390 .enable_event = snbep_uncore_msr_enable_event, \
391 .read_counter = uncore_msr_read_counter
392
7c94ee2e 393static struct intel_uncore_ops snbep_uncore_msr_ops = {
46bdd905 394 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
7c94ee2e
YZ
395};
396
fd1ec259
YZ
397#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
398 .init_box = snbep_uncore_pci_init_box, \
399 .disable_box = snbep_uncore_pci_disable_box, \
400 .enable_box = snbep_uncore_pci_enable_box, \
401 .disable_event = snbep_uncore_pci_disable_event, \
402 .read_counter = snbep_uncore_pci_read_counter
403
7c94ee2e 404static struct intel_uncore_ops snbep_uncore_pci_ops = {
fd1ec259
YZ
405 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
406 .enable_event = snbep_uncore_pci_enable_event, \
7c94ee2e
YZ
407};
408
409static struct event_constraint snbep_uncore_cbox_constraints[] = {
410 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
411 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
412 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
e850f9c3 415 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
7c94ee2e
YZ
416 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
417 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
420 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
421 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
422 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
3b19e4c9 423 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
7c94ee2e
YZ
424 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
431 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
432 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
433 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
434 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
435 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
436 EVENT_CONSTRAINT_END
437};
438
439static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
440 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
441 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
442 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
443 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
444 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
445 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
446 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
447 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
448 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
449 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
450 EVENT_CONSTRAINT_END
451};
452
453static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
454 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
455 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
456 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
457 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
458 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
459 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
460 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
461 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
462 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
463 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
464 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
e850f9c3
YZ
465 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
466 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
467 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
468 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
469 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
470 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
471 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
472 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
7c94ee2e
YZ
473 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
474 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
475 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
476 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
477 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
478 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
479 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
e850f9c3
YZ
480 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
481 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
7c94ee2e
YZ
482 EVENT_CONSTRAINT_END
483};
484
485static struct intel_uncore_type snbep_uncore_ubox = {
486 .name = "ubox",
487 .num_counters = 2,
488 .num_boxes = 1,
489 .perf_ctr_bits = 44,
490 .fixed_ctr_bits = 48,
491 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
492 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
493 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
494 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
495 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
496 .ops = &snbep_uncore_msr_ops,
497 .format_group = &snbep_uncore_ubox_format_group,
498};
499
46bdd905
YZ
500static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
501 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
502 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
503 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
504 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
505 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
506 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
507 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
508 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
509 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
510 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
511 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
512 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
513 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
514 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
515 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
516 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
517 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
518 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
519 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
520 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
521 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
522 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
523 EVENT_EXTRA_END
524};
525
526static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
527{
528 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
529 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
530 int i;
531
532 if (uncore_box_is_fake(box))
533 return;
534
535 for (i = 0; i < 5; i++) {
536 if (reg1->alloc & (0x1 << i))
537 atomic_sub(1 << (i * 6), &er->ref);
538 }
539 reg1->alloc = 0;
540}
541
542static struct event_constraint *
543__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
544 u64 (*cbox_filter_mask)(int fields))
545{
546 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
547 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
548 int i, alloc = 0;
549 unsigned long flags;
550 u64 mask;
551
552 if (reg1->idx == EXTRA_REG_NONE)
553 return NULL;
554
555 raw_spin_lock_irqsave(&er->lock, flags);
556 for (i = 0; i < 5; i++) {
557 if (!(reg1->idx & (0x1 << i)))
558 continue;
559 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
560 continue;
561
562 mask = cbox_filter_mask(0x1 << i);
563 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
564 !((reg1->config ^ er->config) & mask)) {
565 atomic_add(1 << (i * 6), &er->ref);
566 er->config &= ~mask;
567 er->config |= reg1->config & mask;
568 alloc |= (0x1 << i);
569 } else {
570 break;
571 }
572 }
573 raw_spin_unlock_irqrestore(&er->lock, flags);
574 if (i < 5)
575 goto fail;
576
577 if (!uncore_box_is_fake(box))
578 reg1->alloc |= alloc;
579
b2fa344d 580 return NULL;
46bdd905
YZ
581fail:
582 for (; i >= 0; i--) {
583 if (alloc & (0x1 << i))
584 atomic_sub(1 << (i * 6), &er->ref);
585 }
586 return &constraint_empty;
587}
588
589static u64 snbep_cbox_filter_mask(int fields)
590{
591 u64 mask = 0;
592
593 if (fields & 0x1)
594 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
595 if (fields & 0x2)
596 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
597 if (fields & 0x4)
598 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
599 if (fields & 0x8)
600 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
601
602 return mask;
603}
604
605static struct event_constraint *
606snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
607{
608 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
609}
610
611static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
612{
613 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
614 struct extra_reg *er;
615 int idx = 0;
616
617 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
618 if (er->event != (event->hw.config & er->config_mask))
619 continue;
620 idx |= er->idx;
621 }
622
623 if (idx) {
624 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
625 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
626 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
627 reg1->idx = idx;
628 }
629 return 0;
630}
631
632static struct intel_uncore_ops snbep_uncore_cbox_ops = {
633 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
634 .hw_config = snbep_cbox_hw_config,
635 .get_constraint = snbep_cbox_get_constraint,
636 .put_constraint = snbep_cbox_put_constraint,
637};
638
7c94ee2e 639static struct intel_uncore_type snbep_uncore_cbox = {
6a67943a
YZ
640 .name = "cbox",
641 .num_counters = 4,
642 .num_boxes = 8,
643 .perf_ctr_bits = 44,
644 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
645 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
646 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
647 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
648 .msr_offset = SNBEP_CBO_MSR_OFFSET,
649 .num_shared_regs = 1,
650 .constraints = snbep_uncore_cbox_constraints,
46bdd905 651 .ops = &snbep_uncore_cbox_ops,
6a67943a 652 .format_group = &snbep_uncore_cbox_format_group,
7c94ee2e
YZ
653};
654
46bdd905
YZ
655static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
656{
657 struct hw_perf_event *hwc = &event->hw;
658 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
659 u64 config = reg1->config;
660
661 if (new_idx > reg1->idx)
662 config <<= 8 * (new_idx - reg1->idx);
663 else
664 config >>= 8 * (reg1->idx - new_idx);
665
666 if (modify) {
667 hwc->config += new_idx - reg1->idx;
668 reg1->config = config;
669 reg1->idx = new_idx;
670 }
671 return config;
672}
673
674static struct event_constraint *
675snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
676{
677 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
678 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
679 unsigned long flags;
680 int idx = reg1->idx;
681 u64 mask, config1 = reg1->config;
682 bool ok = false;
683
684 if (reg1->idx == EXTRA_REG_NONE ||
685 (!uncore_box_is_fake(box) && reg1->alloc))
686 return NULL;
687again:
13acac30 688 mask = 0xffULL << (idx * 8);
46bdd905
YZ
689 raw_spin_lock_irqsave(&er->lock, flags);
690 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
691 !((config1 ^ er->config) & mask)) {
692 atomic_add(1 << (idx * 8), &er->ref);
693 er->config &= ~mask;
694 er->config |= config1 & mask;
695 ok = true;
696 }
697 raw_spin_unlock_irqrestore(&er->lock, flags);
698
699 if (!ok) {
700 idx = (idx + 1) % 4;
701 if (idx != reg1->idx) {
702 config1 = snbep_pcu_alter_er(event, idx, false);
703 goto again;
704 }
705 return &constraint_empty;
706 }
707
708 if (!uncore_box_is_fake(box)) {
709 if (idx != reg1->idx)
710 snbep_pcu_alter_er(event, idx, true);
711 reg1->alloc = 1;
712 }
713 return NULL;
714}
715
716static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
717{
718 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
719 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
720
721 if (uncore_box_is_fake(box) || !reg1->alloc)
722 return;
723
724 atomic_sub(1 << (reg1->idx * 8), &er->ref);
725 reg1->alloc = 0;
726}
727
728static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
729{
730 struct hw_perf_event *hwc = &event->hw;
731 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
732 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
733
734 if (ev_sel >= 0xb && ev_sel <= 0xe) {
735 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
736 reg1->idx = ev_sel - 0xb;
737 reg1->config = event->attr.config1 & (0xff << reg1->idx);
738 }
739 return 0;
740}
741
742static struct intel_uncore_ops snbep_uncore_pcu_ops = {
743 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
744 .hw_config = snbep_pcu_hw_config,
745 .get_constraint = snbep_pcu_get_constraint,
746 .put_constraint = snbep_pcu_put_constraint,
747};
748
7c94ee2e 749static struct intel_uncore_type snbep_uncore_pcu = {
6a67943a
YZ
750 .name = "pcu",
751 .num_counters = 4,
752 .num_boxes = 1,
753 .perf_ctr_bits = 48,
754 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
755 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
756 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
757 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
758 .num_shared_regs = 1,
46bdd905 759 .ops = &snbep_uncore_pcu_ops,
6a67943a 760 .format_group = &snbep_uncore_pcu_format_group,
7c94ee2e
YZ
761};
762
763static struct intel_uncore_type *snbep_msr_uncores[] = {
764 &snbep_uncore_ubox,
765 &snbep_uncore_cbox,
766 &snbep_uncore_pcu,
767 NULL,
768};
769
fd1ec259
YZ
770enum {
771 SNBEP_PCI_QPI_PORT0_FILTER,
772 SNBEP_PCI_QPI_PORT1_FILTER,
773};
774
775static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
776{
777 struct hw_perf_event *hwc = &event->hw;
778 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
779 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
780
781 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
782 reg1->idx = 0;
783 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
784 reg1->config = event->attr.config1;
785 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
786 reg2->config = event->attr.config2;
787 }
788 return 0;
789}
790
791static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
792{
793 struct pci_dev *pdev = box->pci_dev;
794 struct hw_perf_event *hwc = &event->hw;
795 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
796 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
797
798 if (reg1->idx != EXTRA_REG_NONE) {
799 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
800 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
801 WARN_ON_ONCE(!filter_pdev);
802 if (filter_pdev) {
803 pci_write_config_dword(filter_pdev, reg1->reg,
804 (u32)reg1->config);
805 pci_write_config_dword(filter_pdev, reg1->reg + 4,
806 (u32)(reg1->config >> 32));
807 pci_write_config_dword(filter_pdev, reg2->reg,
808 (u32)reg2->config);
809 pci_write_config_dword(filter_pdev, reg2->reg + 4,
810 (u32)(reg2->config >> 32));
811 }
812 }
813
814 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
815}
816
817static struct intel_uncore_ops snbep_uncore_qpi_ops = {
818 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
819 .enable_event = snbep_qpi_enable_event,
820 .hw_config = snbep_qpi_hw_config,
821 .get_constraint = uncore_get_constraint,
822 .put_constraint = uncore_put_constraint,
823};
824
7c94ee2e
YZ
825#define SNBEP_UNCORE_PCI_COMMON_INIT() \
826 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
827 .event_ctl = SNBEP_PCI_PMON_CTL0, \
828 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
829 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
830 .ops = &snbep_uncore_pci_ops, \
831 .format_group = &snbep_uncore_format_group
832
833static struct intel_uncore_type snbep_uncore_ha = {
834 .name = "ha",
835 .num_counters = 4,
836 .num_boxes = 1,
837 .perf_ctr_bits = 48,
838 SNBEP_UNCORE_PCI_COMMON_INIT(),
839};
840
841static struct intel_uncore_type snbep_uncore_imc = {
842 .name = "imc",
843 .num_counters = 4,
844 .num_boxes = 4,
845 .perf_ctr_bits = 48,
846 .fixed_ctr_bits = 48,
847 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
848 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
849 .event_descs = snbep_uncore_imc_events,
850 SNBEP_UNCORE_PCI_COMMON_INIT(),
851};
852
853static struct intel_uncore_type snbep_uncore_qpi = {
fd1ec259
YZ
854 .name = "qpi",
855 .num_counters = 4,
856 .num_boxes = 2,
857 .perf_ctr_bits = 48,
858 .perf_ctr = SNBEP_PCI_PMON_CTR0,
859 .event_ctl = SNBEP_PCI_PMON_CTL0,
860 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
861 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
862 .num_shared_regs = 1,
863 .ops = &snbep_uncore_qpi_ops,
864 .event_descs = snbep_uncore_qpi_events,
865 .format_group = &snbep_uncore_qpi_format_group,
7c94ee2e
YZ
866};
867
868
869static struct intel_uncore_type snbep_uncore_r2pcie = {
870 .name = "r2pcie",
871 .num_counters = 4,
872 .num_boxes = 1,
873 .perf_ctr_bits = 44,
874 .constraints = snbep_uncore_r2pcie_constraints,
875 SNBEP_UNCORE_PCI_COMMON_INIT(),
876};
877
878static struct intel_uncore_type snbep_uncore_r3qpi = {
879 .name = "r3qpi",
880 .num_counters = 3,
881 .num_boxes = 2,
882 .perf_ctr_bits = 44,
883 .constraints = snbep_uncore_r3qpi_constraints,
884 SNBEP_UNCORE_PCI_COMMON_INIT(),
885};
886
e850f9c3
YZ
887enum {
888 SNBEP_PCI_UNCORE_HA,
889 SNBEP_PCI_UNCORE_IMC,
890 SNBEP_PCI_UNCORE_QPI,
891 SNBEP_PCI_UNCORE_R2PCIE,
892 SNBEP_PCI_UNCORE_R3QPI,
893};
894
7c94ee2e 895static struct intel_uncore_type *snbep_pci_uncores[] = {
e850f9c3
YZ
896 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
897 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
898 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
899 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
900 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
7c94ee2e
YZ
901 NULL,
902};
903
904static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
905 { /* Home Agent */
906 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
899396cf 907 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
7c94ee2e
YZ
908 },
909 { /* MC Channel 0 */
910 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
899396cf 911 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
7c94ee2e
YZ
912 },
913 { /* MC Channel 1 */
914 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
899396cf 915 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
7c94ee2e
YZ
916 },
917 { /* MC Channel 2 */
918 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
899396cf 919 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
7c94ee2e
YZ
920 },
921 { /* MC Channel 3 */
922 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
899396cf 923 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
7c94ee2e
YZ
924 },
925 { /* QPI Port 0 */
926 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
899396cf 927 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
7c94ee2e
YZ
928 },
929 { /* QPI Port 1 */
930 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
899396cf 931 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
7c94ee2e 932 },
e850f9c3 933 { /* R2PCIe */
7c94ee2e 934 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
899396cf 935 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
7c94ee2e
YZ
936 },
937 { /* R3QPI Link 0 */
938 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
899396cf 939 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
7c94ee2e
YZ
940 },
941 { /* R3QPI Link 1 */
942 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
899396cf 943 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
7c94ee2e 944 },
fd1ec259
YZ
945 { /* QPI Port 0 filter */
946 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
947 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
948 SNBEP_PCI_QPI_PORT0_FILTER),
949 },
950 { /* QPI Port 0 filter */
951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
952 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
953 SNBEP_PCI_QPI_PORT1_FILTER),
954 },
7c94ee2e
YZ
955 { /* end: all zeroes */ }
956};
957
958static struct pci_driver snbep_uncore_pci_driver = {
959 .name = "snbep_uncore",
960 .id_table = snbep_uncore_pci_ids,
961};
962
963/*
964 * build pci bus to socket mapping
965 */
e850f9c3 966static int snbep_pci2phy_map_init(int devid)
7c94ee2e
YZ
967{
968 struct pci_dev *ubox_dev = NULL;
969 int i, bus, nodeid;
032c3851
YZ
970 int err = 0;
971 u32 config = 0;
7c94ee2e
YZ
972
973 while (1) {
974 /* find the UBOX device */
e850f9c3 975 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
7c94ee2e
YZ
976 if (!ubox_dev)
977 break;
978 bus = ubox_dev->bus->number;
979 /* get the Node ID of the local register */
032c3851
YZ
980 err = pci_read_config_dword(ubox_dev, 0x40, &config);
981 if (err)
982 break;
7c94ee2e
YZ
983 nodeid = config;
984 /* get the Node ID mapping */
032c3851
YZ
985 err = pci_read_config_dword(ubox_dev, 0x54, &config);
986 if (err)
987 break;
7c94ee2e 988 /*
254298c7
YZ
989 * every three bits in the Node ID mapping register maps
990 * to a particular node.
991 */
992 for (i = 0; i < 8; i++) {
993 if (nodeid == ((config >> (3 * i)) & 0x7)) {
994 pcibus_to_physid[bus] = i;
995 break;
996 }
997 }
e850f9c3 998 }
032c3851
YZ
999
1000 if (ubox_dev)
1001 pci_dev_put(ubox_dev);
1002
1003 return err ? pcibios_err_to_errno(err) : 0;
254298c7
YZ
1004}
1005/* end of Sandy Bridge-EP uncore support */
1006
e850f9c3
YZ
1007/* IvyTown uncore support */
1008static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1009{
1010 unsigned msr = uncore_msr_box_ctl(box);
1011 if (msr)
1012 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1013}
1014
1015static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1016{
1017 struct pci_dev *pdev = box->pci_dev;
1018
1019 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1020}
1021
1022#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
1023 .init_box = ivt_uncore_msr_init_box, \
1024 .disable_box = snbep_uncore_msr_disable_box, \
1025 .enable_box = snbep_uncore_msr_enable_box, \
1026 .disable_event = snbep_uncore_msr_disable_event, \
1027 .enable_event = snbep_uncore_msr_enable_event, \
1028 .read_counter = uncore_msr_read_counter
1029
1030static struct intel_uncore_ops ivt_uncore_msr_ops = {
1031 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1032};
1033
1034static struct intel_uncore_ops ivt_uncore_pci_ops = {
1035 .init_box = ivt_uncore_pci_init_box,
1036 .disable_box = snbep_uncore_pci_disable_box,
1037 .enable_box = snbep_uncore_pci_enable_box,
1038 .disable_event = snbep_uncore_pci_disable_event,
1039 .enable_event = snbep_uncore_pci_enable_event,
1040 .read_counter = snbep_uncore_pci_read_counter,
1041};
1042
1043#define IVT_UNCORE_PCI_COMMON_INIT() \
1044 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1045 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1046 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
1047 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1048 .ops = &ivt_uncore_pci_ops, \
1049 .format_group = &ivt_uncore_format_group
1050
1051static struct attribute *ivt_uncore_formats_attr[] = {
1052 &format_attr_event.attr,
1053 &format_attr_umask.attr,
1054 &format_attr_edge.attr,
1055 &format_attr_inv.attr,
1056 &format_attr_thresh8.attr,
1057 NULL,
1058};
1059
1060static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1061 &format_attr_event.attr,
1062 &format_attr_umask.attr,
1063 &format_attr_edge.attr,
1064 &format_attr_inv.attr,
1065 &format_attr_thresh5.attr,
1066 NULL,
1067};
1068
1069static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1070 &format_attr_event.attr,
1071 &format_attr_umask.attr,
1072 &format_attr_edge.attr,
1073 &format_attr_tid_en.attr,
1074 &format_attr_thresh8.attr,
1075 &format_attr_filter_tid.attr,
1076 &format_attr_filter_link.attr,
1077 &format_attr_filter_state2.attr,
1078 &format_attr_filter_nid2.attr,
1079 &format_attr_filter_opc2.attr,
1080 NULL,
1081};
1082
1083static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1084 &format_attr_event_ext.attr,
1085 &format_attr_occ_sel.attr,
1086 &format_attr_edge.attr,
1087 &format_attr_thresh5.attr,
1088 &format_attr_occ_invert.attr,
1089 &format_attr_occ_edge.attr,
1090 &format_attr_filter_band0.attr,
1091 &format_attr_filter_band1.attr,
1092 &format_attr_filter_band2.attr,
1093 &format_attr_filter_band3.attr,
1094 NULL,
1095};
1096
1097static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1098 &format_attr_event_ext.attr,
1099 &format_attr_umask.attr,
1100 &format_attr_edge.attr,
1101 &format_attr_thresh8.attr,
1102 NULL,
1103};
1104
1105static struct attribute_group ivt_uncore_format_group = {
1106 .name = "format",
1107 .attrs = ivt_uncore_formats_attr,
1108};
1109
1110static struct attribute_group ivt_uncore_ubox_format_group = {
1111 .name = "format",
1112 .attrs = ivt_uncore_ubox_formats_attr,
1113};
1114
1115static struct attribute_group ivt_uncore_cbox_format_group = {
1116 .name = "format",
1117 .attrs = ivt_uncore_cbox_formats_attr,
1118};
1119
1120static struct attribute_group ivt_uncore_pcu_format_group = {
1121 .name = "format",
1122 .attrs = ivt_uncore_pcu_formats_attr,
1123};
1124
1125static struct attribute_group ivt_uncore_qpi_format_group = {
1126 .name = "format",
1127 .attrs = ivt_uncore_qpi_formats_attr,
1128};
1129
1130static struct intel_uncore_type ivt_uncore_ubox = {
1131 .name = "ubox",
1132 .num_counters = 2,
1133 .num_boxes = 1,
1134 .perf_ctr_bits = 44,
1135 .fixed_ctr_bits = 48,
1136 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1137 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1138 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1139 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1140 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1141 .ops = &ivt_uncore_msr_ops,
1142 .format_group = &ivt_uncore_ubox_format_group,
1143};
1144
1145static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1146 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1147 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1148 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1149 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1150 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1151 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1152 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1153 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1154 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1155 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1156 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1157 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1158 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1159 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1160 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1161 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1162 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1163 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1164 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1165 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1166 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1167 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1168 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1169 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1170 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1171 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1172 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1173 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1174 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1175 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1176 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1177 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1178 EVENT_EXTRA_END
1179};
1180
1181static u64 ivt_cbox_filter_mask(int fields)
1182{
1183 u64 mask = 0;
1184
1185 if (fields & 0x1)
1186 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1187 if (fields & 0x2)
1188 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1189 if (fields & 0x4)
1190 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1191 if (fields & 0x8)
1192 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1193 if (fields & 0x10)
1194 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1195
1196 return mask;
1197}
1198
1199static struct event_constraint *
1200ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1201{
1202 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1203}
1204
1205static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1206{
1207 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1208 struct extra_reg *er;
1209 int idx = 0;
1210
1211 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1212 if (er->event != (event->hw.config & er->config_mask))
1213 continue;
1214 idx |= er->idx;
1215 }
1216
1217 if (idx) {
1218 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1219 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1220 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1221 reg1->idx = idx;
1222 }
1223 return 0;
1224}
1225
1226static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1227{
1228 struct hw_perf_event *hwc = &event->hw;
1229 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1230
1231 if (reg1->idx != EXTRA_REG_NONE) {
1232 u64 filter = uncore_shared_reg_config(box, 0);
1233 wrmsrl(reg1->reg, filter & 0xffffffff);
1234 wrmsrl(reg1->reg + 6, filter >> 32);
1235 }
1236
1237 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1238}
1239
1240static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1241 .init_box = ivt_uncore_msr_init_box,
1242 .disable_box = snbep_uncore_msr_disable_box,
1243 .enable_box = snbep_uncore_msr_enable_box,
1244 .disable_event = snbep_uncore_msr_disable_event,
1245 .enable_event = ivt_cbox_enable_event,
1246 .read_counter = uncore_msr_read_counter,
1247 .hw_config = ivt_cbox_hw_config,
1248 .get_constraint = ivt_cbox_get_constraint,
1249 .put_constraint = snbep_cbox_put_constraint,
1250};
1251
1252static struct intel_uncore_type ivt_uncore_cbox = {
1253 .name = "cbox",
1254 .num_counters = 4,
1255 .num_boxes = 15,
1256 .perf_ctr_bits = 44,
1257 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1258 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1259 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1260 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1261 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1262 .num_shared_regs = 1,
1263 .constraints = snbep_uncore_cbox_constraints,
1264 .ops = &ivt_uncore_cbox_ops,
1265 .format_group = &ivt_uncore_cbox_format_group,
1266};
1267
1268static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1269 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1270 .hw_config = snbep_pcu_hw_config,
1271 .get_constraint = snbep_pcu_get_constraint,
1272 .put_constraint = snbep_pcu_put_constraint,
1273};
1274
1275static struct intel_uncore_type ivt_uncore_pcu = {
1276 .name = "pcu",
1277 .num_counters = 4,
1278 .num_boxes = 1,
1279 .perf_ctr_bits = 48,
1280 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1281 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1282 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1283 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1284 .num_shared_regs = 1,
1285 .ops = &ivt_uncore_pcu_ops,
1286 .format_group = &ivt_uncore_pcu_format_group,
1287};
1288
1289static struct intel_uncore_type *ivt_msr_uncores[] = {
1290 &ivt_uncore_ubox,
1291 &ivt_uncore_cbox,
1292 &ivt_uncore_pcu,
1293 NULL,
1294};
1295
1296static struct intel_uncore_type ivt_uncore_ha = {
1297 .name = "ha",
1298 .num_counters = 4,
1299 .num_boxes = 2,
1300 .perf_ctr_bits = 48,
1301 IVT_UNCORE_PCI_COMMON_INIT(),
1302};
1303
1304static struct intel_uncore_type ivt_uncore_imc = {
1305 .name = "imc",
1306 .num_counters = 4,
1307 .num_boxes = 8,
1308 .perf_ctr_bits = 48,
1309 .fixed_ctr_bits = 48,
1310 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1311 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1312 IVT_UNCORE_PCI_COMMON_INIT(),
1313};
1314
1315static struct intel_uncore_type ivt_uncore_qpi = {
1316 .name = "qpi",
1317 .num_counters = 4,
1318 .num_boxes = 3,
1319 .perf_ctr_bits = 48,
1320 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1321 .event_ctl = SNBEP_PCI_PMON_CTL0,
1322 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1323 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1324 .ops = &ivt_uncore_pci_ops,
1325 .format_group = &ivt_uncore_qpi_format_group,
1326};
1327
1328static struct intel_uncore_type ivt_uncore_r2pcie = {
1329 .name = "r2pcie",
1330 .num_counters = 4,
1331 .num_boxes = 1,
1332 .perf_ctr_bits = 44,
1333 .constraints = snbep_uncore_r2pcie_constraints,
1334 IVT_UNCORE_PCI_COMMON_INIT(),
1335};
1336
1337static struct intel_uncore_type ivt_uncore_r3qpi = {
1338 .name = "r3qpi",
1339 .num_counters = 3,
1340 .num_boxes = 2,
1341 .perf_ctr_bits = 44,
1342 .constraints = snbep_uncore_r3qpi_constraints,
1343 IVT_UNCORE_PCI_COMMON_INIT(),
1344};
1345
1346enum {
1347 IVT_PCI_UNCORE_HA,
1348 IVT_PCI_UNCORE_IMC,
1349 IVT_PCI_UNCORE_QPI,
1350 IVT_PCI_UNCORE_R2PCIE,
1351 IVT_PCI_UNCORE_R3QPI,
1352};
1353
1354static struct intel_uncore_type *ivt_pci_uncores[] = {
1355 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1356 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1357 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1358 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1359 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1360 NULL,
1361};
1362
1363static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1364 { /* Home Agent 0 */
1365 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
899396cf 1366 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
e850f9c3
YZ
1367 },
1368 { /* Home Agent 1 */
1369 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
899396cf 1370 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
e850f9c3
YZ
1371 },
1372 { /* MC0 Channel 0 */
1373 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
899396cf 1374 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
e850f9c3
YZ
1375 },
1376 { /* MC0 Channel 1 */
1377 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
899396cf 1378 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
e850f9c3
YZ
1379 },
1380 { /* MC0 Channel 3 */
1381 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
899396cf 1382 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
e850f9c3
YZ
1383 },
1384 { /* MC0 Channel 4 */
1385 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
899396cf 1386 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
e850f9c3
YZ
1387 },
1388 { /* MC1 Channel 0 */
1389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
899396cf 1390 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
e850f9c3
YZ
1391 },
1392 { /* MC1 Channel 1 */
1393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
899396cf 1394 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
e850f9c3
YZ
1395 },
1396 { /* MC1 Channel 3 */
1397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
899396cf 1398 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
e850f9c3
YZ
1399 },
1400 { /* MC1 Channel 4 */
1401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
899396cf 1402 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
e850f9c3
YZ
1403 },
1404 { /* QPI0 Port 0 */
1405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
899396cf 1406 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
e850f9c3
YZ
1407 },
1408 { /* QPI0 Port 1 */
1409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
899396cf 1410 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
e850f9c3
YZ
1411 },
1412 { /* QPI1 Port 2 */
1413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
899396cf 1414 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
e850f9c3
YZ
1415 },
1416 { /* R2PCIe */
1417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
899396cf 1418 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
e850f9c3
YZ
1419 },
1420 { /* R3QPI0 Link 0 */
1421 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
899396cf 1422 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
e850f9c3
YZ
1423 },
1424 { /* R3QPI0 Link 1 */
1425 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
899396cf 1426 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
e850f9c3
YZ
1427 },
1428 { /* R3QPI1 Link 2 */
1429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
899396cf 1430 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
e850f9c3
YZ
1431 },
1432 { /* end: all zeroes */ }
1433};
1434
1435static struct pci_driver ivt_uncore_pci_driver = {
1436 .name = "ivt_uncore",
1437 .id_table = ivt_uncore_pci_ids,
1438};
1439/* end of IvyTown uncore support */
1440
254298c7
YZ
1441/* Sandy Bridge uncore support */
1442static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1443{
1444 struct hw_perf_event *hwc = &event->hw;
1445
1446 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1447 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1448 else
1449 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1450}
1451
1452static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1453{
1454 wrmsrl(event->hw.config_base, 0);
1455}
1456
1457static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1458{
1459 if (box->pmu->pmu_idx == 0) {
1460 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1461 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1462 }
1463}
1464
35534b20
SE
1465static struct uncore_event_desc snb_uncore_events[] = {
1466 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1467 { /* end: all zeroes */ },
1468};
1469
254298c7
YZ
1470static struct attribute *snb_uncore_formats_attr[] = {
1471 &format_attr_event.attr,
1472 &format_attr_umask.attr,
1473 &format_attr_edge.attr,
1474 &format_attr_inv.attr,
1475 &format_attr_cmask5.attr,
1476 NULL,
1477};
1478
1479static struct attribute_group snb_uncore_format_group = {
1480 .name = "format",
1481 .attrs = snb_uncore_formats_attr,
1482};
1483
1484static struct intel_uncore_ops snb_uncore_msr_ops = {
1485 .init_box = snb_uncore_msr_init_box,
1486 .disable_event = snb_uncore_msr_disable_event,
1487 .enable_event = snb_uncore_msr_enable_event,
1488 .read_counter = uncore_msr_read_counter,
1489};
1490
1491static struct event_constraint snb_uncore_cbox_constraints[] = {
1492 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1493 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1494 EVENT_CONSTRAINT_END
1495};
1496
1497static struct intel_uncore_type snb_uncore_cbox = {
1498 .name = "cbox",
1499 .num_counters = 2,
1500 .num_boxes = 4,
1501 .perf_ctr_bits = 44,
1502 .fixed_ctr_bits = 48,
1503 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
1504 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
1505 .fixed_ctr = SNB_UNC_FIXED_CTR,
1506 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
1507 .single_fixed = 1,
1508 .event_mask = SNB_UNC_RAW_EVENT_MASK,
1509 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
1510 .constraints = snb_uncore_cbox_constraints,
1511 .ops = &snb_uncore_msr_ops,
1512 .format_group = &snb_uncore_format_group,
35534b20 1513 .event_descs = snb_uncore_events,
254298c7
YZ
1514};
1515
1516static struct intel_uncore_type *snb_msr_uncores[] = {
1517 &snb_uncore_cbox,
1518 NULL,
1519};
1520/* end of Sandy Bridge uncore support */
1521
1522/* Nehalem uncore support */
1523static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1524{
1525 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1526}
1527
1528static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1529{
1530 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1531}
1532
1533static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1534{
1535 struct hw_perf_event *hwc = &event->hw;
1536
1537 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1538 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1539 else
1540 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1541}
1542
1543static struct attribute *nhm_uncore_formats_attr[] = {
1544 &format_attr_event.attr,
1545 &format_attr_umask.attr,
1546 &format_attr_edge.attr,
1547 &format_attr_inv.attr,
1548 &format_attr_cmask8.attr,
1549 NULL,
1550};
1551
1552static struct attribute_group nhm_uncore_format_group = {
1553 .name = "format",
1554 .attrs = nhm_uncore_formats_attr,
1555};
1556
1557static struct uncore_event_desc nhm_uncore_events[] = {
1558 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1559 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
1560 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
1561 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
1562 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
1563 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
1564 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1565 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
1566 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
1567 { /* end: all zeroes */ },
1568};
1569
1570static struct intel_uncore_ops nhm_uncore_msr_ops = {
1571 .disable_box = nhm_uncore_msr_disable_box,
1572 .enable_box = nhm_uncore_msr_enable_box,
1573 .disable_event = snb_uncore_msr_disable_event,
1574 .enable_event = nhm_uncore_msr_enable_event,
1575 .read_counter = uncore_msr_read_counter,
1576};
1577
1578static struct intel_uncore_type nhm_uncore = {
1579 .name = "",
1580 .num_counters = 8,
1581 .num_boxes = 1,
1582 .perf_ctr_bits = 48,
1583 .fixed_ctr_bits = 48,
1584 .event_ctl = NHM_UNC_PERFEVTSEL0,
1585 .perf_ctr = NHM_UNC_UNCORE_PMC0,
1586 .fixed_ctr = NHM_UNC_FIXED_CTR,
1587 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
1588 .event_mask = NHM_UNC_RAW_EVENT_MASK,
1589 .event_descs = nhm_uncore_events,
1590 .ops = &nhm_uncore_msr_ops,
1591 .format_group = &nhm_uncore_format_group,
1592};
1593
1594static struct intel_uncore_type *nhm_msr_uncores[] = {
1595 &nhm_uncore,
1596 NULL,
1597};
1598/* end of Nehalem uncore support */
1599
1600/* Nehalem-EX uncore support */
254298c7
YZ
1601DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1602DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
254298c7
YZ
1603DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1604DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1605
1606static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1607{
1608 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1609}
1610
1611static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1612{
1613 unsigned msr = uncore_msr_box_ctl(box);
1614 u64 config;
1615
1616 if (msr) {
1617 rdmsrl(msr, config);
1618 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1619 /* WBox has a fixed counter */
1620 if (uncore_msr_fixed_ctl(box))
1621 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1622 wrmsrl(msr, config);
1623 }
1624}
1625
1626static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1627{
1628 unsigned msr = uncore_msr_box_ctl(box);
1629 u64 config;
1630
1631 if (msr) {
1632 rdmsrl(msr, config);
1633 config |= (1ULL << uncore_num_counters(box)) - 1;
1634 /* WBox has a fixed counter */
1635 if (uncore_msr_fixed_ctl(box))
1636 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1637 wrmsrl(msr, config);
1638 }
1639}
1640
1641static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1642{
1643 wrmsrl(event->hw.config_base, 0);
1644}
1645
1646static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1647{
1648 struct hw_perf_event *hwc = &event->hw;
1649
1650 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1651 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1652 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1653 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1654 else
1655 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1656}
1657
1658#define NHMEX_UNCORE_OPS_COMMON_INIT() \
1659 .init_box = nhmex_uncore_msr_init_box, \
1660 .disable_box = nhmex_uncore_msr_disable_box, \
1661 .enable_box = nhmex_uncore_msr_enable_box, \
1662 .disable_event = nhmex_uncore_msr_disable_event, \
1663 .read_counter = uncore_msr_read_counter
1664
1665static struct intel_uncore_ops nhmex_uncore_ops = {
1666 NHMEX_UNCORE_OPS_COMMON_INIT(),
1667 .enable_event = nhmex_uncore_msr_enable_event,
1668};
1669
1670static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1671 &format_attr_event.attr,
1672 &format_attr_edge.attr,
1673 NULL,
1674};
1675
1676static struct attribute_group nhmex_uncore_ubox_format_group = {
1677 .name = "format",
1678 .attrs = nhmex_uncore_ubox_formats_attr,
1679};
1680
1681static struct intel_uncore_type nhmex_uncore_ubox = {
1682 .name = "ubox",
1683 .num_counters = 1,
1684 .num_boxes = 1,
1685 .perf_ctr_bits = 48,
1686 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
1687 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
1688 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
1689 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1690 .ops = &nhmex_uncore_ops,
1691 .format_group = &nhmex_uncore_ubox_format_group
1692};
1693
1694static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1695 &format_attr_event.attr,
1696 &format_attr_umask.attr,
1697 &format_attr_edge.attr,
1698 &format_attr_inv.attr,
1699 &format_attr_thresh8.attr,
1700 NULL,
1701};
1702
1703static struct attribute_group nhmex_uncore_cbox_format_group = {
1704 .name = "format",
1705 .attrs = nhmex_uncore_cbox_formats_attr,
1706};
1707
1708/* msr offset for each instance of cbox */
1709static unsigned nhmex_cbox_msr_offsets[] = {
1710 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1711};
1712
1713static struct intel_uncore_type nhmex_uncore_cbox = {
1714 .name = "cbox",
1715 .num_counters = 6,
1716 .num_boxes = 10,
1717 .perf_ctr_bits = 48,
1718 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
1719 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
1720 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1721 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
1722 .msr_offsets = nhmex_cbox_msr_offsets,
1723 .pair_ctr_ctl = 1,
1724 .ops = &nhmex_uncore_ops,
1725 .format_group = &nhmex_uncore_cbox_format_group
1726};
1727
1728static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1729 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1730 { /* end: all zeroes */ },
1731};
1732
1733static struct intel_uncore_type nhmex_uncore_wbox = {
1734 .name = "wbox",
1735 .num_counters = 4,
1736 .num_boxes = 1,
1737 .perf_ctr_bits = 48,
1738 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
1739 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
1740 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
1741 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
1742 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1743 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
1744 .pair_ctr_ctl = 1,
1745 .event_descs = nhmex_uncore_wbox_events,
1746 .ops = &nhmex_uncore_ops,
1747 .format_group = &nhmex_uncore_cbox_format_group
1748};
1749
1750static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1751{
1752 struct hw_perf_event *hwc = &event->hw;
1753 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1754 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1755 int ctr, ev_sel;
1756
1757 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1758 NHMEX_B_PMON_CTR_SHIFT;
1759 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1760 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1761
1762 /* events that do not use the match/mask registers */
1763 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1764 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1765 return 0;
1766
1767 if (box->pmu->pmu_idx == 0)
1768 reg1->reg = NHMEX_B0_MSR_MATCH;
1769 else
1770 reg1->reg = NHMEX_B1_MSR_MATCH;
1771 reg1->idx = 0;
1772 reg1->config = event->attr.config1;
1773 reg2->config = event->attr.config2;
1774 return 0;
1775}
1776
1777static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1778{
1779 struct hw_perf_event *hwc = &event->hw;
1780 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1781 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1782
1783 if (reg1->idx != EXTRA_REG_NONE) {
1784 wrmsrl(reg1->reg, reg1->config);
1785 wrmsrl(reg1->reg + 1, reg2->config);
1786 }
1787 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1788 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1789}
1790
1791/*
1792 * The Bbox has 4 counters, but each counter monitors different events.
1793 * Use bits 6-7 in the event config to select counter.
1794 */
1795static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1796 EVENT_CONSTRAINT(0 , 1, 0xc0),
1797 EVENT_CONSTRAINT(0x40, 2, 0xc0),
1798 EVENT_CONSTRAINT(0x80, 4, 0xc0),
1799 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1800 EVENT_CONSTRAINT_END,
1801};
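/*
 * Reading the constraint table above (a sketch of how the encoding is used):
 * EVENT_CONSTRAINT(code, idxmsk, cmask) matches when
 * (hwc->config & cmask) == code, and idxmsk is the bitmask of counters the
 * event may occupy.  So a config whose counter field (bits 6-7, the "counter"
 * format attribute) is 1 carries code 0x40, matches the second entry and is
 * pinned to counter 1; counter field 3 (code 0xc0) is pinned to counter 3.
 */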
1802
1803static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1804 &format_attr_event5.attr,
1805 &format_attr_counter.attr,
1806 &format_attr_match.attr,
1807 &format_attr_mask.attr,
1808 NULL,
1809};
1810
1811static struct attribute_group nhmex_uncore_bbox_format_group = {
1812 .name = "format",
1813 .attrs = nhmex_uncore_bbox_formats_attr,
1814};
1815
1816static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1817 NHMEX_UNCORE_OPS_COMMON_INIT(),
1818 .enable_event = nhmex_bbox_msr_enable_event,
1819 .hw_config = nhmex_bbox_hw_config,
1820 .get_constraint = uncore_get_constraint,
1821 .put_constraint = uncore_put_constraint,
1822};
1823
1824static struct intel_uncore_type nhmex_uncore_bbox = {
1825 .name = "bbox",
1826 .num_counters = 4,
1827 .num_boxes = 2,
1828 .perf_ctr_bits = 48,
1829 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1830 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1831 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1832 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1833 .msr_offset = NHMEX_B_MSR_OFFSET,
1834 .pair_ctr_ctl = 1,
1835 .num_shared_regs = 1,
1836 .constraints = nhmex_uncore_bbox_constraints,
1837 .ops = &nhmex_uncore_bbox_ops,
1838 .format_group = &nhmex_uncore_bbox_format_group
1839};
1840
1841static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1842{
1843 struct hw_perf_event *hwc = &event->hw;
1844 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1845 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1846
1847 /* only TO_R_PROG_EV event uses the match/mask register */
1848 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1849 NHMEX_S_EVENT_TO_R_PROG_EV)
1850 return 0;
1851
1852 if (box->pmu->pmu_idx == 0)
1853 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1854 else
1855 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1856 reg1->idx = 0;
1857 reg1->config = event->attr.config1;
1858 reg2->config = event->attr.config2;
1859 return 0;
1860}
1861
1862static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1863{
1864 struct hw_perf_event *hwc = &event->hw;
1865 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1866 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1867
1868 if (reg1->idx != EXTRA_REG_NONE) {
1869 wrmsrl(reg1->reg, 0);
1870 wrmsrl(reg1->reg + 1, reg1->config);
1871 wrmsrl(reg1->reg + 2, reg2->config);
1872 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1873 }
1874 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1875}
1876
1877static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1878 &format_attr_event.attr,
1879 &format_attr_umask.attr,
1880 &format_attr_edge.attr,
1881 &format_attr_inv.attr,
1882 &format_attr_thresh8.attr,
1883 &format_attr_match.attr,
1884 &format_attr_mask.attr,
1885 NULL,
1886};
1887
1888static struct attribute_group nhmex_uncore_sbox_format_group = {
1889 .name = "format",
1890 .attrs = nhmex_uncore_sbox_formats_attr,
1891};
1892
1893static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1894 NHMEX_UNCORE_OPS_COMMON_INIT(),
1895 .enable_event = nhmex_sbox_msr_enable_event,
1896 .hw_config = nhmex_sbox_hw_config,
1897 .get_constraint = uncore_get_constraint,
1898 .put_constraint = uncore_put_constraint,
1899};
1900
1901static struct intel_uncore_type nhmex_uncore_sbox = {
1902 .name = "sbox",
1903 .num_counters = 4,
1904 .num_boxes = 2,
1905 .perf_ctr_bits = 48,
1906 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1907 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1908 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1909 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1910 .msr_offset = NHMEX_S_MSR_OFFSET,
1911 .pair_ctr_ctl = 1,
1912 .num_shared_regs = 1,
1913 .ops = &nhmex_uncore_sbox_ops,
1914 .format_group = &nhmex_uncore_sbox_format_group
1915};
1916
1917enum {
1918 EXTRA_REG_NHMEX_M_FILTER,
1919 EXTRA_REG_NHMEX_M_DSP,
1920 EXTRA_REG_NHMEX_M_ISS,
1921 EXTRA_REG_NHMEX_M_MAP,
1922 EXTRA_REG_NHMEX_M_MSC_THR,
1923 EXTRA_REG_NHMEX_M_PGT,
1924 EXTRA_REG_NHMEX_M_PLD,
1925 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1926};
1927
1928static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1929 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1930 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1931 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1932 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1933 /* event 0xa uses two extra registers */
1934 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1935 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1936 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1937 /* events 0xd ~ 0x10 use the same extra register */
1938 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1939 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1940 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1941 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1942 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1943 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1944 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1945 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1946 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1947 EVENT_EXTRA_END
1948};
1949
1950 /* Nehalem-EX or Westmere-EX ? */
1951 static bool uncore_nhmex;
1952
1953static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1954{
1955 struct intel_uncore_extra_reg *er;
1956 unsigned long flags;
1957 bool ret = false;
1958 u64 mask;
1959
1960 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1961 er = &box->shared_regs[idx];
1962 raw_spin_lock_irqsave(&er->lock, flags);
1963 if (!atomic_read(&er->ref) || er->config == config) {
1964 atomic_inc(&er->ref);
1965 er->config = config;
1966 ret = true;
1967 }
1968 raw_spin_unlock_irqrestore(&er->lock, flags);
1969
1970 return ret;
1971 }
1972 /*
1973 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1974 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1975 * fields which are shared.
1976 */
1977 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1978 if (WARN_ON_ONCE(idx >= 4))
1979 return false;
1980
1981 /* mask of the shared fields */
1982 if (uncore_nhmex)
1983 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1984 else
1985 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1986 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1987
1988 raw_spin_lock_irqsave(&er->lock, flags);
1989 /* add mask of the non-shared field if it's in use */
1990 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1991 if (uncore_nhmex)
1992 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1993 else
1994 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1995 }
1996
1997 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1998 atomic_add(1 << (idx * 8), &er->ref);
1999 if (uncore_nhmex)
2000 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2001 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2002 else
2003 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2004 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2005 er->config &= ~mask;
2006 er->config |= (config & mask);
2007 ret = true;
2008 }
2009 raw_spin_unlock_irqrestore(&er->lock, flags);
2010
2011 return ret;
2012}
2013
2014static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2015{
2016 struct intel_uncore_extra_reg *er;
2017
2018 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2019 er = &box->shared_regs[idx];
2020 atomic_dec(&er->ref);
2021 return;
2022 }
2023
2024 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2025 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2026 atomic_sub(1 << (idx * 8), &er->ref);
2027}
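/*
 * Minimal sketch (not part of the driver) of how the ZDP_CTL_FVC reference
 * count is packed above: a single atomic_t holds four 8-bit use counts, one
 * per sub-field, so taking sub-field idx adds 1 << (idx * 8) and releasing it
 * subtracts the same value.  Reading one sub-count back is simply:
 */
static inline int zdp_subfield_refcount(atomic_t *ref, int idx)
{
	/* equivalent to __BITS_VALUE(atomic_read(ref), idx, 8) */
	return (atomic_read(ref) >> (idx * 8)) & 0xff;
}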
2028
2029 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2030{
2031 struct hw_perf_event *hwc = &event->hw;
2032 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2033 u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2034 u64 config = reg1->config;
2035
2036 /* get the non-shared control bits and shift them */
2037 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2038 if (uncore_nhmex)
2039 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2040 else
2041 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2042 if (new_idx > orig_idx) {
2043 idx = new_idx - orig_idx;
2044 config <<= 3 * idx;
2045 } else {
2046 idx = orig_idx - new_idx;
2047 config >>= 3 * idx;
2048 }
2049
2050 /* add the shared control bits back */
2051 if (uncore_nhmex)
2052 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2053 else
2054 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2055 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2056 if (modify) {
2057 /* adjust the main event selector */
2058 if (new_idx > orig_idx)
2059 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2060 else
2061 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2062 reg1->config = config;
2063 reg1->idx = ~0xff | new_idx;
2064 }
2065 return config;
2066}
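/*
 * Example of the re-indexing above (an illustrative walk-through): the shift
 * by 3 bits per step reflects the 3-bit wide ZDP_CTL_FVC event sub-fields, so
 * moving an event from sub-field 1 to sub-field 3 shifts its field-specific
 * bits left by 3 * (3 - 1) = 6 and, when modify is true, bumps the inc_sel
 * field of the main event selector by the same distance, i.e. the event now
 * selects 0xd + 3 instead of 0xd + 1.
 */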
2067
2068static struct event_constraint *
2069nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2070{
2071 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2072 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2073 int i, idx[2], alloc = 0;
2074 u64 config1 = reg1->config;
2075
2076 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2077 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2078again:
2079 for (i = 0; i < 2; i++) {
2080 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2081 idx[i] = 0xff;
2082
2083 if (idx[i] == 0xff)
2084 continue;
2085
2086 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2087 __BITS_VALUE(config1, i, 32)))
2088 goto fail;
2089 alloc |= (0x1 << i);
2090 }
2091
2092 /* for the match/mask registers */
2093 if (reg2->idx != EXTRA_REG_NONE &&
2094 (uncore_box_is_fake(box) || !reg2->alloc) &&
2095 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2096 goto fail;
2097
2098 /*
2099 * If it's a fake box -- as per validate_{group,event}() we
2100 * shouldn't touch event state and we can avoid doing so
2101 * since both will only call get_event_constraints() once
2102 * on each event, this avoids the need for reg->alloc.
2103 */
2104 if (!uncore_box_is_fake(box)) {
2105 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2106 nhmex_mbox_alter_er(event, idx[0], true);
2107 reg1->alloc |= alloc;
2108 if (reg2->idx != EXTRA_REG_NONE)
2109 reg2->alloc = 1;
2110 }
2111 return NULL;
2112fail:
2113 if (idx[0] != 0xff && !(alloc & 0x1) &&
2114 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2115 /*
2116 * events 0xd ~ 0x10 are functionally identical, but are
2117 * controlled by different fields in the ZDP_CTL_FVC
2118 * register. If we failed to take one field, try the
2119 * remaining 3 choices.
2120 */
2121 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2122 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2123 idx[0] = (idx[0] + 1) % 4;
2124 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2125 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2126 config1 = nhmex_mbox_alter_er(event, idx[0], false);
2127 goto again;
2128 }
2129 }
2130
2131 if (alloc & 0x1)
2132 nhmex_mbox_put_shared_reg(box, idx[0]);
2133 if (alloc & 0x2)
2134 nhmex_mbox_put_shared_reg(box, idx[1]);
2135 return &constraint_empty;
2136}
2137
2138 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2139 {
2140 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2141 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2142
2143 if (uncore_box_is_fake(box))
2144 return;
2145
2146 if (reg1->alloc & 0x1)
2147 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2148 if (reg1->alloc & 0x2)
2149 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2150 reg1->alloc = 0;
2151
2152 if (reg2->alloc) {
2153 nhmex_mbox_put_shared_reg(box, reg2->idx);
2154 reg2->alloc = 0;
2155 }
2156}
2157
2158 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2159 {
2160 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2161 return er->idx;
2162 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2163}
2164
2165 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2166 {
2167 struct intel_uncore_type *type = box->pmu->type;
2168 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2169 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2170 struct extra_reg *er;
2171 unsigned msr;
2172 int reg_idx = 0;
2173 /*
2174 * The mbox events may require at most 2 extra MSRs. But only
2175 * the lower 32 bits in these MSRs are significant, so we can use
2176 * config1 to pass the config for both MSRs.
2177 */
2178 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2179 if (er->event != (event->hw.config & er->config_mask))
2180 continue;
2181 if (event->attr.config1 & ~er->valid_mask)
2182 return -EINVAL;
2183
2184 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2185 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2186 return -EINVAL;
2187
2188 /* always use the 32~63 bits to pass the PLD config */
2189 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2190 reg_idx = 1;
2191 else if (WARN_ON_ONCE(reg_idx > 0))
2192 return -EINVAL;
2193
2194 reg1->idx &= ~(0xff << (reg_idx * 8));
2195 reg1->reg &= ~(0xffff << (reg_idx * 16));
2196 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2197 reg1->reg |= msr << (reg_idx * 16);
2198 reg1->config = event->attr.config1;
2199 reg_idx++;
2200 }
2201 /*
2202 * The mbox only provides the ability to perform address matching
2203 * for the PLD events.
2204 */
2205 if (reg_idx == 2) {
2206 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2207 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2208 reg2->config = event->attr.config2;
2209 else
2210 reg2->config = ~0ULL;
2211 if (box->pmu->pmu_idx == 0)
2212 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2213 else
2214 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2215 }
2216 return 0;
2217}
2218
2219 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2220 {
2221 struct intel_uncore_extra_reg *er;
2222 unsigned long flags;
2223 u64 config;
2224
2225 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2226 return box->shared_regs[idx].config;
2227
2228 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2229 raw_spin_lock_irqsave(&er->lock, flags);
2230 config = er->config;
2231 raw_spin_unlock_irqrestore(&er->lock, flags);
2232 return config;
2233}
2234
2235static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2236{
2237 struct hw_perf_event *hwc = &event->hw;
2238 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2239 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2240 int idx;
2241
2242 idx = __BITS_VALUE(reg1->idx, 0, 8);
2243 if (idx != 0xff)
2244 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2245 nhmex_mbox_shared_reg_config(box, idx));
2246 idx = __BITS_VALUE(reg1->idx, 1, 8);
2247 if (idx != 0xff)
2248 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2249 nhmex_mbox_shared_reg_config(box, idx));
2250
2251 if (reg2->idx != EXTRA_REG_NONE) {
2252 wrmsrl(reg2->reg, 0);
2253 if (reg2->config != ~0ULL) {
2254 wrmsrl(reg2->reg + 1,
2255 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2256 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2257 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2258 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2259 }
2260 }
2261
2262 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2263}
2264
2265DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
2266DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
2267DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
2268DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
2269DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
2270DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
2271DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
2272DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
2273DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
2274DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
2275DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
2276DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
2277DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
2278DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
2279DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
2280DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
2281
2282static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2283 &format_attr_count_mode.attr,
2284 &format_attr_storage_mode.attr,
2285 &format_attr_wrap_mode.attr,
2286 &format_attr_flag_mode.attr,
2287 &format_attr_inc_sel.attr,
2288 &format_attr_set_flag_sel.attr,
2289 &format_attr_filter_cfg_en.attr,
2290 &format_attr_filter_match.attr,
2291 &format_attr_filter_mask.attr,
2292 &format_attr_dsp.attr,
2293 &format_attr_thr.attr,
2294 &format_attr_fvc.attr,
2295 &format_attr_pgt.attr,
2296 &format_attr_map.attr,
2297 &format_attr_iss.attr,
2298 &format_attr_pld.attr,
2299 NULL,
2300};
2301
2302static struct attribute_group nhmex_uncore_mbox_format_group = {
2303 .name = "format",
2304 .attrs = nhmex_uncore_mbox_formats_attr,
2305};
2306
2307static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2308 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2309 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2310 { /* end: all zeroes */ },
2311};
2312
2313static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2314 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2315 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2316 { /* end: all zeroes */ },
2317};
2318
2319static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2320 NHMEX_UNCORE_OPS_COMMON_INIT(),
2321 .enable_event = nhmex_mbox_msr_enable_event,
2322 .hw_config = nhmex_mbox_hw_config,
2323 .get_constraint = nhmex_mbox_get_constraint,
2324 .put_constraint = nhmex_mbox_put_constraint,
2325};
2326
2327static struct intel_uncore_type nhmex_uncore_mbox = {
2328 .name = "mbox",
2329 .num_counters = 6,
2330 .num_boxes = 2,
2331 .perf_ctr_bits = 48,
2332 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
2333 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
2334 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
2335 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
2336 .msr_offset = NHMEX_M_MSR_OFFSET,
2337 .pair_ctr_ctl = 1,
2338 .num_shared_regs = 8,
2339 .event_descs = nhmex_uncore_mbox_events,
2340 .ops = &nhmex_uncore_mbox_ops,
2341 .format_group = &nhmex_uncore_mbox_format_group,
2342};
2343
2344 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2345{
2346 struct hw_perf_event *hwc = &event->hw;
2347 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2348
2349 /* adjust the main event selector and extra register index */
2350 if (reg1->idx % 2) {
2351 reg1->idx--;
2352 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2353 } else {
2354 reg1->idx++;
2355 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2356 }
2357
2358 /* adjust extra register config */
2359 switch (reg1->idx % 6) {
2360 case 2:
2361 /* shift the 8~15 bits to the 0~7 bits */
2362 reg1->config >>= 8;
2363 break;
2364 case 3:
2365 /* shift the 0~7 bits to the 8~15 bits */
2366 reg1->config <<= 8;
2367 break;
2368 };
2369}
2370
2371/*
2372 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
2373 * An event set consists of 6 events; the 3rd and 4th events in
2374 * an event set use the same extra register, so an event set uses
2375 * 5 extra registers.
2376 */
2377static struct event_constraint *
2378nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2379{
2380 struct hw_perf_event *hwc = &event->hw;
2381 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2382 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2383 struct intel_uncore_extra_reg *er;
2384 unsigned long flags;
2385 int idx, er_idx;
2386 u64 config1;
2387 bool ok = false;
2388
2389 if (!uncore_box_is_fake(box) && reg1->alloc)
2390 return NULL;
2391
2392 idx = reg1->idx % 6;
2393 config1 = reg1->config;
2394again:
2395 er_idx = idx;
2396 /* the 3rd and 4th events use the same extra register */
2397 if (er_idx > 2)
2398 er_idx--;
2399 er_idx += (reg1->idx / 6) * 5;
2400
2401 er = &box->shared_regs[er_idx];
2402 raw_spin_lock_irqsave(&er->lock, flags);
2403 if (idx < 2) {
2404 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2405 atomic_inc(&er->ref);
2406 er->config = reg1->config;
2407 ok = true;
2408 }
2409 } else if (idx == 2 || idx == 3) {
2410 /*
2411 * these two events use different fields in an extra register:
2412 * bits 0~7 and bits 8~15 respectively.
2413 */
2414 u64 mask = 0xff << ((idx - 2) * 8);
2415 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2416 !((er->config ^ config1) & mask)) {
2417 atomic_add(1 << ((idx - 2) * 8), &er->ref);
2418 er->config &= ~mask;
2419 er->config |= config1 & mask;
2420 ok = true;
2421 }
2422 } else {
2423 if (!atomic_read(&er->ref) ||
2424 (er->config == (hwc->config >> 32) &&
2425 er->config1 == reg1->config &&
2426 er->config2 == reg2->config)) {
2427 atomic_inc(&er->ref);
2428 er->config = (hwc->config >> 32);
2429 er->config1 = reg1->config;
2430 er->config2 = reg2->config;
2431 ok = true;
2432 }
2433 }
2434 raw_spin_unlock_irqrestore(&er->lock, flags);
2435
2436 if (!ok) {
2437 /*
2438 * The Rbox events are always in pairs. The paired
2439 * events are functionally identical, but use different
2440 * extra registers. If we failed to take an extra
2441 * register, try the alternative.
2442 */
2443 if (idx % 2)
2444 idx--;
2445 else
2446 idx++;
2447 if (idx != reg1->idx % 6) {
2448 if (idx == 2)
2449 config1 >>= 8;
2450 else if (idx == 3)
2451 config1 <<= 8;
2452 goto again;
2453 }
2454 } else {
2455 if (!uncore_box_is_fake(box)) {
2456 if (idx != reg1->idx % 6)
2457 nhmex_rbox_alter_er(box, event);
2458 reg1->alloc = 1;
2459 }
2460 return NULL;
2461 }
2462 return &constraint_empty;
2463}
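/*
 * Index arithmetic above, made concrete (illustrative): within one event set
 * the six event slots map to shared registers 0, 1, 2, 2, 3, 4 (slots 2 and 3
 * share one register), and event set s starts at shared register s * 5.  With
 * four sets per box that accounts for the 20 shared registers declared for
 * nhmex_uncore_rbox below.
 */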
2464
2465 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2466 {
2467 struct intel_uncore_extra_reg *er;
2468 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2469 int idx, er_idx;
2470
2471 if (uncore_box_is_fake(box) || !reg1->alloc)
2472 return;
2473
2474 idx = reg1->idx % 6;
2475 er_idx = idx;
2476 if (er_idx > 2)
2477 er_idx--;
2478 er_idx += (reg1->idx / 6) * 5;
2479
2480 er = &box->shared_regs[er_idx];
2481 if (idx == 2 || idx == 3)
2482 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2483 else
2484 atomic_dec(&er->ref);
2485
2486 reg1->alloc = 0;
2487}
2488
2489 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2490{
2491 struct hw_perf_event *hwc = &event->hw;
2492 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2493 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2494 int idx;
2495
2496 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2497 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2498 if (idx >= 0x18)
2499 return -EINVAL;
2500
2501 reg1->idx = idx;
2502 reg1->config = event->attr.config1;
2503
2504 switch (idx % 6) {
2505 case 4:
2506 case 5:
2507 hwc->config |= event->attr.config & (~0ULL << 32);
2508 reg2->config = event->attr.config2;
2509 break;
2510 };
2511 return 0;
2512}
2513
2514static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2515{
2516 struct hw_perf_event *hwc = &event->hw;
2517 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2518 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2519 int idx, port;
2520
2521 idx = reg1->idx;
2522 port = idx / 6 + box->pmu->pmu_idx * 4;
2523
2524 switch (idx % 6) {
2525 case 0:
2526 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2527 break;
2528 case 1:
2529 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
2530 break;
2531 case 2:
2532 case 3:
2533 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
2534 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
2535 break;
2536 case 4:
2537 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2538 hwc->config >> 32);
2539 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2540 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2541 break;
2542 case 5:
2543 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2544 hwc->config >> 32);
2545 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2546 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
2547 break;
2548 };
2549
2550 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2551 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2552}
2553
2554DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2555DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
2556DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2557DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2558DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2559
2560static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2561 &format_attr_event5.attr,
2562 &format_attr_xbr_mm_cfg.attr,
2563 &format_attr_xbr_match.attr,
2564 &format_attr_xbr_mask.attr,
2565 &format_attr_qlx_cfg.attr,
2566 &format_attr_iperf_cfg.attr,
2567 NULL,
2568};
2569
2570 static struct attribute_group nhmex_uncore_rbox_format_group = {
2571 .name = "format",
2572 .attrs = nhmex_uncore_rbox_formats_attr,
2573};
2574
2575static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2576 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
2577 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
2578 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
2579 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
2580 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
2581 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
2582 { /* end: all zeroes */ },
2583};
2584
2585static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2586 NHMEX_UNCORE_OPS_COMMON_INIT(),
2587 .enable_event = nhmex_rbox_msr_enable_event,
2588 .hw_config = nhmex_rbox_hw_config,
2589 .get_constraint = nhmex_rbox_get_constraint,
2590 .put_constraint = nhmex_rbox_put_constraint,
2591};
2592
2593static struct intel_uncore_type nhmex_uncore_rbox = {
2594 .name = "rbox",
2595 .num_counters = 8,
2596 .num_boxes = 2,
2597 .perf_ctr_bits = 48,
2598 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
2599 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
2600 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
2601 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
2602 .msr_offset = NHMEX_R_MSR_OFFSET,
2603 .pair_ctr_ctl = 1,
2604 .num_shared_regs = 20,
2605 .event_descs = nhmex_uncore_rbox_events,
2606 .ops = &nhmex_uncore_rbox_ops,
2607 .format_group = &nhmex_uncore_rbox_format_group
2608};
2609
2610static struct intel_uncore_type *nhmex_msr_uncores[] = {
2611 &nhmex_uncore_ubox,
2612 &nhmex_uncore_cbox,
2613 &nhmex_uncore_bbox,
2614 &nhmex_uncore_sbox,
2615 &nhmex_uncore_mbox,
2616 &nhmex_uncore_rbox,
2617 &nhmex_uncore_wbox,
2618 NULL,
2619};
2620 /* end of Nehalem-EX uncore support */
2621
2622 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
2623{
2624 struct hw_perf_event *hwc = &event->hw;
2625
2626 hwc->idx = idx;
2627 hwc->last_tag = ++box->tags[idx];
2628
2629 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
2630 hwc->event_base = uncore_fixed_ctr(box);
2631 hwc->config_base = uncore_fixed_ctl(box);
2632 return;
2633 }
2634
2635 hwc->config_base = uncore_event_ctl(box, hwc->idx);
2636 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
2637}
2638
2639 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
2640{
2641 u64 prev_count, new_count, delta;
2642 int shift;
2643
2644 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2645 shift = 64 - uncore_fixed_ctr_bits(box);
2646 else
2647 shift = 64 - uncore_perf_ctr_bits(box);
2648
2649 /* the hrtimer might modify the previous event value */
2650again:
2651 prev_count = local64_read(&event->hw.prev_count);
2652 new_count = uncore_read_counter(box, event);
2653 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2654 goto again;
2655
2656 delta = (new_count << shift) - (prev_count << shift);
2657 delta >>= shift;
2658
2659 local64_add(delta, &event->count);
2660}
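/*
 * Sketch of the shift trick above (illustrative only, assuming a 48-bit wide
 * counter, i.e. shift == 64 - 48 == 16): shifting both samples into the top
 * bits discards the unimplemented high bits, the subtraction then wraps
 * modulo 2^64, and shifting back down yields a positive delta even when the
 * raw counter value wrapped between the two reads.
 */
static inline u64 example_counter_delta_48(u64 prev_count, u64 new_count)
{
	int shift = 64 - 48;

	return ((new_count << shift) - (prev_count << shift)) >> shift;
}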
2661
2662/*
2663 * The overflow interrupt is unavailable for SandyBridge-EP and broken
2664 * for SandyBridge, so we use a hrtimer to poll the counters periodically
2665 * and avoid losing counts to overflow.
2666 */
2667static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2668{
2669 struct intel_uncore_box *box;
2670 unsigned long flags;
2671 int bit;
2672
2673 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2674 if (!box->n_active || box->cpu != smp_processor_id())
2675 return HRTIMER_NORESTART;
2676 /*
2677 * disable local interrupts to prevent uncore_pmu_event_start/stop
2678 * from interrupting the update process
2679 */
2680 local_irq_save(flags);
2681
2682 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2683 uncore_perf_event_update(box, box->events[bit]);
2684
2685 local_irq_restore(flags);
2686
2687 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2688 return HRTIMER_RESTART;
2689}
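/*
 * Back-of-the-envelope check (illustrative): a 48-bit counter incrementing at
 * 3 GHz wraps after roughly 2^48 / 3e9 ~= 93,800 seconds (about 26 hours), so
 * any polling interval that is small compared to that keeps the delta
 * computation in uncore_perf_event_update() unambiguous.
 */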
2690
2691static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2692{
2693 __hrtimer_start_range_ns(&box->hrtimer,
2694 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2695 HRTIMER_MODE_REL_PINNED, 0);
2696}
2697
2698static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2699{
2700 hrtimer_cancel(&box->hrtimer);
2701}
2702
2703static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2704{
2705 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2706 box->hrtimer.function = uncore_pmu_hrtimer;
2707}
2708
2709 struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
2710{
2711 struct intel_uncore_box *box;
2712 int i, size;
2713
2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715
2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
2717 if (!box)
2718 return NULL;
2719
2720 for (i = 0; i < type->num_shared_regs; i++)
2721 raw_spin_lock_init(&box->shared_regs[i].lock);
2722
2723 uncore_pmu_init_hrtimer(box);
2724 atomic_set(&box->refcnt, 1);
2725 box->cpu = -1;
2726 box->phys_id = -1;
2727
2728 return box;
2729}
2730
2731static struct intel_uncore_box *
2732uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2733{
2734 struct intel_uncore_box *box;
2735
2736 box = *per_cpu_ptr(pmu->box, cpu);
2737 if (box)
2738 return box;
2739
2740 raw_spin_lock(&uncore_box_lock);
2741 list_for_each_entry(box, &pmu->box_list, list) {
2742 if (box->phys_id == topology_physical_package_id(cpu)) {
2743 atomic_inc(&box->refcnt);
2744 *per_cpu_ptr(pmu->box, cpu) = box;
2745 break;
2746 }
2747 }
2748 raw_spin_unlock(&uncore_box_lock);
2749
2750 return *per_cpu_ptr(pmu->box, cpu);
2751}
2752
2753static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2754{
2755 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2756}
2757
2758static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2759{
2760 /*
2761 * The perf core schedules events on the basis of cpu; uncore events are
2762 * collected by one of the cpus inside a physical package.
2763 */
2764 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
2765}
2766
2767static int
2768uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
2769{
2770 struct perf_event *event;
2771 int n, max_count;
2772
2773 max_count = box->pmu->type->num_counters;
2774 if (box->pmu->type->fixed_ctl)
2775 max_count++;
2776
2777 if (box->n_events >= max_count)
2778 return -EINVAL;
2779
2780 n = box->n_events;
2781 box->event_list[n] = leader;
2782 n++;
2783 if (!dogrp)
2784 return n;
2785
2786 list_for_each_entry(event, &leader->sibling_list, group_entry) {
2787 if (event->state <= PERF_EVENT_STATE_OFF)
2788 continue;
2789
2790 if (n >= max_count)
2791 return -EINVAL;
2792
2793 box->event_list[n] = event;
2794 n++;
2795 }
2796 return n;
2797}
2798
2799static struct event_constraint *
2800 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2801 {
2802 struct intel_uncore_type *type = box->pmu->type;
2803 struct event_constraint *c;
2804
2805 if (type->ops->get_constraint) {
2806 c = type->ops->get_constraint(box, event);
2807 if (c)
2808 return c;
2809 }
2810
2811 if (event->attr.config == UNCORE_FIXED_EVENT)
2812 return &constraint_fixed;
2813
2814 if (type->constraints) {
2815 for_each_event_constraint(c, type->constraints) {
2816 if ((event->hw.config & c->cmask) == c->code)
2817 return c;
2818 }
2819 }
2820
2821 return &type->unconstrainted;
2822}
2823
2824 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2825{
2826 if (box->pmu->type->ops->put_constraint)
2827 box->pmu->type->ops->put_constraint(box, event);
2828}
2829
2830 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2831{
2832 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2833 struct event_constraint *c;
2834 int i, wmin, wmax, ret = 0;
2835 struct hw_perf_event *hwc;
2836
2837 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2838
2839 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2840 hwc = &box->event_list[i]->hw;
2841 c = uncore_get_event_constraint(box, box->event_list[i]);
2842 hwc->constraint = c;
2843 wmin = min(wmin, c->weight);
2844 wmax = max(wmax, c->weight);
2845 }
2846
2847 /* fastpath, try to reuse previous register */
2848 for (i = 0; i < n; i++) {
2849 hwc = &box->event_list[i]->hw;
2850 c = hwc->constraint;
2851
2852 /* never assigned */
2853 if (hwc->idx == -1)
2854 break;
2855
2856 /* constraint still honored */
2857 if (!test_bit(hwc->idx, c->idxmsk))
2858 break;
2859
2860 /* not already used */
2861 if (test_bit(hwc->idx, used_mask))
2862 break;
2863
2864 __set_bit(hwc->idx, used_mask);
2865 if (assign)
2866 assign[i] = hwc->idx;
2867 }
2868 /* slow path */
2869 if (i != n)
2870 ret = perf_assign_events(box->event_list, n,
2871 wmin, wmax, assign);
2872
2873 if (!assign || ret) {
2874 for (i = 0; i < n; i++)
2875 uncore_put_event_constraint(box, box->event_list[i]);
2876 }
2877 return ret ? -EINVAL : 0;
2878}
2879
2880static void uncore_pmu_event_start(struct perf_event *event, int flags)
2881{
2882 struct intel_uncore_box *box = uncore_event_to_box(event);
2883 int idx = event->hw.idx;
2884
2885 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2886 return;
2887
2888 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2889 return;
2890
2891 event->hw.state = 0;
2892 box->events[idx] = event;
2893 box->n_active++;
2894 __set_bit(idx, box->active_mask);
2895
2896 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2897 uncore_enable_event(box, event);
2898
2899 if (box->n_active == 1) {
2900 uncore_enable_box(box);
2901 uncore_pmu_start_hrtimer(box);
2902 }
2903}
2904
2905static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2906{
2907 struct intel_uncore_box *box = uncore_event_to_box(event);
2908 struct hw_perf_event *hwc = &event->hw;
2909
2910 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2911 uncore_disable_event(box, event);
2912 box->n_active--;
2913 box->events[hwc->idx] = NULL;
2914 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2915 hwc->state |= PERF_HES_STOPPED;
2916
2917 if (box->n_active == 0) {
2918 uncore_disable_box(box);
2919 uncore_pmu_cancel_hrtimer(box);
2920 }
2921 }
2922
2923 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2924 /*
2925 * Drain the remaining delta count out of an event
2926 * that we are disabling:
2927 */
2928 uncore_perf_event_update(box, event);
2929 hwc->state |= PERF_HES_UPTODATE;
2930 }
2931}
2932
2933static int uncore_pmu_event_add(struct perf_event *event, int flags)
2934{
2935 struct intel_uncore_box *box = uncore_event_to_box(event);
2936 struct hw_perf_event *hwc = &event->hw;
2937 int assign[UNCORE_PMC_IDX_MAX];
2938 int i, n, ret;
2939
2940 if (!box)
2941 return -ENODEV;
2942
2943 ret = n = uncore_collect_events(box, event, false);
2944 if (ret < 0)
2945 return ret;
2946
2947 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2948 if (!(flags & PERF_EF_START))
2949 hwc->state |= PERF_HES_ARCH;
2950
2951 ret = uncore_assign_events(box, assign, n);
2952 if (ret)
2953 return ret;
2954
2955 /* save events moving to new counters */
2956 for (i = 0; i < box->n_events; i++) {
2957 event = box->event_list[i];
2958 hwc = &event->hw;
2959
2960 if (hwc->idx == assign[i] &&
2961 hwc->last_tag == box->tags[assign[i]])
2962 continue;
2963 /*
2964 * Ensure we don't accidentally enable a stopped
2965 * counter simply because we rescheduled.
2966 */
2967 if (hwc->state & PERF_HES_STOPPED)
2968 hwc->state |= PERF_HES_ARCH;
2969
2970 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2971 }
2972
2973 /* reprogram moved events into new counters */
2974 for (i = 0; i < n; i++) {
2975 event = box->event_list[i];
2976 hwc = &event->hw;
2977
2978 if (hwc->idx != assign[i] ||
2979 hwc->last_tag != box->tags[assign[i]])
2980 uncore_assign_hw_event(box, event, assign[i]);
2981 else if (i < box->n_events)
2982 continue;
2983
2984 if (hwc->state & PERF_HES_ARCH)
2985 continue;
2986
2987 uncore_pmu_event_start(event, 0);
2988 }
2989 box->n_events = n;
2990
2991 return 0;
2992}
2993
2994static void uncore_pmu_event_del(struct perf_event *event, int flags)
2995{
2996 struct intel_uncore_box *box = uncore_event_to_box(event);
2997 int i;
2998
2999 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3000
3001 for (i = 0; i < box->n_events; i++) {
3002 if (event == box->event_list[i]) {
3003 uncore_put_event_constraint(box, event);
3004
3005 while (++i < box->n_events)
3006 box->event_list[i - 1] = box->event_list[i];
3007
3008 --box->n_events;
3009 break;
3010 }
3011 }
3012
3013 event->hw.idx = -1;
3014 event->hw.last_tag = ~0ULL;
3015}
3016
3017static void uncore_pmu_event_read(struct perf_event *event)
3018{
3019 struct intel_uncore_box *box = uncore_event_to_box(event);
3020 uncore_perf_event_update(box, event);
3021}
3022
3023/*
3024 * validation ensures the group can be loaded onto the
3025 * PMU if it was the only group available.
3026 */
3027static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3028 struct perf_event *event)
3029{
3030 struct perf_event *leader = event->group_leader;
3031 struct intel_uncore_box *fake_box;
3032 int ret = -EINVAL, n;
3033
3034 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
3035 if (!fake_box)
3036 return -ENOMEM;
3037
3038 fake_box->pmu = pmu;
3039 /*
3040 * the event is not yet connected with its
3041 * siblings; therefore we must first collect the
3042 * existing siblings, then add the new event
3043 * before we can simulate the scheduling
3044 */
3045 n = uncore_collect_events(fake_box, leader, true);
3046 if (n < 0)
3047 goto out;
3048
3049 fake_box->n_events = n;
3050 n = uncore_collect_events(fake_box, event, false);
3051 if (n < 0)
3052 goto out;
3053
3054 fake_box->n_events = n;
3055
3056 ret = uncore_assign_events(fake_box, NULL, n);
3057out:
3058 kfree(fake_box);
3059 return ret;
3060}
3061
3062 static int uncore_pmu_event_init(struct perf_event *event)
3063{
3064 struct intel_uncore_pmu *pmu;
3065 struct intel_uncore_box *box;
3066 struct hw_perf_event *hwc = &event->hw;
3067 int ret;
3068
3069 if (event->attr.type != event->pmu->type)
3070 return -ENOENT;
3071
3072 pmu = uncore_event_to_pmu(event);
3073 /* no device found for this pmu */
3074 if (pmu->func_id < 0)
3075 return -ENOENT;
3076
3077 /*
3078 * The uncore PMU measures at all privilege levels all the time,
3079 * so it doesn't make sense to specify any exclude bits.
3080 */
3081 if (event->attr.exclude_user || event->attr.exclude_kernel ||
3082 event->attr.exclude_hv || event->attr.exclude_idle)
3083 return -EINVAL;
3084
3085 /* Sampling not supported yet */
3086 if (hwc->sample_period)
3087 return -EINVAL;
3088
3089 /*
3090 * Place all uncore events for a particular physical package
3091 * onto a single cpu
3092 */
3093 if (event->cpu < 0)
3094 return -EINVAL;
3095 box = uncore_pmu_to_box(pmu, event->cpu);
3096 if (!box || box->cpu < 0)
3097 return -EINVAL;
3098 event->cpu = box->cpu;
3099
3100 event->hw.idx = -1;
3101 event->hw.last_tag = ~0ULL;
3102 event->hw.extra_reg.idx = EXTRA_REG_NONE;
3103 event->hw.branch_reg.idx = EXTRA_REG_NONE;
3104
3105 if (event->attr.config == UNCORE_FIXED_EVENT) {
3106 /* no fixed counter */
3107 if (!pmu->type->fixed_ctl)
3108 return -EINVAL;
3109 /*
3110 * if there is only one fixed counter, only the first pmu
3111 * can access the fixed counter
3112 */
3113 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3114 return -EINVAL;
3115
3116 /* fixed counters have event field hardcoded to zero */
3117 hwc->config = 0ULL;
3118 } else {
3119 hwc->config = event->attr.config & pmu->type->event_mask;
3120 if (pmu->type->ops->hw_config) {
3121 ret = pmu->type->ops->hw_config(box, event);
3122 if (ret)
3123 return ret;
3124 }
3125 }
3126
3127 if (event->group_leader != event)
3128 ret = uncore_validate_group(pmu, event);
3129 else
3130 ret = 0;
3131
3132 return ret;
3133}
3134
3135static ssize_t uncore_get_attr_cpumask(struct device *dev,
3136 struct device_attribute *attr, char *buf)
3137{
3138 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3139
3140 buf[n++] = '\n';
3141 buf[n] = '\0';
3142 return n;
3143}
3144
3145static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3146
3147static struct attribute *uncore_pmu_attrs[] = {
3148 &dev_attr_cpumask.attr,
3149 NULL,
3150};
3151
3152static struct attribute_group uncore_pmu_attr_group = {
3153 .attrs = uncore_pmu_attrs,
3154};
3155
3156static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3157{
3158 int ret;
3159
3160 pmu->pmu = (struct pmu) {
3161 .attr_groups = pmu->type->attr_groups,
3162 .task_ctx_nr = perf_invalid_context,
3163 .event_init = uncore_pmu_event_init,
3164 .add = uncore_pmu_event_add,
3165 .del = uncore_pmu_event_del,
3166 .start = uncore_pmu_event_start,
3167 .stop = uncore_pmu_event_stop,
3168 .read = uncore_pmu_event_read,
3169 };
3170
3171 if (pmu->type->num_boxes == 1) {
3172 if (strlen(pmu->type->name) > 0)
3173 sprintf(pmu->name, "uncore_%s", pmu->type->name);
3174 else
3175 sprintf(pmu->name, "uncore");
3176 } else {
3177 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3178 pmu->pmu_idx);
3179 }
3180
3181 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3182 return ret;
3183}
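/*
 * Naming example (a sketch based on the sprintf() calls above): a
 * hypothetical single-box type named "imc" would register as "uncore_imc",
 * the empty-named Nehalem type registers as plain "uncore", and a multi-box
 * type such as the NHM-EX cbox registers one PMU per box,
 * "uncore_cbox_0" ... "uncore_cbox_9", each visible under
 * /sys/bus/event_source/devices/.
 */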
3184
3185static void __init uncore_type_exit(struct intel_uncore_type *type)
3186{
3187 int i;
3188
3189 for (i = 0; i < type->num_boxes; i++)
3190 free_percpu(type->pmus[i].box);
3191 kfree(type->pmus);
3192 type->pmus = NULL;
3193 kfree(type->events_group);
3194 type->events_group = NULL;
3195}
3196
3197 static void __init uncore_types_exit(struct intel_uncore_type **types)
3198{
3199 int i;
3200 for (i = 0; types[i]; i++)
3201 uncore_type_exit(types[i]);
3202}
3203
3204static int __init uncore_type_init(struct intel_uncore_type *type)
3205{
3206 struct intel_uncore_pmu *pmus;
3207 struct attribute_group *attr_group;
3208 struct attribute **attrs;
3209 int i, j;
3210
3211 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3212 if (!pmus)
3213 return -ENOMEM;
3214
3215 type->unconstrainted = (struct event_constraint)
3216 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3217 0, type->num_counters, 0, 0);
3218
3219 for (i = 0; i < type->num_boxes; i++) {
3220 pmus[i].func_id = -1;
3221 pmus[i].pmu_idx = i;
3222 pmus[i].type = type;
3223 INIT_LIST_HEAD(&pmus[i].box_list);
3224 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3225 if (!pmus[i].box)
3226 goto fail;
3227 }
3228
3229 if (type->event_descs) {
3230 i = 0;
3231 while (type->event_descs[i].attr.attr.name)
3232 i++;
3233
3234 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3235 sizeof(*attr_group), GFP_KERNEL);
3236 if (!attr_group)
3237 goto fail;
3238
3239 attrs = (struct attribute **)(attr_group + 1);
3240 attr_group->name = "events";
3241 attr_group->attrs = attrs;
3242
3243 for (j = 0; j < i; j++)
3244 attrs[j] = &type->event_descs[j].attr.attr;
3245
3246 type->events_group = attr_group;
3247 }
3248
3249 type->pmu_group = &uncore_pmu_attr_group;
3250 type->pmus = pmus;
3251 return 0;
3252fail:
3253 uncore_type_exit(type);
3254 return -ENOMEM;
3255}
3256
3257static int __init uncore_types_init(struct intel_uncore_type **types)
3258{
3259 int i, ret;
3260
3261 for (i = 0; types[i]; i++) {
3262 ret = uncore_type_init(types[i]);
3263 if (ret)
3264 goto fail;
3265 }
3266 return 0;
3267fail:
3268 while (--i >= 0)
3269 uncore_type_exit(types[i]);
3270 return ret;
3271}
3272
3273static struct pci_driver *uncore_pci_driver;
3274static bool pcidrv_registered;
3275
3276/*
3277 * add a pci uncore device
3278 */
3279 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3280{
3281 struct intel_uncore_pmu *pmu;
3282 struct intel_uncore_box *box;
3283 struct intel_uncore_type *type;
3284 int phys_id;
3285
3286 phys_id = pcibus_to_physid[pdev->bus->number];
3287 if (phys_id < 0)
3288 return -ENODEV;
3289
3290 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3291 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3292 pci_set_drvdata(pdev, NULL);
3293 return 0;
3294 }
3295
3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297 box = uncore_alloc_box(type, 0);
3298 if (!box)
3299 return -ENOMEM;
3300
3301 /*
3302 * for performance monitoring units with multiple boxes,
3303 * each box has a different function id.
3304 */
3305 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3306 if (pmu->func_id < 0)
3307 pmu->func_id = pdev->devfn;
3308 else
3309 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3310
3311 box->phys_id = phys_id;
3312 box->pci_dev = pdev;
3313 box->pmu = pmu;
3314 uncore_box_init(box);
3315 pci_set_drvdata(pdev, box);
3316
3317 raw_spin_lock(&uncore_box_lock);
3318 list_add_tail(&box->list, &pmu->box_list);
3319 raw_spin_unlock(&uncore_box_lock);
3320
3321 return 0;
3322}
3323
3324 static void uncore_pci_remove(struct pci_dev *pdev)
3325{
3326 struct intel_uncore_box *box = pci_get_drvdata(pdev);
3327 struct intel_uncore_pmu *pmu;
3328 int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3329
3330 box = pci_get_drvdata(pdev);
3331 if (!box) {
3332 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3333 if (extra_pci_dev[phys_id][i] == pdev) {
3334 extra_pci_dev[phys_id][i] = NULL;
3335 break;
3336 }
3337 }
3338 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3339 return;
3340 }
3341
3342 pmu = box->pmu;
3343 if (WARN_ON_ONCE(phys_id != box->phys_id))
3344 return;
3345
3346 pci_set_drvdata(pdev, NULL);
3347
3348 raw_spin_lock(&uncore_box_lock);
3349 list_del(&box->list);
3350 raw_spin_unlock(&uncore_box_lock);
3351
3352 for_each_possible_cpu(cpu) {
3353 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3354 *per_cpu_ptr(pmu->box, cpu) = NULL;
3355 atomic_dec(&box->refcnt);
3356 }
3357 }
3358
3359 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3360 kfree(box);
3361}
3362
3363static int __init uncore_pci_init(void)
3364{
3365 int ret;
3366
3367 switch (boot_cpu_data.x86_model) {
3368 case 45: /* Sandy Bridge-EP */
3369 ret = snbep_pci2phy_map_init(0x3ce0);
3370 if (ret)
3371 return ret;
3372 pci_uncores = snbep_pci_uncores;
3373 uncore_pci_driver = &snbep_uncore_pci_driver;
3374 break;
3375 case 62: /* IvyTown */
3376 ret = snbep_pci2phy_map_init(0x0e1e);
3377 if (ret)
3378 return ret;
3379 pci_uncores = ivt_pci_uncores;
3380 uncore_pci_driver = &ivt_uncore_pci_driver;
3381 break;
3382 default:
3383 return 0;
3384 }
3385
3386 ret = uncore_types_init(pci_uncores);
3387 if (ret)
3388 return ret;
3389
3390 uncore_pci_driver->probe = uncore_pci_probe;
3391 uncore_pci_driver->remove = uncore_pci_remove;
3392
3393 ret = pci_register_driver(uncore_pci_driver);
3394 if (ret == 0)
3395 pcidrv_registered = true;
3396 else
3397 uncore_types_exit(pci_uncores);
3398
3399 return ret;
3400}
3401
3402static void __init uncore_pci_exit(void)
3403{
3404 if (pcidrv_registered) {
3405 pcidrv_registered = false;
3406 pci_unregister_driver(uncore_pci_driver);
3407 uncore_types_exit(pci_uncores);
3408 }
3409}
3410
3411/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3412static LIST_HEAD(boxes_to_free);
3413
3414 static void uncore_kfree_boxes(void)
3415{
3416 struct intel_uncore_box *box;
3417
3418 while (!list_empty(&boxes_to_free)) {
3419 box = list_entry(boxes_to_free.next,
3420 struct intel_uncore_box, list);
3421 list_del(&box->list);
3422 kfree(box);
3423 }
3424}
3425
3426 static void uncore_cpu_dying(int cpu)
3427{
3428 struct intel_uncore_type *type;
3429 struct intel_uncore_pmu *pmu;
3430 struct intel_uncore_box *box;
3431 int i, j;
3432
3433 for (i = 0; msr_uncores[i]; i++) {
3434 type = msr_uncores[i];
3435 for (j = 0; j < type->num_boxes; j++) {
3436 pmu = &type->pmus[j];
3437 box = *per_cpu_ptr(pmu->box, cpu);
3438 *per_cpu_ptr(pmu->box, cpu) = NULL;
3439 if (box && atomic_dec_and_test(&box->refcnt))
3440 list_add(&box->list, &boxes_to_free);
3441 }
3442 }
3443}
3444
3445 static int uncore_cpu_starting(int cpu)
3446{
3447 struct intel_uncore_type *type;
3448 struct intel_uncore_pmu *pmu;
3449 struct intel_uncore_box *box, *exist;
3450 int i, j, k, phys_id;
3451
3452 phys_id = topology_physical_package_id(cpu);
3453
3454 for (i = 0; msr_uncores[i]; i++) {
3455 type = msr_uncores[i];
3456 for (j = 0; j < type->num_boxes; j++) {
3457 pmu = &type->pmus[j];
3458 box = *per_cpu_ptr(pmu->box, cpu);
3459 /* called by uncore_cpu_init? */
3460 if (box && box->phys_id >= 0) {
3461 uncore_box_init(box);
3462 continue;
3463 }
3464
3465 for_each_online_cpu(k) {
3466 exist = *per_cpu_ptr(pmu->box, k);
3467 if (exist && exist->phys_id == phys_id) {
3468 atomic_inc(&exist->refcnt);
3469 *per_cpu_ptr(pmu->box, cpu) = exist;
22cc4ccf
YZ
3470 if (box) {
3471 list_add(&box->list,
3472 &boxes_to_free);
3473 box = NULL;
3474 }
087bfbb0
YZ
3475 break;
3476 }
3477 }
3478
3479 if (box) {
3480 box->phys_id = phys_id;
3481 uncore_box_init(box);
3482 }
3483 }
3484 }
3485 return 0;
3486}
3487
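/*
 * Allocate one box per MSR uncore PMU for the incoming CPU.  The physical
 * package id may not be known yet (the CPU_UP_PREPARE notification passes
 * -1); it is fixed up later in uncore_cpu_starting().
 */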
static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu);
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}

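/*
 * Move the boxes of every listed uncore type from @old_cpu to @new_cpu.
 * A negative @old_cpu means the boxes are being assigned for the first
 * time; a negative @new_cpu means no CPU in the package is left to take
 * them over.
 */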
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
							 old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}

static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, cpu, target);
	uncore_change_context(pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, -1, cpu);
	uncore_change_context(pci_uncores, -1, cpu);
}

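/*
 * CPU hotplug callback: the first switch manages allocation and freeing of
 * the per-cpu boxes, the second switch re-elects the CPU that collects
 * uncore events for the affected package.
 */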
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call = uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority = CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}

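/*
 * Select the MSR uncore description for the running CPU model, allocate
 * boxes for the first online CPU of each package, elect that CPU to collect
 * the package's events, and register the hotplug notifier.
 */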
static int __init uncore_cpu_init(void)
{
	int ret, cpu, max_cores;

	max_cores = boot_cpu_data.x86_max_cores;
	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		msr_uncores = nhm_msr_uncores;
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
		if (snb_uncore_cbox.num_boxes > max_cores)
			snb_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snb_msr_uncores;
		break;
	case 45: /* Sandy Bridge-EP */
		if (snbep_uncore_cbox.num_boxes > max_cores)
			snbep_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snbep_msr_uncores;
		break;
	case 46: /* Nehalem-EX */
		uncore_nhmex = true;
		/* fall through */
	case 47: /* Westmere-EX aka. Xeon E7 */
		if (!uncore_nhmex)
			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
		if (nhmex_uncore_cbox.num_boxes > max_cores)
			nhmex_uncore_cbox.num_boxes = max_cores;
		msr_uncores = nhmex_msr_uncores;
		break;
	case 62: /* IvyTown */
		if (ivt_uncore_cbox.num_boxes > max_cores)
			ivt_uncore_cbox.num_boxes = max_cores;
		msr_uncores = ivt_msr_uncores;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(msr_uncores);
	if (ret)
		return ret;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	register_cpu_notifier(&uncore_cpu_nb);

	put_online_cpus();

	return 0;
}

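/*
 * Register every MSR and PCI uncore PMU with the perf core.  This runs
 * last from intel_uncore_init(), once the box and func_id wiring above is
 * complete.
 */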
static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	for (i = 0; pci_uncores[i]; i++) {
		type = pci_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}

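/*
 * Entry point, run as a device_initcall(): bail out on non-Intel CPUs and
 * when running under a hypervisor, otherwise bring up the PCI uncores
 * first, then the MSR uncores, and finally register all PMUs with the
 * perf core.
 */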
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);