#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

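/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit field of x. It is used
 * below to keep several small reference counts packed into a single
 * atomic_t; e.g. __BITS_VALUE(atomic_read(&er->ref), 2, 6) reads bits
 * 12-17, the refcount of the third 6-bit slot.
 */
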
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

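/*
 * The 48-bit PCI PMON counter is read as two 32-bit config-space reads:
 * the low dword into the low half of count, the high dword into the
 * high half. The two reads are not atomic with respect to a running
 * counter.
 */
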
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &constraint_empty;
}

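/*
 * The cbox filter register packs up to five fields (tid/nid/state/opc,
 * plus link on IvyTown), and er->ref packs a 6-bit refcount per field.
 * A field can be shared with another event only if its bits in
 * er->config match, which is what the __BITS_VALUE()/mask test above
 * checks slot by slot.
 */
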
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

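/*
 * The PCU filter MSR carries four 8-bit occupancy bands. When the band
 * an event asked for is already taken with a different value, its
 * filter byte can be migrated to a free band: e.g. moving from band 0
 * to band 2 shifts config left by 16, and the event select (0xb-0xe
 * map to bands 0-3) in hwc->config is bumped to match.
 */
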
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
		WARN_ON_ONCE(!filter_pdev);
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;
		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				pcibus_to_physid[bus] = i;
				break;
			}
		}
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

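/*
 * Example: if the nodeid read at offset 0x40 is 1 and bits 5:3 of the
 * 0x54 mapping register also hold 1, the loop above stops at i = 1 and
 * records pcibus_to_physid[bus] = 1, i.e. this bus belongs to physical
 * package 1.
 */
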
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivt_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
	.init_box	= ivt_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVT_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivt_uncore_pci_ops,			\
	.format_group	= &ivt_uncore_format_group

static struct attribute *ivt_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group ivt_uncore_format_group = {
	.name		= "format",
	.attrs		= ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
	.name		= "format",
	.attrs		= ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
	.name		= "format",
	.attrs		= ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
	.name		= "format",
	.attrs		= ivt_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivt_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVT_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivt_uncore_msr_ops,
	.format_group	= &ivt_uncore_ubox_format_group,
};

static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

static u64 ivt_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivt_uncore_cbox_ops = {
	.init_box		= ivt_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivt_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivt_cbox_hw_config,
	.get_constraint		= ivt_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivt_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivt_uncore_cbox_ops,
	.format_group		= &ivt_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivt_uncore_pcu_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivt_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivt_uncore_pcu_ops,
	.format_group		= &ivt_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivt_msr_uncores[] = {
	&ivt_uncore_ubox,
	&ivt_uncore_cbox,
	&ivt_uncore_pcu,
	NULL,
};

static struct intel_uncore_type ivt_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_qpi = {
	.name		= "qpi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivt_uncore_pci_ops,
	.format_group	= &ivt_uncore_qpi_format_group,
};

static struct intel_uncore_type ivt_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVT_PCI_UNCORE_HA,
	IVT_PCI_UNCORE_IMC,
	IVT_PCI_UNCORE_QPI,
	IVT_PCI_UNCORE_R2PCIE,
	IVT_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivt_pci_uncores[] = {
	[IVT_PCI_UNCORE_HA]	= &ivt_uncore_ha,
	[IVT_PCI_UNCORE_IMC]	= &ivt_uncore_imc,
	[IVT_PCI_UNCORE_QPI]	= &ivt_uncore_qpi,
	[IVT_PCI_UNCORE_R2PCIE]	= &ivt_uncore_r2pcie,
	[IVT_PCI_UNCORE_R3QPI]	= &ivt_uncore_r3qpi,
	NULL,
};

static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivt_uncore_pci_driver = {
	.name		= "ivt_uncore",
	.id_table	= ivt_uncore_pci_ids,
};

/* end of IvyTown uncore support */

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.constraints	= snb_uncore_cbox_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	NULL,
};

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

/* end of Nehalem uncore support */

/* Nehalem-EX uncore support */
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0, 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};

1918 EXTRA_REG_NHMEX_M_FILTER
,
1919 EXTRA_REG_NHMEX_M_DSP
,
1920 EXTRA_REG_NHMEX_M_ISS
,
1921 EXTRA_REG_NHMEX_M_MAP
,
1922 EXTRA_REG_NHMEX_M_MSC_THR
,
1923 EXTRA_REG_NHMEX_M_PGT
,
1924 EXTRA_REG_NHMEX_M_PLD
,
1925 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC
,
static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
	MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
	MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
	MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};
/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}
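/*
 * Editor's sketch of the packed reference count used above: er->ref
 * holds four 8-bit counters, one per ZDP_CTL_FVC field, so field idx
 * is taken and released with atomic_add/atomic_sub of 1 << (idx * 8)
 * and read back through __BITS_VALUE().  For example (hypothetical
 * values, for illustration only):
 *
 *	atomic_set(&er->ref, 0);
 *	atomic_add(1 << (2 * 8), &er->ref);		   // take field 2
 *	__BITS_VALUE(atomic_read(&er->ref), 2, 8);	   // == 1, in use
 *	__BITS_VALUE(atomic_read(&er->ref), 0, 8);	   // == 0, free
 */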
static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}
static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * rest of the fields.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &constraint_empty;
}
static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}
static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}
static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require 2 extra MSRs at the most. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
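/*
 * Editor's note: nhmex_mbox_hw_config() multiplexes up to two extra
 * registers through a single hw_perf_event_extra.  reg1->idx packs two
 * 8-bit extra-register indices (slot 0 in bits 0-7, slot 1 in bits
 * 8-15) and reg1->reg packs two 16-bit MSR addresses, so slot i is
 * recovered with __BITS_VALUE(reg1->idx, i, 8) and
 * __BITS_VALUE(reg1->reg, i, 16), matching the decode done in
 * nhmex_mbox_msr_enable_event() further down.
 */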
static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}
static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");
static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};
static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};
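/*
 * Editor's usage sketch: the event descriptors above surface as named
 * aliases under sysfs, so (assuming an MBOX PMU instance named
 * uncore_mbox_0) the bbox_cmds_read alias could be counted with:
 *
 *	perf stat -a -e 'uncore_mbox_0/bbox_cmds_read/' sleep 1
 *
 * which is equivalent to the raw form inc_sel=0xd,fvc=0x2800 on
 * Nehalem-EX (0x5000 on Westmere-EX, per wsmex_uncore_mbox_events).
 */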
static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};
static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}
/*
 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in
 * an event set use the same extra register. So an event set uses
 * 5 extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra register,
		 * the 0~7 bits and the 8~15 bits respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
				!((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
				(er->config == (hwc->config >> 32) &&
				 er->config1 == reg1->config &&
				 er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &constraint_empty;
}
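/*
 * Editor's worked example of the er_idx mapping above (hypothetical
 * event): an RBOX event with reg1->idx == 9 belongs to event set
 * 9 / 6 == 1 and is the 4th event of that set (9 % 6 == 3).  Events
 * 2 and 3 of a set share one register, so er_idx = 3 - 1 = 2, plus
 * 1 * 5 for the preceding set, giving shared_regs[7].
 */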
static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}
static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}
static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg,	xbr_mm_cfg,	"config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match,	xbr_match,	"config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask,	xbr_mask,	"config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg,	qlx_cfg,	"config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg,	iperf_cfg,	"config1:0-31");
static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_date_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_date_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};
static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};
/* end of Nehalem-EX uncore support */
static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}
static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
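/*
 * Editor's note on the shift trick above: for a 48-bit counter,
 * shift == 16, so both samples are left-aligned in 64 bits before
 * subtracting and the difference is shifted back down, which makes
 * counter wrap-around come out right.  For example (hypothetical
 * values):
 *
 *	prev_count = 0xffffffffffff;			// about to wrap
 *	new_count  = 0x000000000005;
 *	delta = ((new << 16) - (prev << 16)) >> 16;	// == 6
 */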
/*
 * The overflow interrupt is unavailable for SandyBridge-EP, and is broken
 * for SandyBridge. So we use an hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
	return HRTIMER_RESTART;
}
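/*
 * Editor's note (assuming UNCORE_PMU_HRTIMER_INTERVAL is the 60 s
 * value defined in the header): a 48-bit counter ticking at 3 GHz only
 * wraps after 2^48 / 3e9 seconds, roughly 26 hours, so one poll per
 * minute per box catches every wrap with plenty of margin while
 * keeping the timer overhead negligible.
 */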
static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	__hrtimer_start_range_ns(&box->hrtimer,
			ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (!box)
		return NULL;

	for (i = 0; i < type->num_shared_regs; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	return box;
}
static struct intel_uncore_box *
uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}
static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	/*
	 * perf core schedules events on a per-cpu basis; uncore events
	 * are collected by one of the cpus inside a physical package.
	 */
	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;
	box->event_list[n] = leader;
	n++;
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->hw.config == ~0ULL)
		return &constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = uncore_get_event_constraint(box, box->event_list[i]);
		hwc->constraint = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = hwc->constraint;

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_list, n,
					 wmin, wmax, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
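/*
 * Editor's note: the fastpath above keeps each event on the counter it
 * already occupies as long as its constraint still allows that counter
 * and no earlier event has claimed it; only when some event must move
 * does the code fall back to perf_assign_events(), which redoes the
 * whole assignment ordered by constraint weight (wmin..wmax).
 */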
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
			hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
			hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}
static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}
static void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time.
	 * So it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
			event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;
		hwc->config = ~0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
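/*
 * Editor's usage sketch: the cpumask attribute tells tools which cpu
 * carries each package's uncore events, e.g. (hypothetical output on a
 * two-socket machine):
 *
 *	$ cat /sys/bus/event_source/devices/uncore_mbox_0/cpumask
 *	0,8
 *
 * perf reads this file and opens the events on those cpus only.
 */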
static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	pmu->pmu = (struct pmu) {
		.attr_groups	= pmu->type->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= uncore_pmu_event_init,
		.add		= uncore_pmu_event_add,
		.del		= uncore_pmu_event_del,
		.start		= uncore_pmu_event_start,
		.stop		= uncore_pmu_event_stop,
		.read		= uncore_pmu_event_read,
	};

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}
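/*
 * Editor's note: the naming scheme above yields sysfs PMU names like
 * "uncore" (a single anonymous box), "uncore_cbox" (a single named
 * box) or "uncore_cbox_0" .. "uncore_cbox_N" (multiple boxes), which
 * is what the /sys/bus/event_source/devices entries and the perf
 * event syntax uncore_<type>_<idx>/.../ are built from.
 */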
static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	for (i = 0; i < type->num_boxes; i++)
		free_percpu(type->pmus[i].box);
	kfree(type->pmus);
	type->pmus = NULL;
	kfree(type->events_group);
	type->events_group = NULL;
}
static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	int i;

	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}
static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto fail;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
fail:
	uncore_type_exit(type);
	return -ENOMEM;
}
static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}
static struct pci_driver *uncore_pci_driver;
static bool pcidrv_registered;
/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;

	phys_id = pcibus_to_physid[pdev->bus->number];
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, 0);
	if (!box)
		return -ENOMEM;

	/*
	 * for performance monitoring units with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	return 0;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];

	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (extra_pci_dev[phys_id][i] == pdev) {
				extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);
}
static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_pci2phy_map_init(0x3ce0);
		if (ret)
			return ret;
		pci_uncores = snbep_pci_uncores;
		uncore_pci_driver = &snbep_uncore_pci_driver;
		break;
	case 62: /* IvyTown */
		ret = snbep_pci2phy_map_init(0x0e1e);
		if (ret)
			return ret;
		pci_uncores = ivt_pci_uncores;
		uncore_pci_driver = &ivt_uncore_pci_driver;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(pci_uncores);

	return ret;
}
static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(pci_uncores);
	}
}
/* CPU hot plug/unplug are serialized by the cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}
static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}
static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu);
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
						old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}
static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if the exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, cpu, target);
	uncore_change_context(pci_uncores, cpu, target);
}
static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, -1, cpu);
	uncore_change_context(pci_uncores, -1, cpu);
}
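/*
 * Editor's note: the init/exit pair above maintains the invariant that
 * exactly one online cpu per physical package sits in uncore_cpu_mask;
 * that cpu is where all of the package's uncore events are scheduled,
 * and uncore_change_context() migrates them when the collector cpu
 * goes away or comes back.
 */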
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};
static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}
static int __init uncore_cpu_init(void)
{
	int ret, cpu, max_cores;

	max_cores = boot_cpu_data.x86_max_cores;
	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		msr_uncores = nhm_msr_uncores;
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
		if (snb_uncore_cbox.num_boxes > max_cores)
			snb_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snb_msr_uncores;
		break;
	case 45: /* Sandy Bridge-EP */
		if (snbep_uncore_cbox.num_boxes > max_cores)
			snbep_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snbep_msr_uncores;
		break;
	case 46: /* Nehalem-EX */
		uncore_nhmex = true;
	case 47: /* Westmere-EX aka. Xeon E7 */
		if (!uncore_nhmex)
			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
		if (nhmex_uncore_cbox.num_boxes > max_cores)
			nhmex_uncore_cbox.num_boxes = max_cores;
		msr_uncores = nhmex_msr_uncores;
		break;
	case 62: /* IvyTown */
		if (ivt_uncore_cbox.num_boxes > max_cores)
			ivt_uncore_cbox.num_boxes = max_cores;
		msr_uncores = ivt_msr_uncores;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(msr_uncores);
	if (ret)
		return ret;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	register_cpu_notifier(&uncore_cpu_nb);

	put_online_cpus();

	return 0;
}
static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	for (i = 0; pci_uncores[i]; i++) {
		type = pci_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);