/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)
#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
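/*
 * Worked example of the perfevtsel layout encoded by the masks above
 * (illustrative, not part of the original source): an event with
 * event select 0x80 and umask 0x01 is programmed as
 *
 *	config = 0x80 | (0x01 << 8) = 0x0180
 *
 * with bit 22 (SNB_UNC_CTL_EN) OR'ed in by the enable path. The
 * counter mask occupies bits 24-28 on SNB but the full byte 24-31
 * on NHM, which is the only difference between the two raw masks.
 */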
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
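/*
 * Note on the clamp above (added for clarity): each C-Box is a
 * per-core slice of the last level cache, so the number of C-Box
 * PMUs exposed is limited to the detected core count whenever the
 * static default in snb_uncore_cbox exceeds it.
 */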
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
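/*
 * Note (added for clarity): the ARB type reuses the SKL ops here
 * because the Skylake global enable lives in SKL_UNC_PERF_GLOBAL_CTL
 * (MSR 0xe01) rather than the SNB MSR, so the SKL init/exit box
 * callbacks must be used even for the otherwise unchanged ARB box.
 */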
enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
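/*
 * Note on the scale values above (added for clarity):
 * 6.103515625e-5 == 64 / 2^20, i.e. each increment of these free
 * running counters accounts for one 64-byte cache line, and the
 * scale converts the line count to MiB.
 */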
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
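/*
 * Note (added for clarity): these fixed IMC counters are only 32
 * bits wide (see .fixed_ctr_bits below) while counting 64-byte
 * lines, so they wrap after 2^32 * 64 B = 256 GiB of traffic. The
 * hrtimer armed from snb_uncore_imc_init_box() re-reads them often
 * enough to fold each delta into the 64-bit software count before
 * a wrap can be missed.
 */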
/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}
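/*
 * A hypothetical usage example (added for clarity): once this PMU is
 * registered, system-wide memory traffic can be read through the free
 * running counters, e.g.
 *
 *	perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 *
 * Per the event descriptors above, the reported values are scaled to MiB.
 */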
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
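/*
 * Note (added for clarity): the bus is mapped to physical package id
 * 0 unconditionally above, which assumes the client parts handled by
 * this file are single-socket systems.
 */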
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),    /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{  /* end marker */ }
};
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}
static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
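/*
 * Note (added for clarity): NHM_UNC_GLOBAL_CTL_EN_PC_ALL sets bits
 * 0-7 to enable the eight general purpose uncore counters, and
 * EN_FC (bit 32) enables the fixed clockticks counter.
 */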
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */