/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x190c
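
/*
 * These are the PCI ids of the host bridge (device 0, function 0)
 * through which the client IMC is reached; they are matched against
 * the desktop_imc_pci_ids table further down.
 */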
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
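
/*
 * Resulting perfevtsel layout: event select in bits 0-7, unit mask in
 * bits 8-15, edge detect in bit 18, enable in bit 22, invert in bit 23,
 * and the counter mask in bits 24-28 (SNB) or 24-31 (NHM).
 */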
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};
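
/*
 * Each IMC counter counts 64-byte cache lines transferred to or from
 * memory, so the scale below is 64 / 2^20 = 6.103515625e-5, turning a
 * raw line count into MiB.
 */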
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
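
/*
 * The two free-running counters sit at fixed offsets inside the mapped
 * BAR: data reads at 0x5050, data writes at 0x5054. Both are 32 bits
 * wide (see snb_uncore_imc.fixed_ctr_bits below).
 */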
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}
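
/*
 * The IMC counters are free running and cannot be programmed, started
 * or stopped, so the box and event enable/disable callbacks below are
 * intentionally empty.
 */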
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
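
/*
 * The counters are only 32 bits wide, so the box hrtimer (armed in
 * snb_uncore_imc_init_box() with UNCORE_SNB_IMC_HRTIMER_INTERVAL) has
 * to sample them often enough that a wrap is never missed.
 */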
/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}
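
/*
 * With the whitelist above, the only accepted events are the two
 * free-running counters, e.g. (illustrative perf invocation):
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 */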
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
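
/*
 * The client parts handled here are single-package, so the bus is
 * unconditionally mapped to physical package id 0.
 */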
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{  /* end marker */ }
};
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}
static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
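
/*
 * NHM_UNC_GLOBAL_CTL_EN_PC_ALL sets the low eight bits, enabling all
 * eight general-purpose uncore counters at once, while bit 32 enables
 * the fixed clockticks counter.
 */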
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */