/* Nehalem/SandyBridge/Haswell uncore support */
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK		0x000000ff
#define SNB_UNC_CTL_UMASK_MASK		0x0000ff00
#define SNB_UNC_CTL_EDGE_DET		(1 << 18)
#define SNB_UNC_CTL_EN			(1 << 22)
#define SNB_UNC_CTL_INVERT		(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK		0x1f000000
#define NHM_UNC_CTL_CMASK_MASK		0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN	(1 << 0)
#define SNB_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 SNB_UNC_CTL_UMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET | \
					 SNB_UNC_CTL_INVERT | \
					 NHM_UNC_CTL_CMASK_MASK)
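/*
 * Example decoding (illustrative value, not taken from the SDM): a raw
 * config of 0x010400ff on SNB selects event=0xff, umask=0x00, edge=1
 * (bit 18) and cmask=1 (bits 24-28). Bit 22 (SNB_UNC_CTL_EN) is set by
 * the enable_event callback below, never by the user-supplied config.
 */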
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL		0x391
#define SNB_UNC_FIXED_CTR_CTRL		0x394
#define SNB_UNC_FIXED_CTR		0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL	((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN		(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0	0x700
#define SNB_UNC_CBO_0_PER_CTR0		0x706
#define SNB_UNC_CBO_MSR_OFFSET		0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0		0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0		0x3b2
#define SNB_UNC_ARB_MSR_OFFSET		0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL		0x391
#define NHM_UNC_FIXED_CTR		0x394
#define NHM_UNC_FIXED_CTR_CTRL		0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL	((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC	(1ULL << 32)
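/*
 * Bits 0-7 enable the eight general-purpose uncore counters and bit 32
 * enables the fixed clockticks counter; nhm_uncore_msr_enable_box()
 * below sets both groups in a single write.
 */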
/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0		0x3c0
#define NHM_UNC_UNCORE_PMC0		0x3b0
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
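/*
 * Each core has its own C-box slice, so parts with fewer than four
 * cores expose correspondingly fewer boxes; hence the clamp above.
 */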
enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
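/*
 * The scale is 64 / 2^20 = 6.103515625e-5: each counter increment
 * corresponds to one 64-byte cache line, so this factor converts the
 * raw count into MiB in perf's final output.
 */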
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
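/*
 * These counters are not programmed through config registers: they are
 * free-running 32-bit counters in the MMIO range mapped from the BAR
 * found at config offset 0x48, at fixed offsets 0x5050 (reads) and
 * 0x5054 (writes).
 */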
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
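/*
 * With only 32 bits per counter, the box hrtimer periodically folds
 * the hardware value into the 64-bit software count before the
 * register can wrap.
 */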
static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}
/* The IMC counters are free-running; there is nothing to enable or disable. */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;
	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;
	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;
	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}
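/*
 * Usage sketch (from user space, for illustration): a command such as
 *	perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 * ends up here with attr.config 0x01 or 0x02, which the switch above
 * translates into a counter base address and a fixed counter index.
 */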
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
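/*
 * Client parts are single-package, so the bus behind the matched IMC
 * device is simply mapped to physical package id 0.
 */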
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
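/*
 * For example, IMC_DEV(SNB_IMC, &snb_uncore_pci_driver) expands to
 * { .pci_id = PCI_DEVICE_ID_INTEL_SNB_IMC, .driver = &snb_uncore_pci_driver },
 * pairing a probeable PCI device id with the driver to register for it.
 */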
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),    /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	{ /* end marker */ }
};
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}
static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
static struct attribute_group nhm_uncore_format_group = {
	.name		= "format",
	.attrs		= nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */