/* arch/x86/include/asm/perf_event.h */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				8
#define X86_PMC_MAX_FIXED				3

#define X86_PMC_IDX_GENERIC				0
#define X86_PMC_IDX_FIXED				32
#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE			(1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			(1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			(1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			(1 << 16)

/*
 * Includes eventsel and unit mask as well:
 */
#define ARCH_PERFMON_EVENT_MASK				0xffff

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6

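/*
 * Illustrative sketch (not part of the original header): programming
 * generic counter 0 to count unhalted core cycles in ring 0 and ring 3
 * using the eventsel bits above.  Assumes <asm/msr.h> for
 * rdmsrl()/wrmsrl() and <linux/types.h> for u64; kept under #if 0
 * because this header does not pull in those dependencies itself.
 */
#if 0
static void example_count_core_cycles(void)
{
	u64 evtsel, count;

	/* Event select code and unit mask for UnhaltedCoreCycles: */
	evtsel  = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL;
	evtsel |= ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

	/* Count in both kernel and user mode, then enable the counter: */
	evtsel |= ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_USR;
	evtsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, 0);		/* clear the count */
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);	/* start counting */

	/* ... run the workload to be measured ... */

	rdmsrl(MSR_ARCH_PERFMON_PERFCTR0, count);
}
#endif
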
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

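/*
 * Illustrative sketch (not part of the original header): enumerating
 * the PMU via CPUID leaf 0xa using the unions above.  Assumes
 * <asm/processor.h> for cpuid() and <linux/kernel.h> for pr_info();
 * kept under #if 0 for that reason.
 */
#if 0
static void example_enumerate_pmu(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);

	pr_info("version: %d, generic counters: %d, width: %d bits\n",
		eax.split.version_id, eax.split.num_events,
		eax.split.bit_width);
	pr_info("fixed counters: %d\n", edx.split.num_events_fixed);

	/*
	 * EBX carries one bit per architectural event; a set bit means
	 * the event is NOT available:
	 */
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		pr_info("unhalted core cycles event supported\n");
}
#endif
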
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

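/*
 * Illustrative sketch (not part of the original header): enabling fixed
 * counter 0 (Instr_Retired.Any).  Each fixed counter owns a 4-bit field
 * in MSR_ARCH_PERFMON_FIXED_CTR_CTRL at bit position (idx * 4); bit 0
 * of the field enables ring-0 counting and bit 1 enables ring-3
 * counting.  On version 2+ PMUs the counter must additionally be
 * enabled in the global control MSR, which is omitted here.  Assumes
 * <asm/msr.h> for rdmsrl()/wrmsrl(); kept under #if 0 for that reason.
 */
#if 0
static void example_count_instructions(void)
{
	u64 ctrl, count;
	int idx = 0;					/* fixed counter 0 */

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl |= 0x3ULL << (idx * 4);			/* count ring 0 + ring 3 */

	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, 0);		/* clear the count */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);	/* start counting */

	/* ... run the workload to be measured ... */

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, count);
}
#endif
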
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)

#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET				0

#else
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)		{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */