/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on the pmu_intel.c file.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

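/* Map a guest (event_select, unit_mask) pair to one of the generic perf
 * hardware event ids in the table above; returns PERF_COUNT_HW_MAX when
 * the pair has no generic equivalent.
 */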
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits; since
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are always enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

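/* Translate a raw counter index (0..nr_arch_gp_counters-1) into its kvm_pmc
 * by way of the corresponding MSR_K7_EVNTSELn register.
 */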
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* mask off bits 30-31, which Intel's RDPMC encoding uses as type flags */
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX value of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

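/* True if @msr is one of the MSR_K7_PERFCTRn or MSR_K7_EVNTSELn registers
 * emulated by this PMU.
 */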
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	return get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
	       get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
}

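/* Read side of the K7 PMU MSRs: MSR_K7_PERFCTRn returns the current counter
 * value, MSR_K7_EVNTSELn returns the cached event select. Returns 0 on
 * success, 1 if the MSR is not handled here.
 */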
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

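/* Write side: a write to MSR_K7_PERFCTRn adjusts the running counter by the
 * delta between the new and current values; a write to MSR_K7_EVNTSELn
 * reprograms the backing perf event unless reserved bits are set.
 */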
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_K7_PERFCTRn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_K7_EVNTSELn */
	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

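/* Refresh the PMU model after a guest CPUID update. No CPUID leaf describes
 * the PMU on these AMD parts, so the layout is fixed: four 48-bit
 * general-purpose counters, no fixed counters, architectural version 0.
 */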
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* not applicable to AMD; clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}

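/* One-time setup of the per-vCPU counter array at vCPU creation. */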
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

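/* Stop every counter and zero its value and event select, e.g. on vCPU reset. */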
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

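/* AMD instantiation of the kvm_pmu_ops interface; the vendor-neutral PMU
 * code in pmu.c dispatches through this table.
 */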
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
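
/* Note: amd_pmu_ops is presumably wired up from the SVM vendor code (svm.c)
 * via kvm_x86_ops->pmu_ops, mirroring the Intel side; that hookup is not
 * part of this file.
 */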