/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

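/*
 * Synthetic interrupt controller (SynIC) helpers.  Each SINT register
 * is kept in an atomic64 so the delivery paths below can read it
 * locklessly.
 */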
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}

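/*
 * Update one SINT register.  A guest-initiated write routing a SINT to
 * a vector below 16 is rejected; host-initiated writes (state restore)
 * are allowed through.
 */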
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}

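/*
 * Look up the SynIC of the addressed vCPU; returns NULL if the vCPU
 * does not exist or its SynIC has not been activated.
 */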
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        if (vcpu_id >= atomic_read(&kvm->online_vcpus))
                return NULL;
        vcpu = kvm_get_vcpu(kvm, vcpu_id);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}

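/*
 * Clear the msg_pending flag of a SINT's slot in the guest's message
 * page; the page is mapped temporarily and marked dirty afterwards.
 */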
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

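/*
 * A vector tied to this SINT has been acknowledged: clear the pending
 * flag of its message slot, re-queue any synthetic timers that were
 * waiting for room on this SINT, and forward the ack to the GSI the
 * SINT is routed to.
 */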
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

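/* Report a guest-initiated SynIC MSR change to user space. */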
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if (data & HV_SYNIC_SIEFP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if (data & HV_SYNIC_SIMP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

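/*
 * Inject the vector configured on a SINT into the local APIC of the
 * SynIC's vCPU.  Returns the number of CPUs the interrupt was
 * delivered to, -EINVAL for a bad SINT index, or -ENOENT if the SINT
 * is masked.
 */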
int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.dest_id = kvm_apic_id(vcpu->arch.apic);
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}

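/*
 * Rescan the irq routing table and record, for every HV_SINT routing
 * entry, the GSI it is reached from, so that acks can be forwarded by
 * kvm_hv_notify_acked_sint().
 */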
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}

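/*
 * Hyper-V's reference counter ticks in 100ns units; derive it from the
 * kernel clock shifted by the per-VM kvmclock offset.
 */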
static u64 get_time_ref_counter(struct kvm *kvm)
{
        return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}

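/* Stop a synthetic timer and discard any expiration still in flight. */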
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * specification v4 (15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}

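/*
 * MSR writes to a timer's CONFIG register restart the timer.  Writing
 * a value whose SINT field is zero while the timer is enabled clears
 * HV_STIMER_ENABLE; the timer is then re-evaluated via
 * stimer_mark_pending().
 */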
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}

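/*
 * Copy a message into a SINT's slot in the guest message page.  If the
 * slot is still occupied, set msg_pending so the guest signals EOM when
 * the slot frees up, and return -EAGAIN so the caller retries later.
 */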
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}

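/* Fill in the expiration/delivery timestamps and post the timer message. */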
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}

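/*
 * Handle KVM_REQ_HV_STIMER: deliver messages for expired timers, then
 * re-arm each still-enabled timer with a nonzero count and clean up
 * the rest.
 */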
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count)
                                        stimer_start(stimer);
                                else
                                        stimer_cleanup(stimer);
                        }
                }
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        vcpu_to_synic(vcpu)->active = true;
        return 0;
}

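/*
 * Partition-wide MSRs are kept in struct kvm_hv and must be accessed
 * under kvm->lock; all other Hyper-V MSRs are per-vCPU state.
 */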
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}

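/* Writes to partition-wide MSRs; called with kvm->lock held. */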
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC: {
                u64 gfn;
                HV_REFERENCE_TSC_PAGE tsc_ref;

                memset(&tsc_ref, 0, sizeof(tsc_ref));
                hv->hv_tsc_page = data;
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
                if (kvm_write_guest(
                                kvm,
                                gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                                &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        cputime_t utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return div_u64(cputime_to_nsecs(utime + stime), 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}

914 | ||
915 | static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |
916 | { | |
917 | u64 data = 0; | |
918 | struct kvm *kvm = vcpu->kvm; | |
919 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
920 | ||
921 | switch (msr) { | |
922 | case HV_X64_MSR_GUEST_OS_ID: | |
923 | data = hv->hv_guest_os_id; | |
924 | break; | |
925 | case HV_X64_MSR_HYPERCALL: | |
926 | data = hv->hv_hypercall; | |
927 | break; | |
93bf4172 AS |
928 | case HV_X64_MSR_TIME_REF_COUNT: |
929 | data = get_time_ref_counter(kvm); | |
e83d5887 | 930 | break; |
e83d5887 AS |
931 | case HV_X64_MSR_REFERENCE_TSC: |
932 | data = hv->hv_tsc_page; | |
933 | break; | |
e7d9513b AS |
934 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
935 | return kvm_hv_msr_get_crash_data(vcpu, | |
936 | msr - HV_X64_MSR_CRASH_P0, | |
937 | pdata); | |
938 | case HV_X64_MSR_CRASH_CTL: | |
939 | return kvm_hv_msr_get_crash_ctl(vcpu, pdata); | |
e516cebb AS |
940 | case HV_X64_MSR_RESET: |
941 | data = 0; | |
942 | break; | |
e83d5887 AS |
943 | default: |
944 | vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); | |
945 | return 1; | |
946 | } | |
947 | ||
948 | *pdata = data; | |
949 | return 0; | |
950 | } | |
951 | ||
952 | static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |
953 | { | |
954 | u64 data = 0; | |
955 | struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; | |
956 | ||
957 | switch (msr) { | |
958 | case HV_X64_MSR_VP_INDEX: { | |
959 | int r; | |
960 | struct kvm_vcpu *v; | |
961 | ||
962 | kvm_for_each_vcpu(r, v, vcpu->kvm) { | |
963 | if (v == vcpu) { | |
964 | data = r; | |
965 | break; | |
966 | } | |
967 | } | |
968 | break; | |
969 | } | |
970 | case HV_X64_MSR_EOI: | |
971 | return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); | |
972 | case HV_X64_MSR_ICR: | |
973 | return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); | |
974 | case HV_X64_MSR_TPR: | |
975 | return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); | |
976 | case HV_X64_MSR_APIC_ASSIST_PAGE: | |
977 | data = hv->hv_vapic; | |
978 | break; | |
9eec50b8 AS |
979 | case HV_X64_MSR_VP_RUNTIME: |
980 | data = current_task_runtime_100ns() + hv->runtime_offset; | |
981 | break; | |
5c919412 AS |
982 | case HV_X64_MSR_SCONTROL: |
983 | case HV_X64_MSR_SVERSION: | |
984 | case HV_X64_MSR_SIEFP: | |
985 | case HV_X64_MSR_SIMP: | |
986 | case HV_X64_MSR_EOM: | |
987 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
988 | return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata); | |
1f4b34f8 AS |
989 | case HV_X64_MSR_STIMER0_CONFIG: |
990 | case HV_X64_MSR_STIMER1_CONFIG: | |
991 | case HV_X64_MSR_STIMER2_CONFIG: | |
992 | case HV_X64_MSR_STIMER3_CONFIG: { | |
993 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; | |
994 | ||
995 | return stimer_get_config(vcpu_to_stimer(vcpu, timer_index), | |
996 | pdata); | |
997 | } | |
998 | case HV_X64_MSR_STIMER0_COUNT: | |
999 | case HV_X64_MSR_STIMER1_COUNT: | |
1000 | case HV_X64_MSR_STIMER2_COUNT: | |
1001 | case HV_X64_MSR_STIMER3_COUNT: { | |
1002 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; | |
1003 | ||
1004 | return stimer_get_count(vcpu_to_stimer(vcpu, timer_index), | |
1005 | pdata); | |
1006 | } | |
e83d5887 AS |
1007 | default: |
1008 | vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); | |
1009 | return 1; | |
1010 | } | |
1011 | *pdata = data; | |
1012 | return 0; | |
1013 | } | |
1014 | ||
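/*
 * Entry points from the common MSR code: partition-wide MSRs are
 * serialized under kvm->lock, per-vCPU MSRs are handled directly.
 */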
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

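/*
 * Dispatch a Hyper-V hypercall: decode the calling convention (32-bit
 * guests pass parameters in register pairs, 64-bit guests in
 * RCX/RDX/R8), run the call, and return the status the same way.
 */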
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * A hypercall generates a #UD from non-zero CPL or real mode,
         * per the Hyper-V spec.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        ret = res | (((u64)rep_done & 0xfff) << 32);
        if (longmode) {
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        } else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
        }

        return 1;
}