kvm/x86: Added Hyper-V vcpu_to_hv_vcpu()/hv_vcpu_to_vcpu() helpers
arch/x86/kvm/hyperv.c
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, u64 data)
{
	int vector;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	/* Vectors 0-15 are architecturally reserved for exceptions. */
	if (vector < 16)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */

	atomic64_set(&synic->sint[sint], data);

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}

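/*
 * Return the target vcpu's SynIC, or NULL if the vcpu does not exist
 * or its SynIC has not been activated.
 */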
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	if (vcpu_id >= atomic_read(&kvm->online_vcpus))
		return NULL;
	vcpu = kvm_get_vcpu(kvm, vcpu_id);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	int gsi, idx;

	vcpu_debug(vcpu, "Hyper-V SynIC acked sint %d\n", sint);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&vcpu_to_synic(vcpu)->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

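/*
 * Snapshot the SynIC state into the vcpu's Hyper-V exit context and
 * request an exit to userspace (KVM_EXIT_HYPERV_SYNIC), so the VMM can
 * react to the guest reprogramming the SynIC MSRs.
 */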
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active)
		return 1;

	vcpu_debug(vcpu, "Hyper-V SynIC set msr 0x%x 0x%llx host %d\n",
		   msr, data, host);
	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if (data & HV_SYNIC_SIEFP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if (data & HV_SYNIC_SIMP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
	int ret;

	if (!synic->active)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

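/*
 * Deliver the interrupt programmed into @sint to this vcpu's local APIC
 * as a fixed, physically-addressed interrupt. Returns -ENOENT if the
 * SINT is masked.
 */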
int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.dest_id = kvm_apic_id(vcpu->arch.apic);
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
	vcpu_debug(vcpu, "Hyper-V SynIC set irq ret %d\n", ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	vcpu_debug(vcpu, "Hyper-V SynIC send eoi vec %d\n", vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

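/*
 * Rescan the irq routing table and cache, per SINT, the GSI of each
 * KVM_IRQ_ROUTING_HV_SINT entry, so that EOMs can be translated into
 * GSI ack notifications without walking the table.
 */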
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	synic_init(vcpu_to_synic(vcpu));
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
	/*
	 * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
	 * so deactivate APICv.
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	vcpu_to_synic(vcpu)->active = true;
	return 0;
}

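/*
 * Partition-wide MSRs live in struct kvm_hv on the VM rather than in a
 * vcpu; accesses to them are serialized with kvm->lock (see
 * kvm_hv_set_msr_common()/kvm_hv_get_msr_common() below).
 */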
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set, hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(
				kvm,
				gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				&tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	cputime_t utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return div_u64(cputime_to_nsecs(utime + stime), 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset,
			       100);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		/* Report the vcpu's position in the vcpu array as its VP index. */
		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

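/*
 * Hypercall calling convention, as read below: on 64-bit the guest
 * passes the parameter in RCX, the input GPA in RDX and the output GPA
 * in R8; on 32-bit each of these is split high:low across EDX:EAX,
 * EBX:ECX and EDI:ESI respectively.
 */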
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * A hypercall issued from non-zero CPL or from real mode
	 * generates #UD, per the Hyper-V spec.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}