/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}

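/*
 * Update one SINTx register. The low bits of @data hold the interrupt
 * vector; HV_SYNIC_SINT_MASKED and HV_SYNIC_SINT_AUTO_EOI modify its
 * behavior. Vectors below 16 are rejected, as they are architecturally
 * reserved for exceptions.
 */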
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, u64 data)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        if (vcpu_id >= atomic_read(&kvm->online_vcpus))
                return NULL;
        vcpu = kvm_get_vcpu(kvm, vcpu_id);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        vcpu_debug(vcpu, "Hyper-V SynIC acked sint %d\n", sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

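/*
 * Guest-initiated writes to the SynIC setup MSRs are forwarded to
 * userspace as a KVM_EXIT_HYPERV exit of type KVM_EXIT_HYPERV_SYNIC,
 * letting userspace track the current SynIC configuration.
 */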
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        vcpu_debug(vcpu, "Hyper-V SynIC set msr 0x%x 0x%llx host %d\n",
                   msr, data, host);
        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if (data & HV_SYNIC_SIEFP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if (data & HV_SYNIC_SIMP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.dest_id = kvm_apic_id(vcpu->arch.apic);
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
        vcpu_debug(vcpu, "Hyper-V SynIC set irq ret %d\n", ret);
        return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        vcpu_debug(vcpu, "Hyper-V SynIC send eoi vec %d\n", vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}

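/*
 * Called when the irq routing table changes: rebuild the SINT -> GSI
 * mapping so that an EOI on a SynIC vector can be turned back into a
 * kvm_notify_acked_gsi() on the corresponding routing entry.
 */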
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}

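/*
 * The Hyper-V time reference counter (HV_X64_MSR_TIME_REF_COUNT) and the
 * synthetic timer counts are defined in 100ns units, hence the divisions
 * and multiplications by 100 when converting to and from nanoseconds.
 */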
static u64 get_time_ref_counter(struct kvm *kvm)
{
        return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
}

static void stimer_mark_expired(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        stimer_mark_expired(stimer, true);

        return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * specification v4(15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_expired(stimer, false);
                return 0;
        }

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        if (stimer->count == 0 || HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_cleanup(stimer);
        if (stimer->config & HV_STIMER_ENABLE)
                if (stimer_start(stimer))
                        return 1;
        return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        stimer->count = count;

        stimer_cleanup(stimer);
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE) {
                stimer->config |= HV_STIMER_ENABLE;
                if (stimer_start(stimer))
                        return 1;
        }

        return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}

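/*
 * Post a message into the SIM page slot for @sint. The slot is claimed
 * by atomically replacing HVMSG_NONE in the message_type field; if the
 * slot is still occupied, msg_pending is set in the slot so that delivery
 * is retried once the guest signals end-of-message (EOM).
 */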
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}

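/*
 * If the expiration message cannot be delivered (e.g. the SIM slot is
 * still busy), msg_pending stays set and kvm_hv_notify_acked_sint()
 * re-queues the timer when the guest acknowledges the SINT.
 */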
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        stimer->msg_pending = true;
        if (!stimer_send_msg(stimer)) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}

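/*
 * Process pending synthetic timers for this vcpu. This runs in vcpu
 * context in response to KVM_REQ_HV_STIMER: expired timers get their
 * message delivered, and timers that remain enabled (periodic ones, or
 * one-shot timers whose message is still pending) are re-armed.
 */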
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                time_now = get_time_ref_counter(vcpu->kvm);
                                if (time_now >= stimer->exp_time)
                                        stimer_expiration(stimer);

                                if (stimer->config & HV_STIMER_ENABLE)
                                        stimer_start(stimer);
                                else
                                        stimer_cleanup(stimer);
                        }
                }
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        vcpu_to_synic(vcpu)->active = true;
        return 0;
}

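/*
 * Partition-wide Hyper-V MSRs live in struct kvm (kvm->arch.hyperv) and
 * are accessed under kvm->lock; the remaining MSRs are per-vcpu state in
 * vcpu->arch.hyperv.
 */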
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}

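/*
 * HV_X64_MSR_CRASH_P0..P4 let the guest record its bugcheck parameters;
 * a guest write of the notify bit to HV_X64_MSR_CRASH_CTL then raises
 * KVM_REQ_HV_CRASH so that userspace is told about the guest crash.
 */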
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC: {
                u64 gfn;
                HV_REFERENCE_TSC_PAGE tsc_ref;

                memset(&tsc_ref, 0, sizeof(tsc_ref));
                hv->hv_tsc_page = data;
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
                if (kvm_write_guest(
                                kvm,
                                gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                                &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

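/*
 * current_task_runtime_100ns() backs HV_X64_MSR_VP_RUNTIME: the guest
 * reads its accumulated run time in 100ns units, offset by a
 * host-settable runtime_offset so that userspace can restore the value
 * across save/restore.
 */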
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        cputime_t utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return div_u64(cputime_to_nsecs(utime + stime), 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;

                kvm_for_each_vcpu(r, v, vcpu->kvm) {
                        if (v == vcpu) {
                                data = r;
                                break;
                        }
                }
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

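/*
 * Hypercall input value layout, as decoded below: bits 0-15 carry the
 * call code, bit 16 the "fast" flag, bits 32-43 the rep count and bits
 * 48-59 the rep start index. In 32-bit mode the input value and the
 * input/output GPAs are passed in EDX:EAX, EBX:ECX and EDI:ESI; in
 * 64-bit mode they are passed in RCX, RDX and R8.
 */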
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * A hypercall generates #UD from non-zero CPL and real mode
         * per the Hyper-V spec.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        ret = res | (((u64)rep_done & 0xfff) << 32);
        if (longmode) {
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        } else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
        }

        return 1;
}