arch/x86/kvm/i8254.c
/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

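/*
 * 32-bit kernels have no native 64-bit modulo operator, so emulate it
 * via div64_u64(); on 64-bit builds the plain '%' operator is used.
 */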
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

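/*
 * Read/write access states for a channel: LSB-only, MSB-only, or the
 * two-step LSB-then-MSB sequence (WORD0 = low byte next, WORD1 = high
 * byte next).
 */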
#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm_pit *pit, int channel)
{
	return pit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm_pit *pit)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (!ps->period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->timer);
	elapsed = ps->period - ktime_to_ns(remaining);

	return elapsed;
}

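/*
 * Channel 0 is backed by the hrtimer, so its elapsed time is derived
 * from the timer's remaining time; channels 1 and 2 simply measure
 * wall-clock time since the count was last loaded.
 */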
static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(pit);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	t = kpit_elapsed(pit, c, channel);
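	/* Convert elapsed ns to elapsed PIT ticks at the ~1.193182 MHz input clock. */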
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
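		/* In mode 3 the counter decrements by two on each input clock. */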
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

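/*
 * Compute the current level of the channel's OUT pin per the 8254
 * datasheet: mode 0 raises OUT once the count expires, mode 1 holds it
 * low during the one-shot, mode 2 pulses on each reload, mode 3 yields
 * a square wave, and modes 4/5 strobe for a single tick.
 */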
static int pit_get_out(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int out;

	t = kpit_elapsed(pit, c, channel);
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->count_latched) {
		c->latched_count = pit_get_count(pit, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(pit, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
{
	return container_of(ps, struct kvm_pit, pit_state);
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	struct kvm_pit *pit = pit_state_to_pit(ps);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack should be set before pending is read. Order accesses with
	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0)
		queue_kthread_work(&pit->worker, &pit->expired);
}

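/*
 * Keep the PIT hrtimer close to the BSP: if the timer is currently
 * queued, cancel it and re-arm it with the same expiry so that it is
 * enqueued on the CPU the BSP migrated to.
 */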
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	flush_kthread_work(&pit->expired);
}

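/*
 * Inject one PIT tick. With reinject enabled, a new tick is only
 * delivered after the previous one has been acked, so the tick is
 * dropped here unless irq_ack was set by kvm_pit_ack_irq.
 */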
static void pit_do_work(struct kthread_work *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	int i;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
		return;

	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation does not follow
	 * the MP specification. We propagate a PIT interrupt to all
	 * VCPUs and only when LVT0 is in NMI mode. The interrupt can
	 * also be simultaneously delivered through PIC and IOAPIC.
	 */
	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
	struct kvm_pit *pt = pit_state_to_pit(ps);

	if (atomic_read(&ps->reinject))
		atomic_inc(&ps->pending);

	queue_kthread_work(&pt->worker, &pt->expired);

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
	atomic_set(&pit->pit_state.pending, 0);
	atomic_set(&pit->pit_state.irq_ack, 1);
}

void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;

	if (atomic_read(&ps->reinject) == reinject)
		return;

	if (reinject) {
		/* The initial state is preserved while ps->reinject == 0. */
		kvm_pit_reset_reinject(pit);
		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	} else {
		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	}

	atomic_set(&ps->reinject, reinject);
}

static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	s64 interval;

	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value only takes effect after the timer is retriggered */
	hrtimer_cancel(&ps->timer);
	flush_kthread_work(&pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	kvm_pit_reset_reinject(pit);

	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns "
			    "i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &pit->pit_state;

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
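	/*
	 * At the ~1.193182 MHz input clock, a full 0x10000 count is
	 * about 54.9 ms, the classic 18.2 Hz DOS timer tick.
	 */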
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two types of timer: modes 0, 1 and 4 create a one-shot timer,
	 * modes 2 and 3 a periodic one; any other mode deletes the timer.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(pit, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(pit, val, 1);
		break;
	default:
		destroy_pit_timer(pit);
	}
}

void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
		int hpet_legacy_start)
{
	u8 saved_mode;

	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

	if (hpet_legacy_start) {
		/* save existing mode for later reenablement */
		WARN_ON(channel != 0);
		saved_mode = pit->pit_state.channels[0].mode;
		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(pit, channel, val);
		pit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(pit, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static int pit_ioport_write(struct kvm_vcpu *vcpu,
			    struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
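		/*
		 * Control word layout: bits 7-6 select the counter (3 is
		 * the read-back command), bits 5-4 the access mode, bits
		 * 3-1 the operating mode, bit 0 binary/BCD counting.
		 */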
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(pit, channel);
					if (!(val & 0x10))
						pit_latch_status(pit, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(pit, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(pit, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(pit, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(pit, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(pit, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
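	/*
	 * Port 0x61 read-back: bit 0 = timer 2 gate, bit 1 = speaker data
	 * enable, bit 4 = refresh clock toggle, bit 5 = timer 2 OUT.
	 */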
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
		(pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

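	/*
	 * Mode 0xff marks a channel as unprogrammed; the gate starts high
	 * on channels 0 and 1 and low on channel 2, whose gate is driven
	 * through port 0x61 (see speaker_ioport_write above).
	 */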
	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit, i, 0);
	}

	kvm_pit_reset_reinject(pit);
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask)
		kvm_pit_reset_reinject(pit);
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read     = pit_ioport_read,
	.write    = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read     = speaker_ioport_read,
	.write    = speaker_ioport_write,
};

struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0)
		goto fail_request;

	mutex_init(&pit->pit_state.lock);

	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	init_kthread_worker(&pit->worker);
	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
				       "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker_task))
		goto fail_kthread;

	init_kthread_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->timer.function = pit_timer_fn;

	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	pit->mask_notifier.func = pit_mask_notifier;

	kvm_pit_reset(pit);

	kvm_pit_set_reinject(pit, true);

	mutex_lock(&kvm->slots_lock);
	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail_register_pit;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_register_speaker;
	}
	mutex_unlock(&kvm->slots_lock);

	return pit;

fail_register_speaker:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
	mutex_unlock(&kvm->slots_lock);
	kvm_pit_set_reinject(pit, false);
	kthread_stop(pit->worker_task);
fail_kthread:
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
	kfree(pit);
	return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct kvm_pit *pit = kvm->arch.vpit;

	if (pit) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
		kvm_pit_set_reinject(pit, false);
		hrtimer_cancel(&pit->pit_state.timer);
		flush_kthread_work(&pit->expired);
		kthread_stop(pit->worker_task);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
	}
}