KVM: i8254: pass struct kvm_pit instead of kvm in PIT
[deliverable/linux.git] / arch / x86 / kvm / i8254.c
CommitLineData
7837699f
SY
1/*
2 * 8253/8254 interval timer emulation
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 * Copyright (c) 2006 Intel Corporation
6 * Copyright (c) 2007 Keir Fraser, XenSource Inc
7 * Copyright (c) 2008 Intel Corporation
9611c187 8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
7837699f
SY
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 *
28 * Authors:
29 * Sheng Yang <sheng.yang@intel.com>
30 * Based on QEMU and Xen.
31 */
32
a78d9626
JP
33#define pr_fmt(fmt) "pit: " fmt
34
7837699f 35#include <linux/kvm_host.h>
5a0e3ad6 36#include <linux/slab.h>
7837699f 37
49df6397 38#include "ioapic.h"
7837699f
SY
39#include "irq.h"
40#include "i8254.h"
9ed96e87 41#include "x86.h"
7837699f
SY
42
43#ifndef CONFIG_X86_64
6f6d6a1a 44#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
7837699f
SY
45#else
46#define mod_64(x, y) ((x) % (y))
47#endif
48
49#define RW_STATE_LSB 1
50#define RW_STATE_MSB 2
51#define RW_STATE_WORD0 3
52#define RW_STATE_WORD1 4
53
54/* Compute with 96 bit intermediate result: (a*b)/c */
55static u64 muldiv64(u64 a, u32 b, u32 c)
56{
57 union {
58 u64 ll;
59 struct {
60 u32 low, high;
61 } l;
62 } u, res;
63 u64 rl, rh;
64
65 u.ll = a;
66 rl = (u64)u.l.low * (u64)b;
67 rh = (u64)u.l.high * (u64)b;
68 rh += (rl >> 32);
6f6d6a1a
RZ
69 res.l.high = div64_u64(rh, c);
70 res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
7837699f
SY
71 return res.ll;
72}
73
09edea72 74static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
7837699f 75{
09edea72 76 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
7837699f 77
7837699f
SY
78 switch (c->mode) {
79 default:
80 case 0:
81 case 4:
82 /* XXX: just disable/enable counting */
83 break;
84 case 1:
85 case 2:
86 case 3:
87 case 5:
88 /* Restart counting on rising edge. */
89 if (c->gate < val)
90 c->count_load_time = ktime_get();
91 break;
92 }
93
94 c->gate = val;
95}
96
09edea72 97static int pit_get_gate(struct kvm_pit *pit, int channel)
7837699f 98{
09edea72 99 return pit->pit_state.channels[channel].gate;
7837699f
SY
100}
101
09edea72 102static s64 __kpit_elapsed(struct kvm_pit *pit)
fd668423
MT
103{
104 s64 elapsed;
105 ktime_t remaining;
09edea72 106 struct kvm_kpit_state *ps = &pit->pit_state;
fd668423 107
26ef1924 108 if (!ps->period)
0ff77873
MT
109 return 0;
110
ede2ccc5
MT
111 /*
112 * The Counter does not stop when it reaches zero. In
113 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
114 * the highest count, either FFFF hex for binary counting
115 * or 9999 for BCD counting, and continues counting.
116 * Modes 2 and 3 are periodic; the Counter reloads
117 * itself with the initial count and continues counting
118 * from there.
119 */
26ef1924
AK
120 remaining = hrtimer_get_remaining(&ps->timer);
121 elapsed = ps->period - ktime_to_ns(remaining);
fd668423
MT
122
123 return elapsed;
124}
125
09edea72 126static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
fd668423
MT
127 int channel)
128{
129 if (channel == 0)
09edea72 130 return __kpit_elapsed(pit);
fd668423
MT
131
132 return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
133}
134
/* Emulate a read of the current counter value for @channel. */
static int pit_get_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	/* Convert elapsed nanoseconds into elapsed PIT input-clock ticks. */
	t = kpit_elapsed(pit, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		/* One-shot style modes: count straight down, wrapping at 16 bits. */
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* Square wave: counter decrements by two each tick. */
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		/* Periodic modes reload from the initial count. */
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}
161
/* Emulate the OUT pin level of @channel for the current instant. */
static int pit_get_out(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int out;

	/* Elapsed PIT ticks since the count was loaded. */
	t = kpit_elapsed(pit, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		/* Mode 0: OUT goes high once the count expires. */
		out = (d >= c->count);
		break;
	case 1:
		/* Mode 1: OUT is low for the duration of the one-shot. */
		out = (d < c->count);
		break;
	case 2:
		/* Mode 2: a one-tick low pulse at each reload point. */
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		/* Mode 3: square wave, high for the first half of each period. */
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		/* Modes 4/5: a single strobe exactly when the count expires. */
		out = (d == c->count);
		break;
	}

	return out;
}
193
09edea72 194static void pit_latch_count(struct kvm_pit *pit, int channel)
7837699f 195{
09edea72 196 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
7837699f 197
7837699f 198 if (!c->count_latched) {
09edea72 199 c->latched_count = pit_get_count(pit, channel);
7837699f
SY
200 c->count_latched = c->rw_mode;
201 }
202}
203
09edea72 204static void pit_latch_status(struct kvm_pit *pit, int channel)
7837699f 205{
09edea72 206 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
7837699f 207
7837699f
SY
208 if (!c->status_latched) {
209 /* TODO: Return NULL COUNT (bit 6). */
09edea72 210 c->status = ((pit_get_out(pit, channel) << 7) |
7837699f
SY
211 (c->rw_mode << 4) |
212 (c->mode << 1) |
213 c->bcd);
214 c->status_latched = 1;
215 }
216}
217
/*
 * IRQ-ack notifier: the guest acknowledged the PIT interrupt.  If further
 * ticks are pending and reinjection is enabled, queue the next injection.
 */
static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack should be set before pending is read. Order accesses with
	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject)
		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
}
231
/*
 * Re-arm the PIT hrtimer after a vcpu migrates between host CPUs, so the
 * timer fires on the CPU now running the BSP.  Only the BSP matters here,
 * since the PIT interrupt is tied to it.
 */
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	/* Only restart a timer that was actually queued. */
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}
246
/* Stop the channel-0 timer and wait for any queued injection work. */
static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	/* Ensure a concurrently queued pit_do_work() has finished. */
	flush_kthread_work(&pit->expired);
}
252
b6ddf05f 253static void pit_do_work(struct kthread_work *work)
33572ac0
CL
254{
255 struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
256 struct kvm *kvm = pit->kvm;
257 struct kvm_vcpu *vcpu;
258 int i;
259 struct kvm_kpit_state *ps = &pit->pit_state;
33572ac0 260
ddf54503
RK
261 if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0))
262 return;
263
264 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
265 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
266
267 /*
268 * Provides NMI watchdog support via Virtual Wire mode.
269 * The route is: PIT -> LVT0 in NMI mode.
270 *
271 * Note: Our Virtual Wire implementation does not follow
272 * the MP specification. We propagate a PIT interrupt to all
273 * VCPUs and only when LVT0 is in NMI mode. The interrupt can
274 * also be simultaneously delivered through PIC and IOAPIC.
33572ac0 275 */
ddf54503
RK
276 if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
277 kvm_for_each_vcpu(i, vcpu, kvm)
278 kvm_apic_nmi_wd_deliver(vcpu);
33572ac0
CL
279}
280
281static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
282{
26ef1924
AK
283 struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
284 struct kvm_pit *pt = ps->kvm->arch.vpit;
33572ac0 285
7dd0fdff 286 if (ps->reinject)
26ef1924 287 atomic_inc(&ps->pending);
7dd0fdff
RK
288
289 queue_kthread_work(&pt->worker, &pt->expired);
33572ac0 290
26ef1924
AK
291 if (ps->is_periodic) {
292 hrtimer_add_expires_ns(&ps->timer, ps->period);
33572ac0
CL
293 return HRTIMER_RESTART;
294 } else
295 return HRTIMER_NORESTART;
296}
297
/* Reset reinjection state: no ticks pending, last IRQ counted as acked. */
static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
	atomic_set(&pit->pit_state.pending, 0);
	atomic_set(&pit->pit_state.irq_ack, 1);
}
303
/*
 * (Re)program the channel-0 hrtimer for a counter reload value of @val
 * PIT ticks; @is_period selects periodic vs one-shot operation.
 */
static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	s64 interval;

	/*
	 * Don't arm the timer if the interrupt could not reach the guest
	 * anyway: no in-kernel ioapic, or the HPET has taken over legacy
	 * PIT interrupt delivery.
	 */
	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	/* Convert PIT ticks to nanoseconds. */
	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO The new value only affected after the retriggered */
	hrtimer_cancel(&ps->timer);
	/* Make sure a queued injection from the old timer has drained. */
	flush_kthread_work(&ps->pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	ps->timer.function = pit_timer_fn;
	ps->kvm = pit->kvm;

	kvm_pit_reset_reinject(pit);

	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns "
			    "i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}
349
/*
 * Load a new initial count into @channel and, for channel 0, start or
 * stop the backing hrtimer according to the channel's mode.
 */
static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &pit->pit_state;

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	/* Channels 1 and 2 have no timer; just record the load time. */
	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/* Two types of timer
	 * mode 1 is one shot, mode 2 is period, otherwise del timer */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(pit, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(pit, val, 1);
		break;
	default:
		destroy_pit_timer(pit);
	}
}
387
09edea72
RK
388void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
389 int hpet_legacy_start)
e0f63cb9 390{
e9f42757 391 u8 saved_mode;
b69d920f 392
09edea72 393 WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));
b69d920f 394
e9f42757
BK
395 if (hpet_legacy_start) {
396 /* save existing mode for later reenablement */
e5e57e7a 397 WARN_ON(channel != 0);
09edea72
RK
398 saved_mode = pit->pit_state.channels[0].mode;
399 pit->pit_state.channels[0].mode = 0xff; /* disable timer */
400 pit_load_count(pit, channel, val);
401 pit->pit_state.channels[0].mode = saved_mode;
e9f42757 402 } else {
09edea72 403 pit_load_count(pit, channel, val);
e9f42757 404 }
e0f63cb9
SY
405}
406
/* Map the PIT's command/data kvm_io_device back to its kvm_pit. */
static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

/* Map the speaker-port kvm_io_device back to its kvm_pit. */
static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}
416
bda9020e
MT
417static inline int pit_in_range(gpa_t addr)
418{
419 return ((addr >= KVM_PIT_BASE_ADDRESS) &&
420 (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
421}
422
/*
 * Handle a guest OUT to ports 0x40-0x43: either a control-word /
 * read-back command (port 3) or a count byte for a channel (ports 0-2).
 * Returns -EOPNOTSUPP if the address is not ours, so the bus tries the
 * next device.
 */
static int pit_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	/* NOTE(review): len is not checked before this 4-byte read —
	 * presumably KVM always hands us a full-sized buffer; confirm. */
	u32 val = *(u32 *) data;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	/* Port writes are one byte; keep only the channel offset. */
	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		/* Control word: bits 7-6 select the channel. */
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					/* bit5 clear = latch count, bit4 clear = latch status */
					if (!(val & 0x20))
						pit_latch_count(pit, channel);
					if (!(val & 0x10))
						pit_latch_status(pit, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				/* Access mode 0 is the Counter Latch command. */
				pit_latch_count(pit, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				/* Bits 3-1: counting mode; 6 and 7 alias 2 and 3. */
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(pit, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(pit, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			/* First byte of an LSB-then-MSB 16-bit load. */
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(pit, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}
498
/*
 * Handle a guest IN from ports 0x40-0x42: return latched status, a
 * latched count byte, or the live counter value, honouring the
 * channel's byte-sequencing state machine.
 */
static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int ret, count;
	struct kvm_kpit_channel_state *s;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	/* The control port (0x43) is write-only; reads return 0. */
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		/* A latched status byte has priority over everything. */
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		/* Drain the latched count, one or two bytes per rw mode. */
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			/* LSB now; next read delivers the MSB. */
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		/* No latch pending: read the live counter. */
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	/* Copy at most sizeof(ret) bytes into the guest's buffer. */
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}
568
e32edf4f
NN
569static int speaker_ioport_write(struct kvm_vcpu *vcpu,
570 struct kvm_io_device *this,
bda9020e 571 gpa_t addr, int len, const void *data)
7837699f 572{
d76685c4 573 struct kvm_pit *pit = speaker_to_pit(this);
7837699f 574 struct kvm_kpit_state *pit_state = &pit->pit_state;
7837699f 575 u32 val = *(u32 *) data;
bda9020e
MT
576 if (addr != KVM_SPEAKER_BASE_ADDRESS)
577 return -EOPNOTSUPP;
7837699f
SY
578
579 mutex_lock(&pit_state->lock);
580 pit_state->speaker_data_on = (val >> 1) & 1;
09edea72 581 pit_set_gate(pit, 2, val & 1);
7837699f 582 mutex_unlock(&pit_state->lock);
bda9020e 583 return 0;
7837699f
SY
584}
585
/*
 * Guest IN from port 0x61: reflect speaker enable, channel-2 gate and
 * output, plus an approximated refresh-clock toggle bit.
 */
static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	unsigned int refresh_clock;
	int ret;
	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	/* bit0: gate, bit1: speaker enable, bit4: refresh, bit5: ch2 OUT */
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
	       (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}
609
/* Reset the PIT to power-on state: all channels idle, gates as on real hw. */
void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;		/* "no mode set" sentinel */
		c->gate = (i != 2);	/* channel 2's gate starts low */
		pit_load_count(pit, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset_reinject(pit);
}
627
/*
 * IRQ mask notifier: when the PIT interrupt is unmasked, clear any
 * backlog of pending ticks so the guest isn't flooded with stale IRQs.
 * (NOTE(review): "notifer" is a historical typo for "notifier"; renaming
 * would touch the registration site in kvm_create_pit as well.)
 */
static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask)
		kvm_pit_reset_reinject(pit);
}
635
/* I/O callbacks for the PIT command/data ports (0x40-0x43). */
static const struct kvm_io_device_ops pit_dev_ops = {
	.read     = pit_ioport_read,
	.write    = pit_ioport_write,
};

/* I/O callbacks for the dummy speaker port (0x61). */
static const struct kvm_io_device_ops speaker_dev_ops = {
	.read     = speaker_ioport_read,
	.write    = speaker_ioport_write,
};
645
/*
 * Allocate and wire up the in-kernel PIT for @kvm: IRQ source, worker
 * kthread, hrtimer, ack/mask notifiers, and the PIO bus devices
 * (optionally including the dummy speaker port).  Returns NULL on any
 * failure, after unwinding whatever was set up.
 *
 * Caller must hold slots_lock.
 */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);

	/* Name the worker after the creating process for debuggability. */
	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	init_kthread_worker(&pit->worker);
	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
				       "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker_task)) {
		mutex_unlock(&pit->pit_state.lock);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
		return NULL;
	}
	init_kthread_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;	/* back-pointer used by timer/ack paths */
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifer;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_unregister;
	}

	return pit;

fail_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);

fail:
	/* Unwind everything registered above, then stop the worker. */
	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
	kthread_stop(pit->worker_task);
	kfree(pit);
	return NULL;
}
727
/* Tear down the in-kernel PIT, reversing kvm_create_pit(). */
void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		/* Unregister bus devices first so no new I/O can arrive. */
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
					  &kvm->arch.vpit->speaker_dev);
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		kvm_unregister_irq_ack_notifier(kvm,
				&kvm->arch.vpit->pit_state.irq_ack_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.timer;
		hrtimer_cancel(timer);
		/* Drain any queued injection before stopping the worker. */
		flush_kthread_work(&kvm->arch.vpit->expired);
		kthread_stop(kvm->arch.vpit->worker_task);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}
This page took 0.486362 seconds and 5 git commands to generate.