/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}
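
/*
 * The split above works because a = hi * 2^32 + lo gives
 * a*b = hi*b * 2^32 + lo*b.  Folding the carry (rl >> 32) into rh, and the
 * remainder of rh/c back into the low-half divide, keeps every intermediate
 * value within 96 bits.  For example, muldiv64(1ULL << 32, 3, 2) yields
 * 0x180000000, i.e. 3 * 2^32 / 2.
 */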

static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm_pit *pit, int channel)
{
	return pit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm_pit *pit)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (!ps->period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->timer);
	elapsed = ps->period - ktime_to_ns(remaining);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(pit);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}
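
/*
 * Only channel 0 drives the hrtimer, since it is the channel wired to the
 * interrupt line; its elapsed time is therefore derived from the timer
 * state.  Channels 1 and 2 are never timer-backed, so their elapsed time is
 * plain wall-clock time since the count was loaded.
 */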

static int pit_get_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	t = kpit_elapsed(pit, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}
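
/*
 * The factor of two in mode 3 mirrors the 8254 square-wave mode, where the
 * counter decrements by two on every input clock: after d nominal ticks,
 * 2*d counts have elapsed.
 */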

static int pit_get_out(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int out;

	t = kpit_elapsed(pit, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}
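
/*
 * In mode 3 the output is high for the first half of each period.  The
 * ((c->count + 1) >> 1) term rounds up, matching the 8254 behaviour for odd
 * counts: OUT stays high for (N+1)/2 counts and low for (N-1)/2 counts.
 */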

static void pit_latch_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->count_latched) {
		c->latched_count = pit_get_count(pit, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(pit, channel) << 7) |
				(c->rw_mode << 4) |
				(c->mode << 1) |
				c->bcd);
		c->status_latched = 1;
	}
}

static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
{
	return container_of(ps, struct kvm_pit, pit_state);
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	struct kvm_pit *pit = pit_state_to_pit(ps);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack should be set before pending is read.  Order accesses with
	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0)
		queue_kthread_work(&pit->worker, &pit->expired);
}
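
/*
 * With reinject enabled, ps->pending counts ticks that have not yet been
 * delivered to the guest.  An ack re-arms irq_ack and, if further ticks are
 * pending, immediately queues the next injection; the barrier pairs with
 * the xchg() in pit_do_work() so a tick firing concurrently with the ack
 * cannot slip between the irq_ack update and the pending check.
 */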

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	flush_kthread_work(&pit->expired);
}

static void pit_do_work(struct kthread_work *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	int i;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0))
		return;

	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation does not follow
	 * the MP specification.  We propagate a PIT interrupt to all
	 * VCPUs and only when LVT0 is in NMI mode.  The interrupt can
	 * also be simultaneously delivered through PIC and IOAPIC.
	 */
	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
	struct kvm_pit *pt = pit_state_to_pit(ps);

	if (ps->reinject)
		atomic_inc(&ps->pending);

	queue_kthread_work(&pt->worker, &pt->expired);

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
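
/*
 * Injection always happens in the kthread worker, never in hrtimer
 * (interrupt) context.  Without reinject, ticks are not accounted in
 * ps->pending, so a tick that fires before the worker has picked up the
 * previous one simply collapses into it instead of being queued.
 */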

static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
	atomic_set(&pit->pit_state.pending, 0);
	atomic_set(&pit->pit_state.irq_ack, 1);
}

void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;

	if (ps->reinject == reinject)
		return;

	if (reinject) {
		/* The initial state is preserved while ps->reinject == 0. */
		kvm_pit_reset_reinject(pit);
		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	} else {
		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	}

	ps->reinject = reinject;
}
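
/*
 * reinject == true is the classic tick catch-up policy: missed ticks
 * accumulate and are re-delivered one by one as the guest acks them, which
 * is what the ack and mask notifiers are for.  reinject == false is a
 * discard policy and needs no notifiers, since ticks never accumulate.
 */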

static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	s64 interval;

	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value only takes effect after the timer is retriggered */
	hrtimer_cancel(&ps->timer);
	flush_kthread_work(&pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	ps->timer.function = pit_timer_fn;

	kvm_pit_reset_reinject(pit);

	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns "
			    "i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}
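
/*
 * Worked example: the guest's maximum divisor (a programmed count of 0,
 * i.e. 0x10000) gives interval = 65536 * NSEC_PER_SEC / KVM_PIT_FREQ,
 * roughly 54925000 ns with the PIT's ~1.19318 MHz input clock: the familiar
 * 54.9 ms / 18.2 Hz DOS timer tick.
 */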

static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &pit->pit_state;

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/* Two types of timer: modes 0, 1 and 4 are one-shot, modes 2 and 3
	 * are periodic; any other mode deletes the timer. */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(pit, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(pit, val, 1);
		break;
	default:
		destroy_pit_timer(pit);
	}
}

void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
		int hpet_legacy_start)
{
	u8 saved_mode;

	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

	if (hpet_legacy_start) {
		/* save existing mode for later reenablement */
		WARN_ON(channel != 0);
		saved_mode = pit->pit_state.channels[0].mode;
		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(pit, channel, val);
		pit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(pit, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static int pit_ioport_write(struct kvm_vcpu *vcpu,
			    struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val  &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(pit, channel);
					if (!(val & 0x10))
						pit_latch_status(pit, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(pit, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(pit, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(pit, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(pit, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}
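
/*
 * Command-byte layout on port 0x43: bits 7-6 select the channel (3 means
 * read-back), bits 5-4 the access mode (0 latches the count, 1 = LSB only,
 * 2 = MSB only, 3 = LSB then MSB), bits 3-1 the counting mode, and bit 0
 * selects binary or BCD.  For example, 0x34 programs channel 0 for LSB+MSB
 * access in mode 2 (rate generator) with binary counting.
 */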

static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}
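
/*
 * Reads drain latched state first (a latched status, then a latched count)
 * before falling back to sampling the live counter, mirroring the 8254,
 * where a latch command freezes a snapshot until it has been read out in
 * full.
 */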

static int speaker_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(pit, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
		(pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}
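
/*
 * Port 0x61 ("system control port B") layout as emulated here: bit 0 is the
 * channel 2 gate, bit 1 the speaker data enable, bit 4 a toggling DRAM
 * refresh indicator and bit 5 the channel 2 OUT pin.
 */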

static void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit, i, 0);
	}

	kvm_pit_reset_reinject(pit);
}

static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask)
		kvm_pit_reset_reinject(pit);
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read     = pit_ioport_read,
	.write    = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read     = speaker_ioport_read,
	.write    = speaker_ioport_write,
};

/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0)
		goto fail_request;

	mutex_init(&pit->pit_state.lock);

	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	init_kthread_worker(&pit->worker);
	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
				       "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker_task))
		goto fail_kthread;

	init_kthread_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	pit->mask_notifier.func = pit_mask_notifer;

	kvm_pit_reset(pit);

	kvm_pit_set_reinject(pit, true);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail_register_pit;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_register_speaker;
	}

	return pit;

fail_register_speaker:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
	kvm_pit_set_reinject(pit, false);
	kthread_stop(pit->worker_task);
fail_kthread:
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
	kfree(pit);
	return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct kvm_pit *pit = kvm->arch.vpit;

	if (pit) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
					  &pit->speaker_dev);
		kvm_pit_set_reinject(pit, false);
		hrtimer_cancel(&pit->pit_state.timer);
		flush_kthread_work(&pit->expired);
		kthread_stop(pit->worker_task);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
	}
}