/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
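/*
 * Illustrative note on the two mod_64() variants above: both evaluate
 * e.g. mod_64(2500, 1000) to 500; the div64_u64() form is needed where
 * the native 64-bit modulo operator is unavailable.
 */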
#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)
#define APIC_LVT_NUM			6
/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
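/*
 * Worked example for the two macros above: for vector 0x61 (97),
 * VEC_POS(0x61) is 97 & 31 = 1 and REG_POS(0x61) is (97 >> 5) << 4 =
 * 0x30, so the vector lives in bit 1 of the 32-bit register at offset
 * 0x30 from the start of the IRR/ISR/TMR block (each 32-vector register
 * is spaced 0x10 bytes apart in the APIC page).
 */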
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	*((u32 *) (apic->regs + reg_off)) = val;
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_set_vector(int vec, void *bitmap)
{
	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline int kvm_apic_id(struct kvm_lapic *apic)
{
	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}

#define KVM_X2APIC_CID_BITS 0
static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	mutex_lock(&kvm->arch.apic_map_lock);

	if (!new)
		goto out;

	new->ldr_bits = 8;
	/* flat mode is default */
	new->cid_shift = 8;
	new->cid_mask = 0;
	new->lid_mask = 0xff;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr;

		if (!kvm_apic_present(vcpu))
			continue;

		/*
		 * All APICs have to be configured in the same mode by an OS.
		 * We take advantage of this while building the logical id
		 * lookup table. After reset, APICs are in xapic/flat mode, so
		 * if we find an apic with a different setting we assume this
		 * is the mode the OS wants all apics to be in; build the
		 * lookup table accordingly.
		 */
		if (apic_x2apic_mode(apic)) {
			new->ldr_bits = 32;
			new->cid_shift = 16;
			new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
			new->lid_mask = 0xffff;
		} else if (kvm_apic_sw_enabled(apic) &&
				!new->cid_mask /* flat mode */ &&
				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
			new->cid_shift = 4;
			new->cid_mask = 0xf;
			new->lid_mask = 0xf;
		}

		new->phys_map[kvm_apic_id(apic)] = apic;

		ldr = kvm_apic_get_reg(apic, APIC_LDR);
		cid = apic_cluster_id(new, ldr);
		lid = apic_logical_id(new, ldr);

		if (lid)
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_vcpu_request_scan_ioapic(kvm);
}
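/*
 * Illustrative lookup for the xAPIC cluster case above (the exact helper
 * semantics live in apic_cluster_id()/apic_logical_id()): with cid_shift
 * = 4 and cid_mask = lid_mask = 0xf, an LDR whose logical byte is 0x34
 * decodes to cluster id 3 and logical id 0x4, so that APIC lands in
 * new->logical_map[3][ffs(0x4) - 1] == new->logical_map[3][2].
 */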
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	u32 prev = kvm_apic_get_reg(apic, APIC_SPIV);

	apic_set_reg(apic, APIC_SPIV, val);
	if ((prev ^ val) & APIC_SPIV_APIC_ENABLED) {
		if (val & APIC_SPIV_APIC_ENABLED) {
			static_key_slow_dec_deferred(&apic_sw_disabled);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}

static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
	apic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	apic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) ==
			APIC_LVT_TIMER_TSCDEADLINE);
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	apic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
	LVT_MASK,		/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}
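/*
 * Illustrative walk-through of find_highest_vector(): the 256 vectors
 * are scanned 32 at a time from the top register down. If only vector
 * 0x31 (49) is set, the higher registers read as zero, and the register
 * at REG_POS(32) == 0x10 has bit VEC_POS(49) == 17 set, so the function
 * returns fls(1 << 17) - 1 + 32 == 49.
 */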
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	u32 i, pir_val;
	struct kvm_lapic *apic = vcpu->arch.apic;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
	apic->irr_pending = true;
	apic_set_vector(vec, apic->regs + APIC_IRR);
}

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	apic_clear_vector(vec, apic->regs + APIC_IRR);
	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
		/* try to update RVI */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	else {
		vec = apic_search_irr(apic);
		apic->irr_pending = (vec != -1);
	}
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * The ISR (in service register) bit is set when injecting
		 * an interrupt. The highest vector is injected, so the
		 * latest bit set matches the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment. In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	int highest_irr;

	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	if (!kvm_vcpu_has_lapic(vcpu))
		return 0;
	highest_irr = apic_find_highest_irr(vcpu->arch.apic);

	return highest_irr;
}
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     unsigned long *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		unsigned long *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	for (i = 0; i < 8; i++)
		apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	if (old_ppr != ppr) {
		apic_set_reg(apic, APIC_PROCPRI, ppr);
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	}
}
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	apic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return dest == 0xff || kvm_apic_id(apic) == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	int result = 0;
	u32 logical_id;

	if (apic_x2apic_mode(apic)) {
		logical_id = kvm_apic_get_reg(apic, APIC_LDR);
		return logical_id & mda;
	}

	logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));

	switch (kvm_apic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		if (logical_id & mda)
			result = 1;
		break;
	case APIC_DFR_CLUSTER:
		if (((logical_id >> 4) == (mda >> 0x4))
		    && (logical_id & mda & 0xf))
			result = 1;
		break;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
		break;
	}

	return result;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			int short_hand, int dest, int dest_mode)
{
	int result = 0;
	struct kvm_lapic *target = vcpu->arch.apic;

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	ASSERT(target);
	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == 0)
			/* Physical mode. */
			result = kvm_apic_match_physical_addr(target, dest);
		else
			/* Logical mode. */
			result = kvm_apic_match_logical_addr(target, dest);
		break;
	case APIC_DEST_SELF:
		result = (target == source);
		break;
	case APIC_DEST_ALLINC:
		result = 1;
		break;
	case APIC_DEST_ALLBUT:
		result = (target != source);
		break;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		break;
	}

	return result;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap = 1;
	struct kvm_lapic **dst;
	int i;
	bool ret = false;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (!map)
		goto out;

	if (irq->dest_mode == 0) { /* physical mode */
		if (irq->delivery_mode == APIC_DM_LOWEST ||
				irq->dest_id == 0xff)
			goto out;
		dst = &map->phys_map[irq->dest_id & 0xff];
	} else {
		u32 mda = irq->dest_id << (32 - map->ldr_bits);

		dst = map->logical_map[apic_cluster_id(map, mda)];

		bitmap = apic_logical_id(map, mda);

		if (irq->delivery_mode == APIC_DM_LOWEST) {
			int l = -1;
			for_each_set_bit(i, &bitmap, 16) {
				if (!dst[i])
					continue;
				if (l < 0)
					l = i;
				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
					l = i;
			}

			bitmap = (l >= 0) ? 1 << l : 0;
		}
	}

	for_each_set_bit(i, &bitmap, 16) {
		if (!dst[i])
			continue;
		if (*r < 0)
			*r = 0;
		*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
	}

	ret = true;
out:
	rcu_read_unlock();
	return ret;
}
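/*
 * Illustrative note on the lowest-priority arbitration above: among the
 * APICs whose bit is set in 'bitmap', the loop keeps the index l whose
 * vcpu has the smallest apic_arb_prio value. With candidates at bits 0
 * and 2 and arbitration priorities 7 and 3 respectively, bit 2 wins and
 * bitmap collapses to 1 << 2.
 */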
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     unsigned long *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		/* fall through */
	case APIC_DM_FIXED:
		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map)
			__set_bit(vcpu->vcpu_id, dest_map);

		if (kvm_x86_ops->deliver_posted_interrupt)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			apic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		apic_debug("Ignoring guest SMI\n");
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before the NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
		int trigger_mode;

		if (apic_test_vector(vector, apic->regs + APIC_TMR))
			trigger_mode = IOAPIC_LEVEL_TRIG;
		else
			trigger_mode = IOAPIC_EDGE_TRIG;
		kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
	}
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every write to EOI has a corresponding ISR bit set;
	 * one example is when the kernel checks the timer in
	 * setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}
/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = icr_low & APIC_INT_ASSERT;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
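/*
 * Illustrative ICR decode for the helper above: icr_low = 0x000440f2
 * carries vector 0xf2 (APIC_VECTOR_MASK), fixed delivery mode (bits
 * 8-10 are zero), a set assert bit (bit 14, APIC_INT_ASSERT), edge
 * trigger (bit 15 clear), and the "self" shorthand (bit 18 set within
 * APIC_SHORT_MASK, 0xc0000).
 */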
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}
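/*
 * Worked example with illustrative values: TMICT = 1,000,000 and
 * divide_count = 16 give a period of 16,000,000 ns (APIC_BUS_CYCLE_NS
 * is 1). If 4,000,000 ns remain on the hrtimer, mod_64(4000000,
 * 16000000) is 4,000,000 and TMCCT reads back as 4000000 / (1 * 16) =
 * 250,000.
 */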
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ID:
		if (apic_x2apic_mode(apic))
			val = kvm_apic_id(apic);
		else
			val = kvm_apic_id(apic) << 24;
		break;
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_apic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_apic_get_reg(apic, offset);
		break;
	}

	return val;
}
static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}
static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	static const u64 rmask = 0x43ff01ffffffe70cULL;

	if ((alignment + len) > 4) {
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
		return 1;
	}

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
		return 1;
	}

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return kvm_apic_hw_enabled(apic) &&
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	apic_reg_read(apic, offset, len, data);

	return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);

	apic_debug("timer divide count is 0x%x\n",
		   apic->divide_count);
}
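/*
 * Worked example: TDCR = 0xa (bits 1 and 3 set) gives tmp1 = 0xa,
 * tmp2 = ((0xa & 0x3) | ((0xa & 0x8) >> 1)) + 1 = (2 | 4) + 1 = 7, so
 * divide_count = 1 << 7 = 128, matching the architectural "divide by
 * 128" encoding. TDCR = 0xb wraps via tmp2 & 0x7 = 0, i.e. divide by 1.
 */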
static void start_apic_timer(struct kvm_lapic *apic)
{
	ktime_t now;

	atomic_set(&apic->lapic_timer.pending, 0);

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		/* lapic timer in oneshot or periodic mode */
		now = apic->lapic_timer.timer.base->get_time();
		apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with small
		 * interval, since the hrtimers are not throttled by the host
		 * scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
		}

		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
			      HRTIMER_MODE_ABS);

		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
			   kvm_apic_get_reg(apic, APIC_TMICT),
			   apic->lapic_timer.period,
			   ktime_to_ns(ktime_add_ns(now,
					apic->lapic_timer.period)));
	} else if (apic_lvtt_tscdeadline(apic)) {
		/* lapic timer in tsc deadline mode */
		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
		u64 ns = 0;
		struct kvm_vcpu *vcpu = apic->vcpu;
		unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
		unsigned long flags;

		if (unlikely(!tscdeadline || !this_tsc_khz))
			return;

		local_irq_save(flags);

		now = apic->lapic_timer.timer.base->get_time();
		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
		if (likely(tscdeadline > guest_tsc)) {
			ns = (tscdeadline - guest_tsc) * 1000000ULL;
			do_div(ns, this_tsc_khz);
		}
		hrtimer_start(&apic->lapic_timer.timer,
			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);

		local_irq_restore(flags);
	}
}
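/*
 * Illustrative conversion for the tsc-deadline branch above: with
 * this_tsc_khz = 2000000 (a 2 GHz guest TSC) and a deadline 2,000,000
 * cycles in the future, ns = 2000000 * 1000000ULL / 2000000 =
 * 1,000,000, so the hrtimer is armed 1 ms from now.
 */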
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));

	if (apic_lvt_nmi_mode(lvt0_val)) {
		if (!nmi_wd_enabled) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
		}
	} else if (nmi_wd_enabled)
		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;

		if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < APIC_LVT_NUM; i++) {
				lvt_val = kvm_apic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			atomic_set(&apic->lapic_timer.pending, 0);

		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		apic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
		apic_set_reg(apic, reg, val);

		break;

	case APIC_LVTT:
		if ((kvm_apic_get_reg(apic, APIC_LVTT) &
		    apic->lapic_timer.timer_mode_mask) !=
		   (val & apic->lapic_timer.timer_mode_mask))
			hrtimer_cancel(&apic->lapic_timer.timer);

		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		apic_set_reg(apic, APIC_LVTT, val);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		apic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR:
		if (val & 4)
			apic_debug("KVM_WRITE:TDCR %x\n", val);
		apic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		break;

	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic))
			apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
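/*
 * Illustrative note on the APIC_SELF_IPI case above: in x2APIC mode a
 * write of vector 0xf2 is folded into an ICR write of 0x400f2, i.e.
 * fixed delivery with the "self" destination shorthand (0x40000 lies in
 * APIC_SHORT_MASK), which then goes through apic_send_ipi().
 */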
static int apic_mmio_write(struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	/*
	 * APIC registers must be aligned on a 128-bit boundary.
	 * 32/64/128-bit registers must be accessed thru 32 bits.
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32 *)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	apic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_lapic(vcpu))
		apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	apic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	/* Inject here so clearing tscdeadline won't override new value */
	if (apic_has_pending_timer(vcpu))
		kvm_inject_apic_timer_irqs(vcpu);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 0;

	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
		vcpu->arch.apic_base = value;
		return;
	}

	if (!kvm_vcpu_is_bsp(apic->vcpu))
		value &= ~MSR_IA32_APICBASE_BSP;
	vcpu->arch.apic_base = value;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE)
			static_key_slow_dec_deferred(&apic_hw_disabled);
		else
			static_key_slow_inc(&apic_hw_disabled.key);
		recalculate_apic_map(vcpu->kvm);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			u32 id = kvm_apic_id(apic);
			u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));

			kvm_apic_set_ldr(apic, ldr);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}
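/*
 * Worked example for the x2APIC LDR derivation above: for APIC id 0x13,
 * ldr = ((0x13 >> 4) << 16) | (1 << (0x13 & 0xf)) = 0x10000 | 0x8 =
 * 0x00010008, i.e. cluster 1 with logical id bit 3, matching the fixed
 * x2APIC LDR format.
 */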
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __func__);

	ASSERT(vcpu);
	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	kvm_apic_set_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < APIC_LVT_NUM; i++)
		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_set_reg(apic, APIC_LVT0,
		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));

	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	apic_set_reg(apic, APIC_TASKPRI, 0);
	kvm_apic_set_ldr(apic, 0);
	apic_set_reg(apic, APIC_ESR, 0);
	apic_set_reg(apic, APIC_ICR, 0);
	apic_set_reg(apic, APIC_ICR2, 0);
	apic_set_reg(apic, APIC_TDCR, 0);
	apic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
			apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_apic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
	struct kvm_vcpu *vcpu = apic->vcpu;
	wait_queue_head_t *q = &vcpu->wq;

	/*
	 * There is a race window between reading and incrementing, but we do
	 * not care about potentially losing timer events in the !reinject
	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
	 * in vcpu_enter_guest.
	 */
	if (!atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		/* FIXME: this code should not know anything about vcpus */
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	if (waitqueue_active(q))
		wake_up_interruptible(q);

	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	kvm_lapic_set_base(vcpu,
			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);

	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode. Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);
	return vector;
}
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	/* call kvm_apic_set_id() to put apic into apic_map */
	kvm_apic_set_id(apic, kvm_apic_id(apic));
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_rtc_eoi_tracking_restore_one(vcpu);
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	/*
	 * Illustrative layout of the 32-bit snapshot below: with tpr =
	 * 0x20, max_isr = 0x31 and max_irr = 0x51, data = 0x20 |
	 * ((0x31 & 0xf0) << 8) | (0x51 << 24) = 0x51003020: TPR in byte
	 * 0, the in-service priority class in byte 1, and the highest
	 * pending vector in byte 3.
	 */
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
*vcpu
, u32 msr
, u64 data
)
1834 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1835 u32 reg
= (msr
- APIC_BASE_MSR
) << 4;
1837 if (!irqchip_in_kernel(vcpu
->kvm
) || !apic_x2apic_mode(apic
))
1840 /* if this is ICR write vector before command */
1842 apic_reg_write(apic
, APIC_ICR2
, (u32
)(data
>> 32));
1843 return apic_reg_write(apic
, reg
, (u32
)data
);
1846 int kvm_x2apic_msr_read(struct kvm_vcpu
*vcpu
, u32 msr
, u64
*data
)
1848 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1849 u32 reg
= (msr
- APIC_BASE_MSR
) << 4, low
, high
= 0;
1851 if (!irqchip_in_kernel(vcpu
->kvm
) || !apic_x2apic_mode(apic
))
1854 if (apic_reg_read(apic
, reg
, 4, &low
))
1857 apic_reg_read(apic
, APIC_ICR2
, 4, &high
);
1859 *data
= (((u64
)high
) << 32) | low
;
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 1;

	/* if this is an ICR write, write the vector before the command */
	if (reg == APIC_ICR)
		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return apic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 1;

	if (apic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		apic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}
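/*
 * Worked example of the MSR layout handled above: data = 0x12345601 has
 * KVM_MSR_ENABLED (bit 0) set and yields addr = 0x12345600, which passes
 * the IS_ALIGNED(addr, 4) check; data = 0x12345602 would be rejected
 * with 1, since its address is not 4-byte aligned.
 */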
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	unsigned int sipi_vector;
	unsigned long pe;

	if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
		return;

	pe = xchg(&apic->pending_events, 0);

	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu);
		kvm_vcpu_reset(vcpu);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}
void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}