KVM: SVM: Move svm_queue_exception
[deliverable/linux.git] / arch / x86 / kvm / x86.c
index e46282a565658bdcc3ed2bae40677948cfcb3bf9..efeeabd84ecd3cdc92971ba2652d7c43b7169f53 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/cpufreq.h>
 #include <linux/user-return-notifier.h>
 #include <linux/srcu.h>
+#include <linux/slab.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -224,7 +225,7 @@ static void drop_user_return_notifiers(void *ignore)
 
 unsigned long segment_base(u16 selector)
 {
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;
@@ -233,7 +234,7 @@ unsigned long segment_base(u16 selector)
                return 0;
 
        kvm_get_gdt(&gdt);
-       table_base = gdt.base;
+       table_base = gdt.address;
 
        if (selector & 4) {           /* from ldt */
                u16 ldt_selector = kvm_read_ldt();
@@ -432,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL) {
-               printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, kvm_read_cr0(vcpu));
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -442,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-               printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-               printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-                      "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -460,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
 
@@ -476,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-                       printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-                              "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -504,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
        if (cr4 & CR4_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
-                       printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-                              "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-               printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (cr4 & X86_CR4_VMXE) {
-               printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -546,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-                       printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
-                               printk(KERN_DEBUG
-                                      "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-                               printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-                                      "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
@@ -592,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -648,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits) {
-               printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-                      efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_paging(vcpu)
            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-               printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -666,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -677,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -966,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
-                       /* only 0 or all 1s can be written to IA32_MCi_CTL */
+                       /* only 0 or all 1s can be written to IA32_MCi_CTL
+                        * some Linux kernels though clear bit 10 in bank 4 to
+                        * work around a BIOS/GART TBL issue on AMD K8s; ignore
+                        * this to avoid an uncaught #GP in the guest
+                        */
                        if ((offset & 0x3) == 0 &&
-                           data != 0 && data != ~(u64)0)
+                           data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
@@ -1570,6 +1548,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
        case KVM_CAP_PCI_SEGMENT:
+       case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
                r = 1;
                break;
@@ -2122,14 +2101,20 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 {
        vcpu_load(vcpu);
 
-       events->exception.injected = vcpu->arch.exception.pending;
+       events->exception.injected =
+               vcpu->arch.exception.pending &&
+               !kvm_exception_is_soft(vcpu->arch.exception.nr);
        events->exception.nr = vcpu->arch.exception.nr;
        events->exception.has_error_code = vcpu->arch.exception.has_error_code;
        events->exception.error_code = vcpu->arch.exception.error_code;
 
-       events->interrupt.injected = vcpu->arch.interrupt.pending;
+       events->interrupt.injected =
+               vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
-       events->interrupt.soft = vcpu->arch.interrupt.soft;
+       events->interrupt.soft = 0;
+       events->interrupt.shadow =
+               kvm_x86_ops->get_interrupt_shadow(vcpu,
+                       KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
 
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2138,7 +2123,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->sipi_vector = vcpu->arch.sipi_vector;
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                        | KVM_VCPUEVENT_VALID_SHADOW);
 
        vcpu_put(vcpu);
 }
@@ -2147,7 +2133,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
 {
        if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                             | KVM_VCPUEVENT_VALID_SHADOW))
                return -EINVAL;
 
        vcpu_load(vcpu);
@@ -2162,6 +2149,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        vcpu->arch.interrupt.soft = events->interrupt.soft;
        if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
                kvm_pic_clear_isr_ack(vcpu->kvm);
+       if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+               kvm_x86_ops->set_interrupt_shadow(vcpu,
+                                                 events->interrupt.shadow);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
@@ -2176,6 +2166,36 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+                                            struct kvm_debugregs *dbgregs)
+{
+       vcpu_load(vcpu);
+
+       memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+       dbgregs->dr6 = vcpu->arch.dr6;
+       dbgregs->dr7 = vcpu->arch.dr7;
+       dbgregs->flags = 0;
+
+       vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+                                           struct kvm_debugregs *dbgregs)
+{
+       if (dbgregs->flags)
+               return -EINVAL;
+
+       vcpu_load(vcpu);
+
+       memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+       vcpu->arch.dr6 = dbgregs->dr6;
+       vcpu->arch.dr7 = dbgregs->dr7;
+
+       vcpu_put(vcpu);
+
+       return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -2354,6 +2374,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
                break;
        }
+       case KVM_GET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+
+               kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+
+               r = -EFAULT;
+               if (copy_to_user(argp, &dbgregs,
+                                sizeof(struct kvm_debugregs)))
+                       break;
+               r = 0;
+               break;
+       }
+       case KVM_SET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+
+               r = -EFAULT;
+               if (copy_from_user(&dbgregs, argp,
+                                  sizeof(struct kvm_debugregs)))
+                       break;
+
+               r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
+               break;
+       }
        default:
                r = -EINVAL;
        }
@@ -2634,8 +2677,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r, n, i;
+       int r, i;
        struct kvm_memory_slot *memslot;
+       unsigned long n;
        unsigned long is_dirty = 0;
        unsigned long *dirty_bitmap = NULL;
 
@@ -2650,7 +2694,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
 
        r = -ENOMEM;
        dirty_bitmap = vmalloc(n);
@@ -3464,7 +3508,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        if (vcpu->arch.pio.string)
                return EMULATE_DO_MMIO;
 
-       if ((r || vcpu->mmio_is_write) && run) {
+       if (r || vcpu->mmio_is_write) {
                run->exit_reason = KVM_EXIT_MMIO;
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
@@ -3970,14 +4014,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_idt(vcpu, &dt);
 }
@@ -4482,7 +4526,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_set_cr8(vcpu, kvm_run->cr8);
 
        if (vcpu->arch.pio.cur_count) {
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = complete_pio(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r)
                        goto out;
        }
@@ -4600,7 +4646,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
@@ -4615,11 +4661,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
        kvm_x86_ops->get_idt(vcpu, &dt);
-       sregs->idt.limit = dt.limit;
-       sregs->idt.base = dt.base;
+       sregs->idt.limit = dt.size;
+       sregs->idt.base = dt.address;
        kvm_x86_ops->get_gdt(vcpu, &dt);
-       sregs->gdt.limit = dt.limit;
-       sregs->gdt.base = dt.base;
+       sregs->gdt.limit = dt.size;
+       sregs->gdt.base = dt.address;
 
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
@@ -4691,7 +4737,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 
 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                                          u16 selector,
-                                         struct descriptor_table *dtable)
+                                         struct desc_ptr *dtable)
 {
        if (selector & 1 << 2) {
                struct kvm_segment kvm_seg;
@@ -4699,10 +4745,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
                if (kvm_seg.unusable)
-                       dtable->limit = 0;
+                       dtable->size = 0;
                else
-                       dtable->limit = kvm_seg.limit;
-               dtable->base = kvm_seg.base;
+                       dtable->size = kvm_seg.limit;
+               dtable->address = kvm_seg.base;
        }
        else
                kvm_x86_ops->get_gdt(vcpu, dtable);
@@ -4712,7 +4758,7 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
        int ret;
        u32 err;
@@ -4720,7 +4766,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7) {
+       if (dtable.size < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }
@@ -4737,14 +4783,14 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7)
+       if (dtable.size < index * 8 + 7)
                return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+       return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
 }
 
 static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
@@ -5145,6 +5191,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+       u32 desc_limit;
 
        old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
@@ -5167,7 +5214,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                }
        }
 
-       if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+       desc_limit = get_desc_limit(&nseg_desc);
+       if (!nseg_desc.p ||
+           ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+            desc_limit < 0x2b)) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }
@@ -5219,15 +5269,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
        int mmu_reset_needed = 0;
        int pending_vec, max_bits;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
-       dt.limit = sregs->idt.limit;
-       dt.base = sregs->idt.base;
+       dt.size = sregs->idt.limit;
+       dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
-       dt.limit = sregs->gdt.limit;
-       dt.base = sregs->gdt.base;
+       dt.size = sregs->gdt.limit;
+       dt.address = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);
 
        vcpu->arch.cr2 = sregs->cr2;
This page took 0.031324 seconds and 5 git commands to generate.