KVM: s390: Push run loop into __vcpu_run
author Thomas Huth <thuth@linux.vnet.ibm.com>
Thu, 12 Sep 2013 08:33:44 +0000 (10:33 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 24 Sep 2013 17:12:18 +0000 (19:12 +0200)
Move the do-while loop from kvm_arch_vcpu_ioctl_run() into __vcpu_run(),
and move the call to kvm_handle_sie_intercept() into vcpu_post_run()
(so that the srcu locks can be added in a proper way in the next patch).

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
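
For reference, a sketch of how __vcpu_run() reads after this patch, reconstructed
from the hunks below (the intercept handling itself now lives in vcpu_post_run(),
which is called at the bottom of the loop):

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	return rc;
}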
arch/s390/kvm/kvm-s390.c

index 69c7592e80d9eaa408ce45d70b903cc6a2d5c2ef..8eec7abc5664f36f23949093b677c0759a71e16f 100644 (file)
@@ -738,6 +738,13 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 
        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
 
+       if (rc == 0) {
+               if (kvm_is_ucontrol(vcpu->kvm))
+                       rc = -EOPNOTSUPP;
+               else
+                       rc = kvm_handle_sie_intercept(vcpu);
+       }
+
        return rc;
 }
 
@@ -745,21 +752,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
        int rc, exit_reason;
 
-       rc = vcpu_pre_run(vcpu);
-       if (rc)
-               return rc;
-
-       /*
-        * As PF_VCPU will be used in fault handler, between guest_enter
-        * and guest_exit should be no uaccess.
-        */
-       preempt_disable();
-       kvm_guest_enter();
-       preempt_enable();
-       exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-       kvm_guest_exit();
+       do {
+               rc = vcpu_pre_run(vcpu);
+               if (rc)
+                       break;
 
-       rc = vcpu_post_run(vcpu, exit_reason);
+               /*
+                * As PF_VCPU will be used in fault handler, between
+                * guest_enter and guest_exit should be no uaccess.
+                */
+               preempt_disable();
+               kvm_guest_enter();
+               preempt_enable();
+               exit_reason = sie64a(vcpu->arch.sie_block,
+                                    vcpu->run->s.regs.gprs);
+               kvm_guest_exit();
+
+               rc = vcpu_post_run(vcpu, exit_reason);
+       } while (!signal_pending(current) && !rc);
 
        return rc;
 }
@@ -801,16 +811,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        might_fault();
-
-       do {
-               rc = __vcpu_run(vcpu);
-               if (rc)
-                       break;
-               if (kvm_is_ucontrol(vcpu->kvm))
-                       rc = -EOPNOTSUPP;
-               else
-                       rc = kvm_handle_sie_intercept(vcpu);
-       } while (!signal_pending(current) && !rc);
+       rc = __vcpu_run(vcpu);
 
        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;