KVM: s390: Split up __vcpu_run into three parts
author Thomas Huth <thuth@linux.vnet.ibm.com>
Thu, 12 Sep 2013 08:33:43 +0000 (10:33 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 24 Sep 2013 17:12:18 +0000 (19:12 +0200)
In preparation for the following patch (which will change the indentation
of __vcpu_run quite a bit), this patch puts most of the code from __vcpu_run
into separate functions. The first function handles the code that runs
before the SIE instruction, and the second handles the code that runs
afterwards.
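
For orientation, the resulting shape of __vcpu_run reduces to roughly the
following condensed sketch (the hunks below show the actual code; this is
only an outline with the error handling abbreviated):

	static int __vcpu_run(struct kvm_vcpu *vcpu)
	{
		int rc, exit_reason;

		rc = vcpu_pre_run(vcpu);	/* setup done before the SIE instruction */
		if (rc)
			return rc;

		/* no uaccess between guest_enter and guest_exit (PF_VCPU is set) */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
		kvm_guest_exit();

		return vcpu_post_run(vcpu, exit_reason);	/* translate the SIE exit into rc */
	}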

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/s390/kvm/kvm-s390.c

index e3e7ff77ba449ddd29c0f147013112df1d2eff68..69c7592e80d9eaa408ce45d70b903cc6a2d5c2ef 100644 (file)
@@ -689,9 +689,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
-       int rc;
+       int rc, cpuflags;
 
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
@@ -709,28 +709,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                return rc;
 
        vcpu->arch.sie_block->icptcode = 0;
-       VCPU_EVENT(vcpu, 6, "entering sie flags %x",
-                  atomic_read(&vcpu->arch.sie_block->cpuflags));
-       trace_kvm_s390_sie_enter(vcpu,
-                                atomic_read(&vcpu->arch.sie_block->cpuflags));
+       cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+       VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
+       trace_kvm_s390_sie_enter(vcpu, cpuflags);
 
-       /*
-        * As PF_VCPU will be used in fault handler, between guest_enter
-        * and guest_exit should be no uaccess.
-        */
-       preempt_disable();
-       kvm_guest_enter();
-       preempt_enable();
-       rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-       kvm_guest_exit();
+       return 0;
+}
+
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+{
+       int rc;
 
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
-       if (rc > 0)
+       if (exit_reason >= 0) {
                rc = 0;
-       if (rc < 0) {
+       } else {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
@@ -741,6 +737,30 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        }
 
        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+       return rc;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+       int rc, exit_reason;
+
+       rc = vcpu_pre_run(vcpu);
+       if (rc)
+               return rc;
+
+       /*
+        * As PF_VCPU will be used in fault handler, between guest_enter
+        * and guest_exit should be no uaccess.
+        */
+       preempt_disable();
+       kvm_guest_enter();
+       preempt_enable();
+       exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+       kvm_guest_exit();
+
+       rc = vcpu_post_run(vcpu, exit_reason);
+
        return rc;
 }
 