KVM: s390: Introduce helper function for faulting-in a guest page
Author: Thomas Huth <thuth@linux.vnet.ibm.com>
Author date: Tue, 6 May 2014 15:20:16 +0000 (17:20 +0200)
Committer: Christian Borntraeger <borntraeger@de.ibm.com>
Commit date: Fri, 16 May 2014 12:57:20 +0000 (14:57 +0200)
Rework the function kvm_arch_fault_in_sync() to become a proper helper
function for faulting-in a guest page. Now it takes the guest address as
a parameter and does not ignore the possible error code from gmap_fault()
anymore (which could cause undetected error conditions before).

Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h

index 0a01744cbdd9e3512f4a59714e7d17d746ff3b46..d91feb2f03ea14435355a1de2d28b1ceecaa768e 100644 (file)
@@ -1045,15 +1045,30 @@ retry:
        return 0;
 }
 
-static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
+/**
+ * kvm_arch_fault_in_page - fault-in guest page if necessary
+ * @vcpu: The corresponding virtual cpu
+ * @gpa: Guest physical address
+ * @writable: Whether the page should be writable or not
+ *
+ * Make sure that a guest page has been faulted-in on the host.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 {
-       long rc;
-       hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        struct mm_struct *mm = current->mm;
+       hva_t hva;
+       long rc;
+
+       hva = gmap_fault(gpa, vcpu->arch.gmap);
+       if (IS_ERR_VALUE(hva))
+               return (long)hva;
        down_read(&mm->mmap_sem);
-       rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
+       rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
-       return rc;
+
+       return rc < 0 ? rc : 0;
 }
 
 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
@@ -1191,9 +1206,12 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
-               if (kvm_arch_setup_async_pf(vcpu) ||
-                   (kvm_arch_fault_in_sync(vcpu) >= 0))
+               if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
+               } else {
+                       gpa_t gpa = current->thread.gmap_addr;
+                       rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
+               }
        }
 
        if (rc == -1) {
index 38b589d69951868190cf57b41e5e1b737b1a24cc..e489945921acfb5a4fc0b8e11d44ba0e8da6dc34 100644 (file)
@@ -156,6 +156,7 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
This page took 0.027953 seconds and 5 git commands to generate.