mm: thp: kvm: fix memory corruption in KVM with THP enabled
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6bdfbc23ecaa8fc779085076bc08f340fc704513..b6f50e8b0a393675009a5dcaad7f30af315bc91d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -479,7 +479,7 @@ static bool spte_is_locklessly_modifiable(u64 spte)
 static bool spte_has_volatile_bits(u64 spte)
 {
        /*
-        * Always atomicly update spte if it can be updated
+        * Always atomically update spte if it can be updated
         * out of mmu-lock, it can ensure dirty bit is not lost,
         * also, it can help us to get a stable is_writable_pte()
         * to ensure tlb flush is not missed.
@@ -550,15 +550,22 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 
        /*
         * For the spte updated out of mmu-lock is safe, since
-        * we always atomicly update it, see the comments in
+        * we always atomically update it, see the comments in
         * spte_has_volatile_bits().
         */
        if (spte_is_locklessly_modifiable(old_spte) &&
              !is_writable_pte(new_spte))
                ret = true;
 
-       if (!shadow_accessed_mask)
+       if (!shadow_accessed_mask) {
+               /*
+                * We don't set page dirty when dropping non-writable spte.
+                * So do it now if the new spte is becoming non-writable.
+                */
+               if (ret)
+                       kvm_set_pfn_dirty(spte_to_pfn(old_spte));
                return ret;
+       }
 
        /*
         * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
-       if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+       if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+                                           PT_WRITABLE_MASK))
                kvm_set_pfn_dirty(pfn);
        return 1;
 }
@@ -2815,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         */
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
-           PageTransCompound(pfn_to_page(pfn)) &&
+           PageTransCompoundMap(pfn_to_page(pfn)) &&
            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
                /*
@@ -4777,7 +4785,7 @@ restart:
                 */
                if (sp->role.direct &&
                        !kvm_is_reserved_pfn(pfn) &&
-                       PageTransCompound(pfn_to_page(pfn))) {
+                       PageTransCompoundMap(pfn_to_page(pfn))) {
                        drop_spte(kvm, sptep);
                        need_tlb_flush = 1;
                        goto restart;
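
The dirty-tracking change in mmu_spte_clear_track_bits() can be read in isolation: when the hardware does not expose a dirty bit (shadow_dirty_mask == 0), the patch now reports the pfn dirty only if the dropped spte was writable, rather than unconditionally. A minimal userspace sketch of that decision follows; the helper name and the writable-bit position are assumptions for illustration only, not the kernel's actual code (the real masks live in arch/x86/kvm/mmu.c).

/*
 * Sketch (not kernel code) of the dirty-bit decision made by
 * mmu_spte_clear_track_bits() after the patch: prefer the hardware
 * dirty bit when one exists, otherwise fall back to the writable bit,
 * since only a writable spte can have dirtied the page.
 */
#include <stdbool.h>
#include <stdint.h>

/* Assumed bit position, mirroring x86's PT_WRITABLE_MASK, for illustration. */
#define SKETCH_WRITABLE_MASK (1ULL << 1)

/* Hypothetical helper: should dropping this spte mark its pfn dirty? */
static bool spte_marks_pfn_dirty(uint64_t old_spte, uint64_t shadow_dirty_mask)
{
        uint64_t mask = shadow_dirty_mask ? shadow_dirty_mask
                                          : SKETCH_WRITABLE_MASK;

        return (old_spte & mask) != 0;
}

int main(void)
{
        /* With no hardware dirty bit, only a writable spte reports dirty. */
        uint64_t spte_ro = 0;
        uint64_t spte_rw = SKETCH_WRITABLE_MASK;

        return (spte_marks_pfn_dirty(spte_rw, 0) &&
                !spte_marks_pfn_dirty(spte_ro, 0)) ? 0 : 1;
}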