xen/evtchn: fix ring resize when binding new events
[deliverable/linux.git] mm/migrate.c
index 568284ec75d419863866f43065003883b8d81939..6c822a7b27e066148d38fd9ed47f80a3ad4de055 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -172,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        else
                page_add_file_rmap(new);
 
-       if (vma->vm_flags & VM_LOCKED)
+       if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                mlock_vma_page(new);
 
        /* No need to invalidate - it was non-present before */
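Note on the new guard: it relies on && binding more loosely than &. A fully parenthesized sketch of the same check (equivalent, not part of the patch):

	/* Only mlock the restored page when the VMA is mlocked and the
	 * page is not a transparent huge page. */
	if ((vma->vm_flags & VM_LOCKED) && !PageTransCompound(new))
		mlock_vma_page(new);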
@@ -187,14 +187,17 @@ out:
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-static void remove_migration_ptes(struct page *old, struct page *new)
+void remove_migration_ptes(struct page *old, struct page *new, bool locked)
 {
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
        };
 
-       rmap_walk(new, &rwc);
+       if (locked)
+               rmap_walk_locked(new, &rwc);
+       else
+               rmap_walk(new, &rwc);
 }
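remove_migration_ptes() is made non-static and gains a "locked" flag so that a caller which already holds the page's rmap lock can request the lock-free walk (the declaration presumably moves to an mm-internal header). A hypothetical caller sketch (the function name here is made up for illustration):

	/* Hypothetical caller that already holds the anon_vma lock of 'new'
	 * (for example a THP split path): ask for rmap_walk_locked() instead
	 * of letting remove_migration_ptes() take the lock itself. */
	static void restore_migration_ptes_locked(struct page *old, struct page *new)
	{
		remove_migration_ptes(old, new, true);
	}

All call sites inside this file keep passing false, as they do not hold the rmap lock.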
 
 /*
@@ -349,7 +352,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
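page_freeze_refs()/page_unfreeze_refs() are renamed to the page_ref_* helpers, which operate on the renamed _refcount field. For reference, a sketch of the freeze helper along the lines of <linux/page_ref.h> (tracepoint hook omitted):

	static inline int page_ref_freeze(struct page *page, int count)
	{
		/* Succeeds only if exactly 'count' references exist; the count is
		 * atomically replaced by 0 so no new reference can be taken. */
		return likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
	}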
@@ -363,7 +366,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
-               page_unfreeze_refs(page, expected_count);
+               page_ref_unfreeze(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -397,7 +400,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock(&mapping->tree_lock);
        /* Leave irq disabled to prevent preemption while updating stats */
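The old page is unfrozen to expected_count - 1 because the reference formerly held through its radix tree slot now pins newpage instead. A matching sketch of the unfreeze helper, again along the lines of <linux/page_ref.h> (tracepoint hook omitted):

	static inline void page_ref_unfreeze(struct page *page, int count)
	{
		VM_BUG_ON_PAGE(page_count(page) != 0, page);
		VM_BUG_ON(count == 0);
		/* Re-expose the page with the given reference count. */
		atomic_set(&page->_refcount, count);
	}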
@@ -451,7 +454,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -463,7 +466,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
 
@@ -702,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page)
         * At this point we know that the migration attempt cannot
         * be successful.
         */
-       remove_migration_ptes(page, page);
+       remove_migration_ptes(page, page, false);
 
        rc = mapping->a_ops->writepage(page, &wbc);
 
@@ -900,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (page_was_mapped)
                remove_migration_ptes(page,
-                       rc == MIGRATEPAGE_SUCCESS ? newpage : page);
+                       rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 
 out_unlock_both:
        unlock_page(newpage);
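For orientation, the unmap/move/remap pattern that both this call site and the huge-page variant below follow, in schematic form (a simplified sketch, not the actual function body):

	/* Migration entries are installed by try_to_unmap(); after the move
	 * attempt they are resolved either to the new page (on success) or
	 * back to the old one (on failure). */
	if (page_mapped(page)) {
		try_to_unmap(page,
			TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);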
@@ -1070,7 +1073,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (page_was_mapped)
                remove_migration_ptes(hpage,
-                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
+                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
 
        unlock_page(new_hpage);
 
@@ -1773,7 +1776,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-
+       /*
+        * We are not sure a pending tlb flush here is for a huge page
+        * mapping or not. Hence use the tlb range variant
+        */
        if (mm_tlb_flush_pending(mm))
                flush_tlb_range(vma, mmun_start, mmun_end);
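For context, a sketch (simplified, not from this patch) of the deferred-flush protocol that mm_tlb_flush_pending() observes, roughly as used on the mprotect/NUMA-balancing side:

	set_tlb_flush_pending(mm);		/* announce that a flush will follow */
	/* ... PTEs/PMDs are modified under the page table lock ... */
	flush_tlb_range(vma, start, end);	/* the deferred flush */
	clear_tlb_flush_pending(mm);

Until clear_tlb_flush_pending() runs, the check above sees the pending flush and conservatively flushes the whole range itself, since it cannot tell whether that flush covered a huge-page mapping.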
 
@@ -1829,12 +1835,11 @@ fail_putback:
        page_add_anon_rmap(new_page, vma, mmun_start, true);
        pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
-       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
-               flush_tlb_range(vma, mmun_start, mmun_end);
+               flush_pmd_tlb_range(vma, mmun_start, mmun_end);
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page, true);
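flush_pmd_tlb_range() is the PMD-level range-flush helper used for THP; architectures without special THP TLB handling fall back to a plain range flush, roughly as in the generic pgtable code (sketch):

	#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
	/* Generic fallback: no PMD-specific flush, just flush the range. */
	#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
	#endif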