mm: page migration trylock newpage at same level as oldpage
author     Hugh Dickins <hughd@google.com>
           Fri, 6 Nov 2015 02:49:49 +0000 (18:49 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 6 Nov 2015 03:34:48 +0000 (19:34 -0800)
Clean up page migration a little by moving the trylock of newpage from
move_to_new_page() into __unmap_and_move(), where the old page has been
locked.  Adjust unmap_and_move_huge_page() and balloon_page_migrate()
accordingly.

But make one kind-of-functional change along the way: whereas the trylock
of newpage used to BUG() if it failed, now simply return -EAGAIN in that
case.  Cutting out BUG()s is good, right?  But, to be honest, this is
really to extend the usefulness of the custom put_new_page feature,
allowing a pool of new pages to be shared, perhaps with racing uses.

Use an "else" instead of that "skip_unmap" label.
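
In outline, the patched flow is this.  Below is a heavily simplified
sketch of __unmap_and_move(), not the verbatim kernel function (the
force/sync handling and unmap details are elided), only to show that
newpage is now trylocked and unlocked at the same level as oldpage:

/*
 * Simplified sketch: oldpage is locked first, as before; newpage is
 * now trylocked at the same level, and a failed trylock backs out
 * with -EAGAIN instead of BUG().
 */
static int unmap_and_move_sketch(struct page *page, struct page *newpage,
				 enum migrate_mode mode)
{
	int rc = -EAGAIN;

	if (!trylock_page(page))		/* oldpage, as before */
		goto out;

	if (unlikely(!trylock_page(newpage)))	/* new: -EAGAIN, not BUG() */
		goto out_unlock;

	if (!page_mapped(page))			/* both pages held locked */
		rc = move_to_new_page(newpage, page, 0, mode);

	unlock_page(newpage);			/* same level as oldpage */
out_unlock:
	unlock_page(page);
out:
	return rc;
}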

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/balloon_compaction.c
mm/migrate.c

diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index fcad8322ef36781c5c59e92ddee26caeb4c3c5f2..d3116be5a00fa51646b5a0b45683a138a4ed3f7c 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -199,23 +199,17 @@ int balloon_page_migrate(struct page *newpage,
        struct balloon_dev_info *balloon = balloon_page_device(page);
        int rc = -EAGAIN;
 
-       /*
-        * Block others from accessing the 'newpage' when we get around to
-        * establishing additional references. We should be the only one
-        * holding a reference to the 'newpage' at this point.
-        */
-       BUG_ON(!trylock_page(newpage));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
        if (WARN_ON(!__is_movable_balloon_page(page))) {
                dump_page(page, "not movable balloon page");
-               unlock_page(newpage);
                return rc;
        }
 
        if (balloon && balloon->migratepage)
                rc = balloon->migratepage(balloon, newpage, page, mode);
 
-       unlock_page(newpage);
        return rc;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
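
With this change, balloon_page_migrate() is entered with both page locks
already held and merely asserts them, rather than taking and dropping the
newpage lock itself.  A hypothetical caller-side wrapper, just to make the
new contract concrete (the wrapper function is invented; only
balloon_page_migrate() and the lock calls are real):

static int migrate_balloon_page_sketch(struct page *newpage,
				       struct page *page,
				       enum migrate_mode mode)
{
	int rc = -EAGAIN;

	lock_page(page);		/* oldpage: locked as before */
	if (trylock_page(newpage)) {	/* newpage: now the caller's job */
		rc = balloon_page_migrate(newpage, page, mode);
		unlock_page(newpage);
	}
	unlock_page(page);		/* rc stays -EAGAIN if trylock failed */
	return rc;
}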
diff --git a/mm/migrate.c b/mm/migrate.c
index 2f2e2236daf74ea24ab55351151068e469c34cd0..6d7774ef0e6c107b2262a265d8e453e297151568 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -727,13 +727,8 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        struct address_space *mapping;
        int rc;
 
-       /*
-        * Block others from accessing the page when we get around to
-        * establishing additional references. We are the only one
-        * holding a reference to the new page at this point.
-        */
-       if (!trylock_page(newpage))
-               BUG();
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
@@ -774,9 +769,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
        }
-
-       unlock_page(newpage);
-
        return rc;
 }
 
@@ -861,6 +853,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                }
        }
 
+       /*
+        * Block others from accessing the new page when we get around to
+        * establishing additional references. We are usually the only one
+        * holding a reference to newpage at this point. We used to have a BUG
+        * here if trylock_page(newpage) fails, but would like to allow for
+        * cases where there might be a race with the previous use of newpage.
+        * This is much like races on refcount of oldpage: just don't BUG().
+        */
+       if (unlikely(!trylock_page(newpage)))
+               goto out_unlock;
+
        if (unlikely(isolated_balloon_page(page))) {
                /*
                 * A ballooned page does not need any special attention from
@@ -870,7 +873,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 * the page migration right away (proteced by page lock).
                 */
                rc = balloon_page_migrate(newpage, page, mode);
-               goto out_unlock;
+               goto out_unlock_both;
        }
 
        /*
@@ -889,30 +892,27 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
-                       goto out_unlock;
+                       goto out_unlock_both;
                }
-               goto skip_unmap;
-       }
-
-       /* Establish migration ptes or remove ptes */
-       if (page_mapped(page)) {
+       } else if (page_mapped(page)) {
+               /* Establish migration ptes */
                try_to_unmap(page,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }
 
-skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page, page_was_mapped, mode);
 
        if (rc && page_was_mapped)
                remove_migration_ptes(page, page);
 
+out_unlock_both:
+       unlock_page(newpage);
+out_unlock:
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
-
-out_unlock:
        unlock_page(page);
 out:
        return rc;
@@ -1056,6 +1056,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (PageAnon(hpage))
                anon_vma = page_get_anon_vma(hpage);
 
+       if (unlikely(!trylock_page(new_hpage)))
+               goto put_anon;
+
        if (page_mapped(hpage)) {
                try_to_unmap(hpage,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
@@ -1068,6 +1071,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
                remove_migration_ptes(hpage, hpage);
 
+       unlock_page(new_hpage);
+
+put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);
 
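
The changelog's put_new_page remark deserves one concrete illustration.
Below is a hypothetical shared pool of target pages: page_pool,
pool_take() and pool_return() are invented names, while the callback
shapes follow the new_page_t/free_page_t typedefs that migrate_pages()
took at the time.  Because a failed trylock of newpage now returns
-EAGAIN instead of BUG()ing, a page still locked by a racing user is
simply handed back through put_new_page and recycled:

/* Hypothetical pool-backed callbacks for migrate_pages(). */
static struct page *pool_get_new_page(struct page *old,
				      unsigned long private, int **result)
{
	struct page_pool *pool = (struct page_pool *)private;

	return pool_take(pool);		/* may still race with a prior use */
}

static void pool_put_new_page(struct page *newpage, unsigned long private)
{
	struct page_pool *pool = (struct page_pool *)private;

	pool_return(pool, newpage);	/* recycle for the next attempt */
}

A caller would pass these as the get_new_page/put_new_page arguments to
migrate_pages(), with the pool pointer in private.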