Merge tag 'powerpc-4.6-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
diff --git a/mm/migrate.c b/mm/migrate.c
index b1034f9c77e7d5a9bdbe60692396e5584c6991fc..f9dfb18a4ebac9f2f36d798ce6fa6b4ecd4d3c77 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -38,6 +38,7 @@
 #include <linux/balloon_compaction.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlbflush.h>
 
@@ -171,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        else
                page_add_file_rmap(new);
 
-       if (vma->vm_flags & VM_LOCKED)
+       if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                mlock_vma_page(new);
 
        /* No need to invalidate - it was non-present before */
@@ -186,14 +187,17 @@ out:
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-static void remove_migration_ptes(struct page *old, struct page *new)
+void remove_migration_ptes(struct page *old, struct page *new, bool locked)
 {
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
        };
 
-       rmap_walk(new, &rwc);
+       if (locked)
+               rmap_walk_locked(new, &rwc);
+       else
+               rmap_walk(new, &rwc);
 }
 
 /*
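
remove_migration_ptes() is now non-static and takes a `locked` flag, so a caller that already holds the relevant rmap locks (anon_vma lock or i_mmap_rwsem) can ask for rmap_walk_locked() instead of having rmap_walk() take them again. A minimal caller sketch under that assumption; the function name below is illustrative and not part of this diff:

	/* Hypothetical caller, not part of this diff: rmap locks already held. */
	static void restore_migration_ptes_locked(struct page *old, struct page *new)
	{
		/* Walk new's rmap without re-taking the anon_vma / i_mmap locks. */
		remove_migration_ptes(old, new, true);
	}
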
@@ -325,7 +329,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
                        return -EAGAIN;
 
                /* No turning back from here */
-               set_page_memcg(newpage, page_memcg(page));
                newpage->index = page->index;
                newpage->mapping = page->mapping;
                if (PageSwapBacked(page))
@@ -349,7 +352,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -363,7 +366,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        if (mode == MIGRATE_ASYNC && head &&
                        !buffer_migrate_lock_buffers(head, mode)) {
-               page_unfreeze_refs(page, expected_count);
+               page_ref_unfreeze(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -372,7 +375,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
-       set_page_memcg(newpage, page_memcg(page));
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
@@ -398,7 +400,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock(&mapping->tree_lock);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -452,21 +454,22 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       if (!page_freeze_refs(page, expected_count)) {
+       if (!page_ref_freeze(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
 
-       set_page_memcg(newpage, page_memcg(page));
        newpage->index = page->index;
        newpage->mapping = page->mapping;
+
        get_page(newpage);
 
        radix_tree_replace_slot(pslot, newpage);
 
-       page_unfreeze_refs(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
+
        return MIGRATEPAGE_SUCCESS;
 }
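
page_freeze_refs()/page_unfreeze_refs() are renamed to page_ref_freeze()/page_ref_unfreeze(), apparently as part of consolidating refcount manipulation behind the page_ref_* helpers; the freeze/replace/unfreeze pattern itself is unchanged. A condensed sketch of that pattern, assuming mapping->tree_lock is held; the function name is illustrative only:

	/* Condensed sketch (illustrative name); mapping->tree_lock must be held. */
	static int replace_page_in_radix_tree(struct page *page, struct page *newpage,
					      void **pslot, int expected_count)
	{
		/* Fail if anyone else still holds a reference we did not expect. */
		if (!page_ref_freeze(page, expected_count))
			return -EAGAIN;

		newpage->index = page->index;
		newpage->mapping = page->mapping;

		get_page(newpage);			/* reference now owned by the tree */
		radix_tree_replace_slot(pslot, newpage);

		/* Drop the reference the tree held on the old page. */
		page_ref_unfreeze(page, expected_count - 1);
		return MIGRATEPAGE_SUCCESS;
	}
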
 
@@ -578,6 +581,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
+
+       copy_page_owner(page, newpage);
+
+       mem_cgroup_migrate(page, newpage);
 }
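
With copy_page_owner() and mem_cgroup_migrate() added to migrate_page_copy(), the memcg charge and page_owner state now move together with the data copy, which is why the explicit set_page_memcg() calls are dropped elsewhere in this diff. A rough sketch of how a migratepage callback uses the helpers, modeled loosely on the generic migrate_page(); this is illustrative, not this file's exact code:

	static int example_migratepage(struct address_space *mapping,
				       struct page *newpage, struct page *page,
				       enum migrate_mode mode)
	{
		int rc;

		/* Move the radix-tree slot and refcounts over to newpage. */
		rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
		if (rc != MIGRATEPAGE_SUCCESS)
			return rc;

		/* Copies data and flags, and now also page_owner info and the memcg charge. */
		migrate_page_copy(newpage, page);
		return MIGRATEPAGE_SUCCESS;
	}
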
 
 /************************************************************
@@ -698,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page)
         * At this point we know that the migration attempt cannot
         * be successful.
         */
-       remove_migration_ptes(page, page);
+       remove_migration_ptes(page, page, false);
 
        rc = mapping->a_ops->writepage(page, &wbc);
 
@@ -772,7 +779,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-               set_page_memcg(page, NULL);
                if (!PageAnon(page))
                        page->mapping = NULL;
        }
@@ -897,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (page_was_mapped)
                remove_migration_ptes(page,
-                       rc == MIGRATEPAGE_SUCCESS ? newpage : page);
+                       rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 
 out_unlock_both:
        unlock_page(newpage);
@@ -952,8 +958,10 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
        }
 
        rc = __unmap_and_move(page, newpage, force, mode);
-       if (rc == MIGRATEPAGE_SUCCESS)
+       if (rc == MIGRATEPAGE_SUCCESS) {
                put_new_page = NULL;
+               set_page_owner_migrate_reason(newpage, reason);
+       }
 
 out:
        if (rc != -EAGAIN) {
@@ -967,7 +975,13 @@ out:
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                /* Soft-offlined page shouldn't go through lru cache list */
-               if (reason == MR_MEMORY_FAILURE) {
+               if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+                       /*
+                        * With this release, we free successfully migrated
+                        * page and set PG_HWPoison on just freed page
+                        * intentionally. Although it's rather weird, it's how
+                        * HWPoison flag works at the moment.
+                        */
                        put_page(page);
                        if (!test_set_page_hwpoison(page))
                                num_poisoned_pages_inc();
@@ -1018,7 +1032,7 @@ out:
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                free_page_t put_new_page, unsigned long private,
                                struct page *hpage, int force,
-                               enum migrate_mode mode)
+                               enum migrate_mode mode, int reason)
 {
        int rc = -EAGAIN;
        int *result = NULL;
@@ -1065,7 +1079,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (page_was_mapped)
                remove_migration_ptes(hpage,
-                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
+                       rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
 
        unlock_page(new_hpage);
 
@@ -1076,6 +1090,7 @@ put_anon:
        if (rc == MIGRATEPAGE_SUCCESS) {
                hugetlb_cgroup_migrate(hpage, new_hpage);
                put_new_page = NULL;
+               set_page_owner_migrate_reason(new_hpage, reason);
        }
 
        unlock_page(hpage);
@@ -1148,7 +1163,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                        if (PageHuge(page))
                                rc = unmap_and_move_huge_page(get_new_page,
                                                put_new_page, private, page,
-                                               pass > 2, mode);
+                                               pass > 2, mode, reason);
                        else
                                rc = unmap_and_move(get_new_page, put_new_page,
                                                private, page, pass > 2, mode,
@@ -1582,7 +1597,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                         (GFP_HIGHUSER_MOVABLE |
                                          __GFP_THISNODE | __GFP_NOMEMALLOC |
                                          __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~(__GFP_IO | __GFP_FS), 0);
+                                        ~__GFP_RECLAIM, 0);
 
        return newpage;
 }
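
Clearing __GFP_RECLAIM (both direct reclaim and the kswapd-wakeup bit) instead of only __GFP_IO | __GFP_FS means the NUMA-balancing destination allocation no longer enters reclaim at all, rather than merely reclaiming without doing I/O. The resulting mask, written out as a standalone assignment for illustration:

	/* Illustrative: the gfp mask used above for the misplaced-page destination. */
	gfp_t gfp_mask = (GFP_HIGHUSER_MOVABLE |
			  __GFP_THISNODE | __GFP_NOMEMALLOC |
			  __GFP_NORETRY | __GFP_NOWARN) &
			 ~__GFP_RECLAIM;	/* no direct reclaim, do not wake kswapd */
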
@@ -1767,7 +1782,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-
+       /*
+        * We are not sure a pending tlb flush here is for a huge page
+        * mapping or not. Hence use the tlb range variant
+        */
        if (mm_tlb_flush_pending(mm))
                flush_tlb_range(vma, mmun_start, mmun_end);
 
@@ -1823,12 +1841,11 @@ fail_putback:
        page_add_anon_rmap(new_page, vma, mmun_start, true);
        pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
        set_pmd_at(mm, mmun_start, pmd, entry);
-       flush_tlb_range(vma, mmun_start, mmun_end);
        update_mmu_cache_pmd(vma, address, &entry);
 
        if (page_count(page) != 2) {
                set_pmd_at(mm, mmun_start, pmd, orig_entry);
-               flush_tlb_range(vma, mmun_start, mmun_end);
+               flush_pmd_tlb_range(vma, mmun_start, mmun_end);
                mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
                update_mmu_cache_pmd(vma, address, &entry);
                page_remove_rmap(new_page, true);
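
flush_pmd_tlb_range() lets an architecture that needs to flush huge-page mappings differently (powerpc appears to be the motivation, given the merge this diff was generated from) supply its own PMD-aware flush; where it is not overridden it is expected to fall back to flush_tlb_range(). A sketch of that kind of generic fallback; the guard macro name is an assumption for illustration:

	/* Generic fallback sketch: arches without a PMD-aware flush keep the old behaviour. */
	#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
	#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
	#endif
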
@@ -1836,9 +1853,8 @@ fail_putback:
        }
 
        mlock_migrate_page(new_page, page);
-       set_page_memcg(new_page, page_memcg(page));
-       set_page_memcg(page, NULL);
        page_remove_rmap(page, true);
+       set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
 
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
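
set_page_owner_migrate_reason() comes from the new <linux/page_owner.h> include at the top of this diff and records why a page was migrated (here MR_NUMA_MISPLACED) for the page_owner debug facility; with CONFIG_PAGE_OWNER disabled it should compile away to nothing. A rough sketch of that stub pattern, not the header's exact contents (the real helper is additionally gated behind a static key):

	#ifdef CONFIG_PAGE_OWNER
	extern void __set_page_owner_migrate_reason(struct page *page, int reason);
	static inline void set_page_owner_migrate_reason(struct page *page, int reason)
	{
		__set_page_owner_migrate_reason(page, reason);
	}
	#else
	static inline void set_page_owner_migrate_reason(struct page *page, int reason)
	{
		/* no-op when page_owner tracking is not built in */
	}
	#endif
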