mm/page_alloc: introduce post allocation processing on page allocator
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tue, 26 Jul 2016 22:23:58 +0000 (15:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 23:19:19 +0000 (16:19 -0700)
This patch is motivated by Hugh's and Vlastimil's concern [1].

There are two ways to get a free page from the allocator.  One is the
normal memory allocation API and the other is __isolate_free_page(),
which is used internally for compaction and pageblock isolation.  The
latter is rather tricky since it doesn't do the whole post-allocation
processing that the normal API does.

One problem I already know of is that a poisoned page is not checked
when it is allocated by __isolate_free_page().  Perhaps there are more.
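
For reference, the post-allocation processing applied by the normal
allocation path roughly looks like the following sketch, condensed from
the mm/page_alloc.c hunk below; before this patch, __isolate_free_page()
users open-coded only parts of it:

	/* per-page post-allocation work done by the normal allocator */
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);	/* the check missed by __isolate_free_page() users */
	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);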

We could add more debug logic for allocated pages in the future, and
this separation would cause more problems.  I'd like to fix this
situation now.  The solution is simple: this patch commonizes the logic
for newly allocated pages into a single hook and uses it at all sites.
This solves the problem.

[1] http://marc.info/?i=alpine.LSU.2.11.1604270029350.7066%40eggly.anvils%3E
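
A minimal sketch of the intended caller pattern for pages taken via
__isolate_free_page(), mirroring the compaction and page isolation hunks
below (not a new call site added by this patch):

	/* a caller that obtained a free page via __isolate_free_page() */
	order = page_private(page);
	post_alloc_hook(page, order, __GFP_MOVABLE);
	if (order)
		split_page(page, order);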

[iamjoonsoo.kim@lge.com: mm-page_alloc-introduce-post-allocation-processing-on-page-allocator-v3]
Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com
Link: http://lkml.kernel.org/r/1466150259-27727-9-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/compaction.c
mm/internal.h
mm/page_alloc.c
mm/page_isolation.c

index 4ae1294068a8652df2a365aefb15cb748d7d40a6..64df5fe052db656dcfd38999abdf561942dfd4c5 100644 (file)
@@ -74,14 +74,8 @@ static void map_pages(struct list_head *list)
 
                order = page_private(page);
                nr_pages = 1 << order;
-               set_page_private(page, 0);
-               set_page_refcounted(page);
 
-               arch_alloc_page(page, order);
-               kernel_map_pages(page, nr_pages, 1);
-               kasan_alloc_pages(page, order);
-
-               set_page_owner(page, order, __GFP_MOVABLE);
+               post_alloc_hook(page, order, __GFP_MOVABLE);
                if (order)
                        split_page(page, order);
 
index 2524ec880e242ceb6efb5531b21f4bade9e9a3c0..fbfba0cc2c35d88bd3db8d801219e54bcb253e14 100644 (file)
@@ -150,6 +150,8 @@ extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
                                        unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned int order);
+extern void post_alloc_hook(struct page *page, unsigned int order,
+                                       gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
index a82b303c19b168dd7e4ceebbb48571d1e2ab46ab..13cf4c665321ef718259c42a8dec14d31f4000c8 100644 (file)
@@ -1724,6 +1724,19 @@ static bool check_new_pages(struct page *page, unsigned int order)
        return false;
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+                               gfp_t gfp_flags)
+{
+       set_page_private(page, 0);
+       set_page_refcounted(page);
+
+       arch_alloc_page(page, order);
+       kernel_map_pages(page, 1 << order, 1);
+       kernel_poison_pages(page, 1 << order, 1);
+       kasan_alloc_pages(page, order);
+       set_page_owner(page, order, gfp_flags);
+}
+
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                        unsigned int alloc_flags)
 {
@@ -1736,13 +1749,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                        poisoned &= page_is_poisoned(p);
        }
 
-       set_page_private(page, 0);
-       set_page_refcounted(page);
-
-       arch_alloc_page(page, order);
-       kernel_map_pages(page, 1 << order, 1);
-       kernel_poison_pages(page, 1 << order, 1);
-       kasan_alloc_pages(page, order);
+       post_alloc_hook(page, order, gfp_flags);
 
        if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
                for (i = 0; i < (1 << order); i++)
@@ -1751,8 +1758,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
 
-       set_page_owner(page, order, gfp_flags);
-
        /*
         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
index 927f5ee24c879a96e5c604b8b6c1a328d9c5d8c8..4639163b78f9c45f09834524633f22429a1543c9 100644 (file)
@@ -128,9 +128,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
-               kernel_map_pages(page, (1 << order), 1);
-               set_page_refcounted(page);
-               set_page_owner(page, order, __GFP_MOVABLE);
+               post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(isolated_page, order);
        }
 }