mm, debug: replace dump_flags() with the new printk formats
[deliverable/linux.git] / mm / page_alloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4 23#include <linux/bootmem.h>
edbe7d23 24#include <linux/memblock.h>
1da177e4 25#include <linux/compiler.h>
9f158333 26#include <linux/kernel.h>
b1eeab67 27#include <linux/kmemcheck.h>
b8c73fc2 28#include <linux/kasan.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
a238ab5b 34#include <linux/ratelimit.h>
5a3135c2 35#include <linux/oom.h>
1da177e4
LT
36#include <linux/notifier.h>
37#include <linux/topology.h>
38#include <linux/sysctl.h>
39#include <linux/cpu.h>
40#include <linux/cpuset.h>
bdc8cb98 41#include <linux/memory_hotplug.h>
1da177e4
LT
42#include <linux/nodemask.h>
43#include <linux/vmalloc.h>
a6cccdc3 44#include <linux/vmstat.h>
4be38e35 45#include <linux/mempolicy.h>
4b94ffdc 46#include <linux/memremap.h>
6811378e 47#include <linux/stop_machine.h>
c713216d
MG
48#include <linux/sort.h>
49#include <linux/pfn.h>
3fcfab16 50#include <linux/backing-dev.h>
933e312e 51#include <linux/fault-inject.h>
a5d76b54 52#include <linux/page-isolation.h>
eefa864b 53#include <linux/page_ext.h>
3ac7fe5a 54#include <linux/debugobjects.h>
dbb1f81c 55#include <linux/kmemleak.h>
56de7263 56#include <linux/compaction.h>
0d3d062a 57#include <trace/events/kmem.h>
268bb0ce 58#include <linux/prefetch.h>
6e543d57 59#include <linux/mm_inline.h>
041d3a8c 60#include <linux/migrate.h>
e30825f1 61#include <linux/page_ext.h>
949f7ec5 62#include <linux/hugetlb.h>
8bd75c77 63#include <linux/sched/rt.h>
48c96a36 64#include <linux/page_owner.h>
0e1cc95b 65#include <linux/kthread.h>
1da177e4 66
7ee3d4e8 67#include <asm/sections.h>
1da177e4 68#include <asm/tlbflush.h>
ac924c60 69#include <asm/div64.h>
1da177e4
LT
70#include "internal.h"
71
c8e251fa
CS
72/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
73static DEFINE_MUTEX(pcp_batch_high_lock);
7cd2b0a3 74#define MIN_PERCPU_PAGELIST_FRACTION (8)
c8e251fa 75
72812019
LS
76#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
77DEFINE_PER_CPU(int, numa_node);
78EXPORT_PER_CPU_SYMBOL(numa_node);
79#endif
80
7aac7898
LS
81#ifdef CONFIG_HAVE_MEMORYLESS_NODES
82/*
83 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
84 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
85 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
86 * defined in <linux/topology.h>.
87 */
88DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
89EXPORT_PER_CPU_SYMBOL(_numa_mem_);
ad2c8144 90int _node_numa_mem_[MAX_NUMNODES];
7aac7898
LS
91#endif
92
1da177e4 93/*
13808910 94 * Array of node states.
1da177e4 95 */
13808910
CL
96nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
97 [N_POSSIBLE] = NODE_MASK_ALL,
98 [N_ONLINE] = { { [0] = 1UL } },
99#ifndef CONFIG_NUMA
100 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
101#ifdef CONFIG_HIGHMEM
102 [N_HIGH_MEMORY] = { { [0] = 1UL } },
20b2f52b
LJ
103#endif
104#ifdef CONFIG_MOVABLE_NODE
105 [N_MEMORY] = { { [0] = 1UL } },
13808910
CL
106#endif
107 [N_CPU] = { { [0] = 1UL } },
108#endif /* NUMA */
109};
110EXPORT_SYMBOL(node_states);
111
c3d5f5f0
JL
112/* Protect totalram_pages and zone->managed_pages */
113static DEFINE_SPINLOCK(managed_page_count_lock);
114
6c231b7b 115unsigned long totalram_pages __read_mostly;
cb45b0e9 116unsigned long totalreserve_pages __read_mostly;
e48322ab 117unsigned long totalcma_pages __read_mostly;
ab8fabd4 118
1b76b02f 119int percpu_pagelist_fraction;
dcce284a 120gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1da177e4 121
bb14c2c7
VB
122/*
123 * A cached value of the page's pageblock's migratetype, used when the page is
124 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
125 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
126 * Also the migratetype set in the page does not necessarily match the pcplist
127 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
128 * other index - this ensures that it will be put on the correct CMA freelist.
129 */
130static inline int get_pcppage_migratetype(struct page *page)
131{
132 return page->index;
133}
134
135static inline void set_pcppage_migratetype(struct page *page, int migratetype)
136{
137 page->index = migratetype;
138}
139
452aa699
RW
140#ifdef CONFIG_PM_SLEEP
141/*
142 * The following functions are used by the suspend/hibernate code to temporarily
143 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
144 * while devices are suspended. To avoid races with the suspend/hibernate code,
145 * they should always be called with pm_mutex held (gfp_allowed_mask also should
146 * only be modified with pm_mutex held, unless the suspend/hibernate code is
147 * guaranteed not to run in parallel with that modification).
148 */
c9e664f1
RW
149
150static gfp_t saved_gfp_mask;
151
152void pm_restore_gfp_mask(void)
452aa699
RW
153{
154 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
155 if (saved_gfp_mask) {
156 gfp_allowed_mask = saved_gfp_mask;
157 saved_gfp_mask = 0;
158 }
452aa699
RW
159}
160
c9e664f1 161void pm_restrict_gfp_mask(void)
452aa699 162{
452aa699 163 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
164 WARN_ON(saved_gfp_mask);
165 saved_gfp_mask = gfp_allowed_mask;
d0164adc 166 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
452aa699 167}
f90ac398
MG
168
169bool pm_suspended_storage(void)
170{
d0164adc 171 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
f90ac398
MG
172 return false;
173 return true;
174}
452aa699
RW
175#endif /* CONFIG_PM_SLEEP */
176
d9c23400 177#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
d00181b9 178unsigned int pageblock_order __read_mostly;
d9c23400
MG
179#endif
180
d98c7a09 181static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 182
1da177e4
LT
183/*
184 * results with 256, 32 in the lowmem_reserve sysctl:
185 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
186 * 1G machine -> (16M dma, 784M normal, 224M high)
187 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
188 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
84109e15 189 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
190 *
191 * TBD: should special case ZONE_DMA32 machines here - in those we normally
192 * don't need any ZONE_NORMAL reservation
1da177e4 193 */
2f1b6248 194int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 195#ifdef CONFIG_ZONE_DMA
2f1b6248 196 256,
4b51d669 197#endif
fb0e7942 198#ifdef CONFIG_ZONE_DMA32
2f1b6248 199 256,
fb0e7942 200#endif
e53ef38d 201#ifdef CONFIG_HIGHMEM
2a1e274a 202 32,
e53ef38d 203#endif
2a1e274a 204 32,
2f1b6248 205};
1da177e4
LT
206
207EXPORT_SYMBOL(totalram_pages);
1da177e4 208
15ad7cdc 209static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 210#ifdef CONFIG_ZONE_DMA
2f1b6248 211 "DMA",
4b51d669 212#endif
fb0e7942 213#ifdef CONFIG_ZONE_DMA32
2f1b6248 214 "DMA32",
fb0e7942 215#endif
2f1b6248 216 "Normal",
e53ef38d 217#ifdef CONFIG_HIGHMEM
2a1e274a 218 "HighMem",
e53ef38d 219#endif
2a1e274a 220 "Movable",
033fbae9
DW
221#ifdef CONFIG_ZONE_DEVICE
222 "Device",
223#endif
2f1b6248
CL
224};
225
f1e61557
KS
226compound_page_dtor * const compound_page_dtors[] = {
227 NULL,
228 free_compound_page,
229#ifdef CONFIG_HUGETLB_PAGE
230 free_huge_page,
231#endif
9a982250
KS
232#ifdef CONFIG_TRANSPARENT_HUGEPAGE
233 free_transhuge_page,
234#endif
f1e61557
KS
235};
236
1da177e4 237int min_free_kbytes = 1024;
42aa83cb 238int user_min_free_kbytes = -1;
1da177e4 239
2c85f51d
JB
240static unsigned long __meminitdata nr_kernel_pages;
241static unsigned long __meminitdata nr_all_pages;
a3142c8e 242static unsigned long __meminitdata dma_reserve;
1da177e4 243
0ee332c1
TH
244#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
245static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
246static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
247static unsigned long __initdata required_kernelcore;
248static unsigned long __initdata required_movablecore;
249static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
342332e6 250static bool mirrored_kernelcore;
0ee332c1
TH
251
252/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
253int movable_zone;
254EXPORT_SYMBOL(movable_zone);
255#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 256
418508c1
MS
257#if MAX_NUMNODES > 1
258int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 259int nr_online_nodes __read_mostly = 1;
418508c1 260EXPORT_SYMBOL(nr_node_ids);
62bc62a8 261EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
262#endif
263
9ef9acb0
MG
264int page_group_by_mobility_disabled __read_mostly;
265
3a80a7fa
MG
266#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
267static inline void reset_deferred_meminit(pg_data_t *pgdat)
268{
269 pgdat->first_deferred_pfn = ULONG_MAX;
270}
271
272/* Returns true if the struct page for the pfn is uninitialised */
0e1cc95b 273static inline bool __meminit early_page_uninitialised(unsigned long pfn)
3a80a7fa 274{
ae026b2a 275 if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
3a80a7fa
MG
276 return true;
277
278 return false;
279}
280
7e18adb4
MG
281static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
282{
283 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
284 return true;
285
286 return false;
287}
288
3a80a7fa
MG
289/*
290 * Returns false when the remaining initialisation should be deferred until
291 * later in the boot cycle when it can be parallelised.
292 */
293static inline bool update_defer_init(pg_data_t *pgdat,
294 unsigned long pfn, unsigned long zone_end,
295 unsigned long *nr_initialised)
296{
297 /* Always populate low zones for address-contrained allocations */
298 if (zone_end < pgdat_end_pfn(pgdat))
299 return true;
300
301 /* Initialise at least 2G of the highest zone */
302 (*nr_initialised)++;
303 if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
304 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
305 pgdat->first_deferred_pfn = pfn;
306 return false;
307 }
308
309 return true;
310}
311#else
312static inline void reset_deferred_meminit(pg_data_t *pgdat)
313{
314}
315
316static inline bool early_page_uninitialised(unsigned long pfn)
317{
318 return false;
319}
320
7e18adb4
MG
321static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
322{
323 return false;
324}
325
3a80a7fa
MG
326static inline bool update_defer_init(pg_data_t *pgdat,
327 unsigned long pfn, unsigned long zone_end,
328 unsigned long *nr_initialised)
329{
330 return true;
331}
332#endif
333
334
ee6f509c 335void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 336{
5d0f3f72
KM
337 if (unlikely(page_group_by_mobility_disabled &&
338 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
339 migratetype = MIGRATE_UNMOVABLE;
340
b2a0ac88
MG
341 set_pageblock_flags_group(page, (unsigned long)migratetype,
342 PB_migrate, PB_migrate_end);
343}
344
13e7444b 345#ifdef CONFIG_DEBUG_VM
c6a57e19 346static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 347{
bdc8cb98
DH
348 int ret = 0;
349 unsigned seq;
350 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 351 unsigned long sp, start_pfn;
c6a57e19 352
bdc8cb98
DH
353 do {
354 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
355 start_pfn = zone->zone_start_pfn;
356 sp = zone->spanned_pages;
108bcc96 357 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
358 ret = 1;
359 } while (zone_span_seqretry(zone, seq));
360
b5e6a5a2 361 if (ret)
613813e8
DH
362 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
363 pfn, zone_to_nid(zone), zone->name,
364 start_pfn, start_pfn + sp);
b5e6a5a2 365
bdc8cb98 366 return ret;
c6a57e19
DH
367}
368
369static int page_is_consistent(struct zone *zone, struct page *page)
370{
14e07298 371 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 372 return 0;
1da177e4 373 if (zone != page_zone(page))
c6a57e19
DH
374 return 0;
375
376 return 1;
377}
378/*
379 * Temporary debugging check for pages not lying within a given zone.
380 */
381static int bad_range(struct zone *zone, struct page *page)
382{
383 if (page_outside_zone_boundaries(zone, page))
1da177e4 384 return 1;
c6a57e19
DH
385 if (!page_is_consistent(zone, page))
386 return 1;
387
1da177e4
LT
388 return 0;
389}
13e7444b
NP
390#else
391static inline int bad_range(struct zone *zone, struct page *page)
392{
393 return 0;
394}
395#endif
396
d230dec1
KS
397static void bad_page(struct page *page, const char *reason,
398 unsigned long bad_flags)
1da177e4 399{
d936cf9b
HD
400 static unsigned long resume;
401 static unsigned long nr_shown;
402 static unsigned long nr_unshown;
403
2a7684a2
WF
404 /* Don't complain about poisoned pages */
405 if (PageHWPoison(page)) {
22b751c3 406 page_mapcount_reset(page); /* remove PageBuddy */
2a7684a2
WF
407 return;
408 }
409
d936cf9b
HD
410 /*
411 * Allow a burst of 60 reports, then keep quiet for that minute;
412 * or allow a steady drip of one report per second.
413 */
414 if (nr_shown == 60) {
415 if (time_before(jiffies, resume)) {
416 nr_unshown++;
417 goto out;
418 }
419 if (nr_unshown) {
1e9e6365
HD
420 printk(KERN_ALERT
421 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
422 nr_unshown);
423 nr_unshown = 0;
424 }
425 nr_shown = 0;
426 }
427 if (nr_shown++ == 0)
428 resume = jiffies + 60 * HZ;
429
1e9e6365 430 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 431 current->comm, page_to_pfn(page));
f0b791a3 432 dump_page_badflags(page, reason, bad_flags);
3dc14741 433
4f31888c 434 print_modules();
1da177e4 435 dump_stack();
d936cf9b 436out:
8cc3b392 437 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 438 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 439 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
440}
441
1da177e4
LT
442/*
443 * Higher-order pages are called "compound pages". They are structured thusly:
444 *
1d798ca3 445 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
1da177e4 446 *
1d798ca3
KS
447 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
448 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
1da177e4 449 *
1d798ca3
KS
450 * The first tail page's ->compound_dtor holds the offset in array of compound
451 * page destructors. See compound_page_dtors.
1da177e4 452 *
1d798ca3 453 * The first tail page's ->compound_order holds the order of allocation.
41d78ba5 454 * This usage means that zero-order pages may not be compound.
1da177e4 455 */
d98c7a09 456
9a982250 457void free_compound_page(struct page *page)
d98c7a09 458{
d85f3385 459 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
460}
461
d00181b9 462void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
463{
464 int i;
465 int nr_pages = 1 << order;
466
f1e61557 467 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
18229df5
AW
468 set_compound_order(page, order);
469 __SetPageHead(page);
470 for (i = 1; i < nr_pages; i++) {
471 struct page *p = page + i;
58a84aa9 472 set_page_count(p, 0);
1c290f64 473 p->mapping = TAIL_MAPPING;
1d798ca3 474 set_compound_head(p, page);
18229df5 475 }
53f9263b 476 atomic_set(compound_mapcount_ptr(page), -1);
18229df5
AW
477}
478
c0a32fc5
SG
479#ifdef CONFIG_DEBUG_PAGEALLOC
480unsigned int _debug_guardpage_minorder;
ea6eabb0
CB
481bool _debug_pagealloc_enabled __read_mostly
482 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
e30825f1
JK
483bool _debug_guardpage_enabled __read_mostly;
484
031bc574
JK
485static int __init early_debug_pagealloc(char *buf)
486{
487 if (!buf)
488 return -EINVAL;
489
490 if (strcmp(buf, "on") == 0)
491 _debug_pagealloc_enabled = true;
492
ea6eabb0
CB
493 if (strcmp(buf, "off") == 0)
494 _debug_pagealloc_enabled = false;
495
031bc574
JK
496 return 0;
497}
498early_param("debug_pagealloc", early_debug_pagealloc);
499
e30825f1
JK
500static bool need_debug_guardpage(void)
501{
031bc574
JK
502 /* If we don't use debug_pagealloc, we don't need guard page */
503 if (!debug_pagealloc_enabled())
504 return false;
505
e30825f1
JK
506 return true;
507}
508
509static void init_debug_guardpage(void)
510{
031bc574
JK
511 if (!debug_pagealloc_enabled())
512 return;
513
e30825f1
JK
514 _debug_guardpage_enabled = true;
515}
516
517struct page_ext_operations debug_guardpage_ops = {
518 .need = need_debug_guardpage,
519 .init = init_debug_guardpage,
520};
c0a32fc5
SG
521
522static int __init debug_guardpage_minorder_setup(char *buf)
523{
524 unsigned long res;
525
526 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
527 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
528 return 0;
529 }
530 _debug_guardpage_minorder = res;
531 printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
532 return 0;
533}
534__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
535
2847cf95
JK
536static inline void set_page_guard(struct zone *zone, struct page *page,
537 unsigned int order, int migratetype)
c0a32fc5 538{
e30825f1
JK
539 struct page_ext *page_ext;
540
541 if (!debug_guardpage_enabled())
542 return;
543
544 page_ext = lookup_page_ext(page);
545 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
546
2847cf95
JK
547 INIT_LIST_HEAD(&page->lru);
548 set_page_private(page, order);
549 /* Guard pages are not available for any usage */
550 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
c0a32fc5
SG
551}
552
2847cf95
JK
553static inline void clear_page_guard(struct zone *zone, struct page *page,
554 unsigned int order, int migratetype)
c0a32fc5 555{
e30825f1
JK
556 struct page_ext *page_ext;
557
558 if (!debug_guardpage_enabled())
559 return;
560
561 page_ext = lookup_page_ext(page);
562 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
563
2847cf95
JK
564 set_page_private(page, 0);
565 if (!is_migrate_isolate(migratetype))
566 __mod_zone_freepage_state(zone, (1 << order), migratetype);
c0a32fc5
SG
567}
568#else
e30825f1 569struct page_ext_operations debug_guardpage_ops = { NULL, };
2847cf95
JK
570static inline void set_page_guard(struct zone *zone, struct page *page,
571 unsigned int order, int migratetype) {}
572static inline void clear_page_guard(struct zone *zone, struct page *page,
573 unsigned int order, int migratetype) {}
c0a32fc5
SG
574#endif
575
7aeb09f9 576static inline void set_page_order(struct page *page, unsigned int order)
6aa3001b 577{
4c21e2f2 578 set_page_private(page, order);
676165a8 579 __SetPageBuddy(page);
1da177e4
LT
580}
581
582static inline void rmv_page_order(struct page *page)
583{
676165a8 584 __ClearPageBuddy(page);
4c21e2f2 585 set_page_private(page, 0);
1da177e4
LT
586}
587
1da177e4
LT
588/*
589 * This function checks whether a page is free && is the buddy
590 * we can do coalesce a page and its buddy if
13e7444b 591 * (a) the buddy is not in a hole &&
676165a8 592 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
593 * (c) a page and its buddy have the same order &&
594 * (d) a page and its buddy are in the same zone.
676165a8 595 *
cf6fe945
WSH
596 * For recording whether a page is in the buddy system, we set ->_mapcount
597 * PAGE_BUDDY_MAPCOUNT_VALUE.
598 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
599 * serialized by zone->lock.
1da177e4 600 *
676165a8 601 * For recording page's order, we use page_private(page).
1da177e4 602 */
cb2b95e1 603static inline int page_is_buddy(struct page *page, struct page *buddy,
7aeb09f9 604 unsigned int order)
1da177e4 605{
14e07298 606 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 607 return 0;
13e7444b 608
c0a32fc5 609 if (page_is_guard(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
610 if (page_zone_id(page) != page_zone_id(buddy))
611 return 0;
612
4c5018ce
WY
613 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
614
c0a32fc5
SG
615 return 1;
616 }
617
cb2b95e1 618 if (PageBuddy(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
619 /*
620 * zone check is done late to avoid uselessly
621 * calculating zone/node ids for pages that could
622 * never merge.
623 */
624 if (page_zone_id(page) != page_zone_id(buddy))
625 return 0;
626
4c5018ce
WY
627 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
628
6aa3001b 629 return 1;
676165a8 630 }
6aa3001b 631 return 0;
1da177e4
LT
632}
633
634/*
635 * Freeing function for a buddy system allocator.
636 *
637 * The concept of a buddy system is to maintain direct-mapped table
638 * (containing bit values) for memory blocks of various "orders".
639 * The bottom level table contains the map for the smallest allocatable
640 * units of memory (here, pages), and each level above it describes
641 * pairs of units from the levels below, hence, "buddies".
642 * At a high level, all that happens here is marking the table entry
643 * at the bottom level available, and propagating the changes upward
644 * as necessary, plus some accounting needed to play nicely with other
645 * parts of the VM system.
646 * At each level, we keep a list of pages, which are heads of continuous
cf6fe945
WSH
647 * free pages of length of (1 << order) and marked with _mapcount
648 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
649 * field.
1da177e4 650 * So when we are allocating or freeing one, we can derive the state of the
5f63b720
MN
651 * other. That is, if we allocate a small block, and both were
652 * free, the remainder of the region must be split into blocks.
1da177e4 653 * If a block is freed, and its buddy is also free, then this
5f63b720 654 * triggers coalescing into a block of larger size.
1da177e4 655 *
6d49e352 656 * -- nyc
1da177e4
LT
657 */
658
48db57f8 659static inline void __free_one_page(struct page *page,
dc4b0caf 660 unsigned long pfn,
ed0ae21d
MG
661 struct zone *zone, unsigned int order,
662 int migratetype)
1da177e4
LT
663{
664 unsigned long page_idx;
6dda9d55 665 unsigned long combined_idx;
43506fad 666 unsigned long uninitialized_var(buddy_idx);
6dda9d55 667 struct page *buddy;
d00181b9 668 unsigned int max_order = MAX_ORDER;
1da177e4 669
d29bb978 670 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 671 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 672
ed0ae21d 673 VM_BUG_ON(migratetype == -1);
3c605096
JK
674 if (is_migrate_isolate(migratetype)) {
675 /*
676 * We restrict max order of merging to prevent merge
677 * between freepages on isolate pageblock and normal
678 * pageblock. Without this, pageblock isolation
679 * could cause incorrect freepage accounting.
680 */
d00181b9 681 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
3c605096 682 } else {
8f82b55d 683 __mod_zone_freepage_state(zone, 1 << order, migratetype);
3c605096 684 }
ed0ae21d 685
3c605096 686 page_idx = pfn & ((1 << max_order) - 1);
1da177e4 687
309381fe
SL
688 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
689 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 690
3c605096 691 while (order < max_order - 1) {
43506fad
KC
692 buddy_idx = __find_buddy_index(page_idx, order);
693 buddy = page + (buddy_idx - page_idx);
cb2b95e1 694 if (!page_is_buddy(page, buddy, order))
3c82d0ce 695 break;
c0a32fc5
SG
696 /*
697 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
698 * merge with it and move up one order.
699 */
700 if (page_is_guard(buddy)) {
2847cf95 701 clear_page_guard(zone, buddy, order, migratetype);
c0a32fc5
SG
702 } else {
703 list_del(&buddy->lru);
704 zone->free_area[order].nr_free--;
705 rmv_page_order(buddy);
706 }
43506fad 707 combined_idx = buddy_idx & page_idx;
1da177e4
LT
708 page = page + (combined_idx - page_idx);
709 page_idx = combined_idx;
710 order++;
711 }
712 set_page_order(page, order);
6dda9d55
CZ
713
714 /*
715 * If this is not the largest possible page, check if the buddy
716 * of the next-highest order is free. If it is, it's possible
717 * that pages are being freed that will coalesce soon. In case,
718 * that is happening, add the free page to the tail of the list
719 * so it's less likely to be used soon and more likely to be merged
720 * as a higher order page
721 */
b7f50cfa 722 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
6dda9d55 723 struct page *higher_page, *higher_buddy;
43506fad
KC
724 combined_idx = buddy_idx & page_idx;
725 higher_page = page + (combined_idx - page_idx);
726 buddy_idx = __find_buddy_index(combined_idx, order + 1);
0ba8f2d5 727 higher_buddy = higher_page + (buddy_idx - combined_idx);
6dda9d55
CZ
728 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
729 list_add_tail(&page->lru,
730 &zone->free_area[order].free_list[migratetype]);
731 goto out;
732 }
733 }
734
735 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
736out:
1da177e4
LT
737 zone->free_area[order].nr_free++;
738}
739
224abf92 740static inline int free_pages_check(struct page *page)
1da177e4 741{
d230dec1 742 const char *bad_reason = NULL;
f0b791a3
DH
743 unsigned long bad_flags = 0;
744
53f9263b 745 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
746 bad_reason = "nonzero mapcount";
747 if (unlikely(page->mapping != NULL))
748 bad_reason = "non-NULL mapping";
749 if (unlikely(atomic_read(&page->_count) != 0))
750 bad_reason = "nonzero _count";
751 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
752 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
753 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
754 }
9edad6ea
JW
755#ifdef CONFIG_MEMCG
756 if (unlikely(page->mem_cgroup))
757 bad_reason = "page still charged to cgroup";
758#endif
f0b791a3
DH
759 if (unlikely(bad_reason)) {
760 bad_page(page, bad_reason, bad_flags);
79f4b7bf 761 return 1;
8cc3b392 762 }
90572890 763 page_cpupid_reset_last(page);
79f4b7bf
HD
764 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
765 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
766 return 0;
1da177e4
LT
767}
768
769/*
5f8dcc21 770 * Frees a number of pages from the PCP lists
1da177e4 771 * Assumes all pages on list are in same zone, and of same order.
207f36ee 772 * count is the number of pages to free.
1da177e4
LT
773 *
774 * If the zone was previously in an "all pages pinned" state then look to
775 * see if this freeing clears that state.
776 *
777 * And clear the zone's pages_scanned counter, to hold off the "all pages are
778 * pinned" detection logic.
779 */
5f8dcc21
MG
780static void free_pcppages_bulk(struct zone *zone, int count,
781 struct per_cpu_pages *pcp)
1da177e4 782{
5f8dcc21 783 int migratetype = 0;
a6f9edd6 784 int batch_free = 0;
72853e29 785 int to_free = count;
0d5d823a 786 unsigned long nr_scanned;
5f8dcc21 787
c54ad30c 788 spin_lock(&zone->lock);
0d5d823a
MG
789 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
790 if (nr_scanned)
791 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
f2260e6b 792
72853e29 793 while (to_free) {
48db57f8 794 struct page *page;
5f8dcc21
MG
795 struct list_head *list;
796
797 /*
a6f9edd6
MG
798 * Remove pages from lists in a round-robin fashion. A
799 * batch_free count is maintained that is incremented when an
800 * empty list is encountered. This is so more pages are freed
801 * off fuller lists instead of spinning excessively around empty
802 * lists
5f8dcc21
MG
803 */
804 do {
a6f9edd6 805 batch_free++;
5f8dcc21
MG
806 if (++migratetype == MIGRATE_PCPTYPES)
807 migratetype = 0;
808 list = &pcp->lists[migratetype];
809 } while (list_empty(list));
48db57f8 810
1d16871d
NK
811 /* This is the only non-empty list. Free them all. */
812 if (batch_free == MIGRATE_PCPTYPES)
813 batch_free = to_free;
814
a6f9edd6 815 do {
770c8aaa
BZ
816 int mt; /* migratetype of the to-be-freed page */
817
a16601c5 818 page = list_last_entry(list, struct page, lru);
a6f9edd6
MG
819 /* must delete as __free_one_page list manipulates */
820 list_del(&page->lru);
aa016d14 821
bb14c2c7 822 mt = get_pcppage_migratetype(page);
aa016d14
VB
823 /* MIGRATE_ISOLATE page should not go to pcplists */
824 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
825 /* Pageblock could have been isolated meanwhile */
8f82b55d 826 if (unlikely(has_isolate_pageblock(zone)))
51bb1a40 827 mt = get_pageblock_migratetype(page);
51bb1a40 828
dc4b0caf 829 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
770c8aaa 830 trace_mm_page_pcpu_drain(page, 0, mt);
72853e29 831 } while (--to_free && --batch_free && !list_empty(list));
1da177e4 832 }
c54ad30c 833 spin_unlock(&zone->lock);
1da177e4
LT
834}
835
dc4b0caf
MG
836static void free_one_page(struct zone *zone,
837 struct page *page, unsigned long pfn,
7aeb09f9 838 unsigned int order,
ed0ae21d 839 int migratetype)
1da177e4 840{
0d5d823a 841 unsigned long nr_scanned;
006d22d9 842 spin_lock(&zone->lock);
0d5d823a
MG
843 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
844 if (nr_scanned)
845 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
f2260e6b 846
ad53f92e
JK
847 if (unlikely(has_isolate_pageblock(zone) ||
848 is_migrate_isolate(migratetype))) {
849 migratetype = get_pfnblock_migratetype(page, pfn);
ad53f92e 850 }
dc4b0caf 851 __free_one_page(page, pfn, zone, order, migratetype);
006d22d9 852 spin_unlock(&zone->lock);
48db57f8
NP
853}
854
81422f29
KS
855static int free_tail_pages_check(struct page *head_page, struct page *page)
856{
1d798ca3
KS
857 int ret = 1;
858
859 /*
860 * We rely page->lru.next never has bit 0 set, unless the page
861 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
862 */
863 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
864
865 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
866 ret = 0;
867 goto out;
868 }
9a982250
KS
869 switch (page - head_page) {
870 case 1:
871 /* the first tail page: ->mapping is compound_mapcount() */
53f9263b
KS
872 if (unlikely(compound_mapcount(page))) {
873 bad_page(page, "nonzero compound_mapcount", 0);
874 goto out;
875 }
9a982250
KS
876 break;
877 case 2:
878 /*
879 * the second tail page: ->mapping is
880 * page_deferred_list().next -- ignore value.
881 */
882 break;
883 default:
884 if (page->mapping != TAIL_MAPPING) {
885 bad_page(page, "corrupted mapping in tail page", 0);
886 goto out;
887 }
888 break;
1c290f64 889 }
81422f29
KS
890 if (unlikely(!PageTail(page))) {
891 bad_page(page, "PageTail not set", 0);
1d798ca3 892 goto out;
81422f29 893 }
1d798ca3
KS
894 if (unlikely(compound_head(page) != head_page)) {
895 bad_page(page, "compound_head not consistent", 0);
896 goto out;
81422f29 897 }
1d798ca3
KS
898 ret = 0;
899out:
1c290f64 900 page->mapping = NULL;
1d798ca3
KS
901 clear_compound_head(page);
902 return ret;
81422f29
KS
903}
904
1e8ce83c
RH
905static void __meminit __init_single_page(struct page *page, unsigned long pfn,
906 unsigned long zone, int nid)
907{
1e8ce83c 908 set_page_links(page, zone, nid, pfn);
1e8ce83c
RH
909 init_page_count(page);
910 page_mapcount_reset(page);
911 page_cpupid_reset_last(page);
1e8ce83c 912
1e8ce83c
RH
913 INIT_LIST_HEAD(&page->lru);
914#ifdef WANT_PAGE_VIRTUAL
915 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
916 if (!is_highmem_idx(zone))
917 set_page_address(page, __va(pfn << PAGE_SHIFT));
918#endif
919}
920
921static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
922 int nid)
923{
924 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
925}
926
7e18adb4
MG
927#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
928static void init_reserved_page(unsigned long pfn)
929{
930 pg_data_t *pgdat;
931 int nid, zid;
932
933 if (!early_page_uninitialised(pfn))
934 return;
935
936 nid = early_pfn_to_nid(pfn);
937 pgdat = NODE_DATA(nid);
938
939 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
940 struct zone *zone = &pgdat->node_zones[zid];
941
942 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
943 break;
944 }
945 __init_single_pfn(pfn, zid, nid);
946}
947#else
948static inline void init_reserved_page(unsigned long pfn)
949{
950}
951#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
952
92923ca3
NZ
953/*
954 * Initialised pages do not have PageReserved set. This function is
955 * called for each range allocated by the bootmem allocator and
956 * marks the pages PageReserved. The remaining valid pages are later
957 * sent to the buddy page allocator.
958 */
7e18adb4 959void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
92923ca3
NZ
960{
961 unsigned long start_pfn = PFN_DOWN(start);
962 unsigned long end_pfn = PFN_UP(end);
963
7e18adb4
MG
964 for (; start_pfn < end_pfn; start_pfn++) {
965 if (pfn_valid(start_pfn)) {
966 struct page *page = pfn_to_page(start_pfn);
967
968 init_reserved_page(start_pfn);
1d798ca3
KS
969
970 /* Avoid false-positive PageTail() */
971 INIT_LIST_HEAD(&page->lru);
972
7e18adb4
MG
973 SetPageReserved(page);
974 }
975 }
92923ca3
NZ
976}
977
ec95f53a 978static bool free_pages_prepare(struct page *page, unsigned int order)
48db57f8 979{
81422f29
KS
980 bool compound = PageCompound(page);
981 int i, bad = 0;
1da177e4 982
ab1f306f 983 VM_BUG_ON_PAGE(PageTail(page), page);
81422f29 984 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
ab1f306f 985
b413d48a 986 trace_mm_page_free(page, order);
b1eeab67 987 kmemcheck_free_shadow(page, order);
b8c73fc2 988 kasan_free_pages(page, order);
b1eeab67 989
8dd60a3a
AA
990 if (PageAnon(page))
991 page->mapping = NULL;
81422f29
KS
992 bad += free_pages_check(page);
993 for (i = 1; i < (1 << order); i++) {
994 if (compound)
995 bad += free_tail_pages_check(page, page + i);
8dd60a3a 996 bad += free_pages_check(page + i);
81422f29 997 }
8cc3b392 998 if (bad)
ec95f53a 999 return false;
689bcebf 1000
48c96a36
JK
1001 reset_page_owner(page, order);
1002
3ac7fe5a 1003 if (!PageHighMem(page)) {
b8af2941
PK
1004 debug_check_no_locks_freed(page_address(page),
1005 PAGE_SIZE << order);
3ac7fe5a
TG
1006 debug_check_no_obj_freed(page_address(page),
1007 PAGE_SIZE << order);
1008 }
dafb1367 1009 arch_free_page(page, order);
48db57f8 1010 kernel_map_pages(page, 1 << order, 0);
dafb1367 1011
ec95f53a
KM
1012 return true;
1013}
1014
1015static void __free_pages_ok(struct page *page, unsigned int order)
1016{
1017 unsigned long flags;
95e34412 1018 int migratetype;
dc4b0caf 1019 unsigned long pfn = page_to_pfn(page);
ec95f53a
KM
1020
1021 if (!free_pages_prepare(page, order))
1022 return;
1023
cfc47a28 1024 migratetype = get_pfnblock_migratetype(page, pfn);
c54ad30c 1025 local_irq_save(flags);
f8891e5e 1026 __count_vm_events(PGFREE, 1 << order);
dc4b0caf 1027 free_one_page(page_zone(page), page, pfn, order, migratetype);
c54ad30c 1028 local_irq_restore(flags);
1da177e4
LT
1029}
1030
0e1cc95b 1031static void __init __free_pages_boot_core(struct page *page,
3a80a7fa 1032 unsigned long pfn, unsigned int order)
a226f6c8 1033{
c3993076 1034 unsigned int nr_pages = 1 << order;
e2d0bd2b 1035 struct page *p = page;
c3993076 1036 unsigned int loop;
a226f6c8 1037
e2d0bd2b
YL
1038 prefetchw(p);
1039 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1040 prefetchw(p + 1);
c3993076
JW
1041 __ClearPageReserved(p);
1042 set_page_count(p, 0);
a226f6c8 1043 }
e2d0bd2b
YL
1044 __ClearPageReserved(p);
1045 set_page_count(p, 0);
c3993076 1046
e2d0bd2b 1047 page_zone(page)->managed_pages += nr_pages;
c3993076
JW
1048 set_page_refcounted(page);
1049 __free_pages(page, order);
a226f6c8
DH
1050}
1051
75a592a4
MG
1052#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1053 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
7ace9917 1054
75a592a4
MG
1055static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1056
1057int __meminit early_pfn_to_nid(unsigned long pfn)
1058{
7ace9917 1059 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1060 int nid;
1061
7ace9917 1062 spin_lock(&early_pfn_lock);
75a592a4 1063 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917
MG
1064 if (nid < 0)
1065 nid = 0;
1066 spin_unlock(&early_pfn_lock);
1067
1068 return nid;
75a592a4
MG
1069}
1070#endif
1071
1072#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1073static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1074 struct mminit_pfnnid_cache *state)
1075{
1076 int nid;
1077
1078 nid = __early_pfn_to_nid(pfn, state);
1079 if (nid >= 0 && nid != node)
1080 return false;
1081 return true;
1082}
1083
1084/* Only safe to use early in boot when initialisation is single-threaded */
1085static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1086{
1087 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1088}
1089
1090#else
1091
1092static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1093{
1094 return true;
1095}
1096static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1097 struct mminit_pfnnid_cache *state)
1098{
1099 return true;
1100}
1101#endif
1102
1103
0e1cc95b 1104void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
3a80a7fa
MG
1105 unsigned int order)
1106{
1107 if (early_page_uninitialised(pfn))
1108 return;
1109 return __free_pages_boot_core(page, pfn, order);
1110}
1111
7e18adb4 1112#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 1113static void __init deferred_free_range(struct page *page,
a4de83dd
MG
1114 unsigned long pfn, int nr_pages)
1115{
1116 int i;
1117
1118 if (!page)
1119 return;
1120
1121 /* Free a large naturally-aligned chunk if possible */
1122 if (nr_pages == MAX_ORDER_NR_PAGES &&
1123 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
ac5d2539 1124 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a4de83dd
MG
1125 __free_pages_boot_core(page, pfn, MAX_ORDER-1);
1126 return;
1127 }
1128
1129 for (i = 0; i < nr_pages; i++, page++, pfn++)
1130 __free_pages_boot_core(page, pfn, 0);
1131}
1132
d3cd131d
NS
1133/* Completion tracking for deferred_init_memmap() threads */
1134static atomic_t pgdat_init_n_undone __initdata;
1135static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1136
1137static inline void __init pgdat_init_report_one_done(void)
1138{
1139 if (atomic_dec_and_test(&pgdat_init_n_undone))
1140 complete(&pgdat_init_all_done_comp);
1141}
0e1cc95b 1142
7e18adb4 1143/* Initialise remaining memory on a node */
0e1cc95b 1144static int __init deferred_init_memmap(void *data)
7e18adb4 1145{
0e1cc95b
MG
1146 pg_data_t *pgdat = data;
1147 int nid = pgdat->node_id;
7e18adb4
MG
1148 struct mminit_pfnnid_cache nid_init_state = { };
1149 unsigned long start = jiffies;
1150 unsigned long nr_pages = 0;
1151 unsigned long walk_start, walk_end;
1152 int i, zid;
1153 struct zone *zone;
7e18adb4 1154 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 1155 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7e18adb4 1156
0e1cc95b 1157 if (first_init_pfn == ULONG_MAX) {
d3cd131d 1158 pgdat_init_report_one_done();
0e1cc95b
MG
1159 return 0;
1160 }
1161
1162 /* Bind memory initialisation thread to a local node if possible */
1163 if (!cpumask_empty(cpumask))
1164 set_cpus_allowed_ptr(current, cpumask);
7e18adb4
MG
1165
1166 /* Sanity check boundaries */
1167 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1168 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1169 pgdat->first_deferred_pfn = ULONG_MAX;
1170
1171 /* Only the highest zone is deferred so find it */
1172 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1173 zone = pgdat->node_zones + zid;
1174 if (first_init_pfn < zone_end_pfn(zone))
1175 break;
1176 }
1177
1178 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1179 unsigned long pfn, end_pfn;
54608c3f 1180 struct page *page = NULL;
a4de83dd
MG
1181 struct page *free_base_page = NULL;
1182 unsigned long free_base_pfn = 0;
1183 int nr_to_free = 0;
7e18adb4
MG
1184
1185 end_pfn = min(walk_end, zone_end_pfn(zone));
1186 pfn = first_init_pfn;
1187 if (pfn < walk_start)
1188 pfn = walk_start;
1189 if (pfn < zone->zone_start_pfn)
1190 pfn = zone->zone_start_pfn;
1191
1192 for (; pfn < end_pfn; pfn++) {
54608c3f 1193 if (!pfn_valid_within(pfn))
a4de83dd 1194 goto free_range;
7e18adb4 1195
54608c3f
MG
1196 /*
1197 * Ensure pfn_valid is checked every
1198 * MAX_ORDER_NR_PAGES for memory holes
1199 */
1200 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1201 if (!pfn_valid(pfn)) {
1202 page = NULL;
a4de83dd 1203 goto free_range;
54608c3f
MG
1204 }
1205 }
1206
1207 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1208 page = NULL;
a4de83dd 1209 goto free_range;
54608c3f
MG
1210 }
1211
1212 /* Minimise pfn page lookups and scheduler checks */
1213 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1214 page++;
1215 } else {
a4de83dd
MG
1216 nr_pages += nr_to_free;
1217 deferred_free_range(free_base_page,
1218 free_base_pfn, nr_to_free);
1219 free_base_page = NULL;
1220 free_base_pfn = nr_to_free = 0;
1221
54608c3f
MG
1222 page = pfn_to_page(pfn);
1223 cond_resched();
1224 }
7e18adb4
MG
1225
1226 if (page->flags) {
1227 VM_BUG_ON(page_zone(page) != zone);
a4de83dd 1228 goto free_range;
7e18adb4
MG
1229 }
1230
1231 __init_single_page(page, pfn, zid, nid);
a4de83dd
MG
1232 if (!free_base_page) {
1233 free_base_page = page;
1234 free_base_pfn = pfn;
1235 nr_to_free = 0;
1236 }
1237 nr_to_free++;
1238
1239 /* Where possible, batch up pages for a single free */
1240 continue;
1241free_range:
1242 /* Free the current block of pages to allocator */
1243 nr_pages += nr_to_free;
1244 deferred_free_range(free_base_page, free_base_pfn,
1245 nr_to_free);
1246 free_base_page = NULL;
1247 free_base_pfn = nr_to_free = 0;
7e18adb4 1248 }
a4de83dd 1249
7e18adb4
MG
1250 first_init_pfn = max(end_pfn, first_init_pfn);
1251 }
1252
1253 /* Sanity check that the next zone really is unpopulated */
1254 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1255
0e1cc95b 1256 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
7e18adb4 1257 jiffies_to_msecs(jiffies - start));
d3cd131d
NS
1258
1259 pgdat_init_report_one_done();
0e1cc95b
MG
1260 return 0;
1261}
1262
1263void __init page_alloc_init_late(void)
1264{
1265 int nid;
1266
d3cd131d
NS
1267 /* There will be num_node_state(N_MEMORY) threads */
1268 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 1269 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
1270 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1271 }
1272
1273 /* Block until all are initialised */
d3cd131d 1274 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da
MG
1275
1276 /* Reinit limits that are based on free pages after the kernel is up */
1277 files_maxfiles_init();
7e18adb4
MG
1278}
1279#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1280
47118af0 1281#ifdef CONFIG_CMA
9cf510a5 1282/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
1283void __init init_cma_reserved_pageblock(struct page *page)
1284{
1285 unsigned i = pageblock_nr_pages;
1286 struct page *p = page;
1287
1288 do {
1289 __ClearPageReserved(p);
1290 set_page_count(p, 0);
1291 } while (++p, --i);
1292
47118af0 1293 set_pageblock_migratetype(page, MIGRATE_CMA);
dc78327c
MN
1294
1295 if (pageblock_order >= MAX_ORDER) {
1296 i = pageblock_nr_pages;
1297 p = page;
1298 do {
1299 set_page_refcounted(p);
1300 __free_pages(p, MAX_ORDER - 1);
1301 p += MAX_ORDER_NR_PAGES;
1302 } while (i -= MAX_ORDER_NR_PAGES);
1303 } else {
1304 set_page_refcounted(page);
1305 __free_pages(page, pageblock_order);
1306 }
1307
3dcc0571 1308 adjust_managed_page_count(page, pageblock_nr_pages);
47118af0
MN
1309}
1310#endif
1da177e4
LT
1311
1312/*
1313 * The order of subdivision here is critical for the IO subsystem.
1314 * Please do not alter this order without good reasons and regression
1315 * testing. Specifically, as large blocks of memory are subdivided,
1316 * the order in which smaller blocks are delivered depends on the order
1317 * they're subdivided in this function. This is the primary factor
1318 * influencing the order in which pages are delivered to the IO
1319 * subsystem according to empirical testing, and this is also justified
1320 * by considering the behavior of a buddy system containing a single
1321 * large block of memory acted on by a series of small allocations.
1322 * This behavior is a critical factor in sglist merging's success.
1323 *
6d49e352 1324 * -- nyc
1da177e4 1325 */
085cc7d5 1326static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
1327 int low, int high, struct free_area *area,
1328 int migratetype)
1da177e4
LT
1329{
1330 unsigned long size = 1 << high;
1331
1332 while (high > low) {
1333 area--;
1334 high--;
1335 size >>= 1;
309381fe 1336 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 1337
2847cf95 1338 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
e30825f1 1339 debug_guardpage_enabled() &&
2847cf95 1340 high < debug_guardpage_minorder()) {
c0a32fc5
SG
1341 /*
1342 * Mark as guard pages (or page), that will allow to
1343 * merge back to allocator when buddy will be freed.
1344 * Corresponding page table entries will not be touched,
1345 * pages will stay not present in virtual address space
1346 */
2847cf95 1347 set_page_guard(zone, &page[size], high, migratetype);
c0a32fc5
SG
1348 continue;
1349 }
b2a0ac88 1350 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
1351 area->nr_free++;
1352 set_page_order(&page[size], high);
1353 }
1da177e4
LT
1354}
1355
1da177e4
LT
1356/*
1357 * This page is about to be returned from the page allocator
1358 */
2a7684a2 1359static inline int check_new_page(struct page *page)
1da177e4 1360{
d230dec1 1361 const char *bad_reason = NULL;
f0b791a3
DH
1362 unsigned long bad_flags = 0;
1363
53f9263b 1364 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
1365 bad_reason = "nonzero mapcount";
1366 if (unlikely(page->mapping != NULL))
1367 bad_reason = "non-NULL mapping";
1368 if (unlikely(atomic_read(&page->_count) != 0))
1369 bad_reason = "nonzero _count";
f4c18e6f
NH
1370 if (unlikely(page->flags & __PG_HWPOISON)) {
1371 bad_reason = "HWPoisoned (hardware-corrupted)";
1372 bad_flags = __PG_HWPOISON;
1373 }
f0b791a3
DH
1374 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1375 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1376 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1377 }
9edad6ea
JW
1378#ifdef CONFIG_MEMCG
1379 if (unlikely(page->mem_cgroup))
1380 bad_reason = "page still charged to cgroup";
1381#endif
f0b791a3
DH
1382 if (unlikely(bad_reason)) {
1383 bad_page(page, bad_reason, bad_flags);
689bcebf 1384 return 1;
8cc3b392 1385 }
2a7684a2
WF
1386 return 0;
1387}
1388
75379191
VB
1389static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1390 int alloc_flags)
2a7684a2
WF
1391{
1392 int i;
1393
1394 for (i = 0; i < (1 << order); i++) {
1395 struct page *p = page + i;
1396 if (unlikely(check_new_page(p)))
1397 return 1;
1398 }
689bcebf 1399
4c21e2f2 1400 set_page_private(page, 0);
7835e98b 1401 set_page_refcounted(page);
cc102509
NP
1402
1403 arch_alloc_page(page, order);
1da177e4 1404 kernel_map_pages(page, 1 << order, 1);
b8c73fc2 1405 kasan_alloc_pages(page, order);
17cf4406
NP
1406
1407 if (gfp_flags & __GFP_ZERO)
f4d2897b
AA
1408 for (i = 0; i < (1 << order); i++)
1409 clear_highpage(page + i);
17cf4406
NP
1410
1411 if (order && (gfp_flags & __GFP_COMP))
1412 prep_compound_page(page, order);
1413
48c96a36
JK
1414 set_page_owner(page, order, gfp_flags);
1415
75379191 1416 /*
2f064f34 1417 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
1418 * allocate the page. The expectation is that the caller is taking
1419 * steps that will free more memory. The caller should avoid the page
1420 * being used for !PFMEMALLOC purposes.
1421 */
2f064f34
MH
1422 if (alloc_flags & ALLOC_NO_WATERMARKS)
1423 set_page_pfmemalloc(page);
1424 else
1425 clear_page_pfmemalloc(page);
75379191 1426
689bcebf 1427 return 0;
1da177e4
LT
1428}
1429
56fd56b8
MG
1430/*
1431 * Go through the free lists for the given migratetype and remove
1432 * the smallest available page from the freelists
1433 */
728ec980
MG
1434static inline
1435struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
1436 int migratetype)
1437{
1438 unsigned int current_order;
b8af2941 1439 struct free_area *area;
56fd56b8
MG
1440 struct page *page;
1441
1442 /* Find a page of the appropriate size in the preferred list */
1443 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1444 area = &(zone->free_area[current_order]);
a16601c5 1445 page = list_first_entry_or_null(&area->free_list[migratetype],
56fd56b8 1446 struct page, lru);
a16601c5
GT
1447 if (!page)
1448 continue;
56fd56b8
MG
1449 list_del(&page->lru);
1450 rmv_page_order(page);
1451 area->nr_free--;
56fd56b8 1452 expand(zone, page, order, current_order, area, migratetype);
bb14c2c7 1453 set_pcppage_migratetype(page, migratetype);
56fd56b8
MG
1454 return page;
1455 }
1456
1457 return NULL;
1458}
1459
1460
b2a0ac88
MG
1461/*
1462 * This array describes the order lists are fallen back to when
1463 * the free lists for the desirable migrate type are depleted
1464 */
47118af0 1465static int fallbacks[MIGRATE_TYPES][4] = {
974a786e
MG
1466 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1467 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1468 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
47118af0 1469#ifdef CONFIG_CMA
974a786e 1470 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
47118af0 1471#endif
194159fb 1472#ifdef CONFIG_MEMORY_ISOLATION
974a786e 1473 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
194159fb 1474#endif
b2a0ac88
MG
1475};
1476
dc67647b
JK
1477#ifdef CONFIG_CMA
1478static struct page *__rmqueue_cma_fallback(struct zone *zone,
1479 unsigned int order)
1480{
1481 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1482}
1483#else
1484static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1485 unsigned int order) { return NULL; }
1486#endif
1487
c361be55
MG
1488/*
1489 * Move the free pages in a range to the free lists of the requested type.
d9c23400 1490 * Note that start_page and end_pages are not aligned on a pageblock
c361be55
MG
1491 * boundary. If alignment is required, use move_freepages_block()
1492 */
435b405c 1493int move_freepages(struct zone *zone,
b69a7288
AB
1494 struct page *start_page, struct page *end_page,
1495 int migratetype)
c361be55
MG
1496{
1497 struct page *page;
d00181b9 1498 unsigned int order;
d100313f 1499 int pages_moved = 0;
c361be55
MG
1500
1501#ifndef CONFIG_HOLES_IN_ZONE
1502 /*
1503 * page_zone is not safe to call in this context when
1504 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1505 * anyway as we check zone boundaries in move_freepages_block().
1506 * Remove at a later date when no bug reports exist related to
ac0e5b7a 1507 * grouping pages by mobility
c361be55 1508 */
97ee4ba7 1509 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
c361be55
MG
1510#endif
1511
1512 for (page = start_page; page <= end_page;) {
344c790e 1513 /* Make sure we are not inadvertently changing nodes */
309381fe 1514 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
344c790e 1515
c361be55
MG
1516 if (!pfn_valid_within(page_to_pfn(page))) {
1517 page++;
1518 continue;
1519 }
1520
1521 if (!PageBuddy(page)) {
1522 page++;
1523 continue;
1524 }
1525
1526 order = page_order(page);
84be48d8
KS
1527 list_move(&page->lru,
1528 &zone->free_area[order].free_list[migratetype]);
c361be55 1529 page += 1 << order;
d100313f 1530 pages_moved += 1 << order;
c361be55
MG
1531 }
1532
d100313f 1533 return pages_moved;
c361be55
MG
1534}
1535
ee6f509c 1536int move_freepages_block(struct zone *zone, struct page *page,
68e3e926 1537 int migratetype)
c361be55
MG
1538{
1539 unsigned long start_pfn, end_pfn;
1540 struct page *start_page, *end_page;
1541
1542 start_pfn = page_to_pfn(page);
d9c23400 1543 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 1544 start_page = pfn_to_page(start_pfn);
d9c23400
MG
1545 end_page = start_page + pageblock_nr_pages - 1;
1546 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
1547
1548 /* Do not cross zone boundaries */
108bcc96 1549 if (!zone_spans_pfn(zone, start_pfn))
c361be55 1550 start_page = page;
108bcc96 1551 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
1552 return 0;
1553
1554 return move_freepages(zone, start_page, end_page, migratetype);
1555}
1556
2f66a68f
MG
1557static void change_pageblock_range(struct page *pageblock_page,
1558 int start_order, int migratetype)
1559{
1560 int nr_pageblocks = 1 << (start_order - pageblock_order);
1561
1562 while (nr_pageblocks--) {
1563 set_pageblock_migratetype(pageblock_page, migratetype);
1564 pageblock_page += pageblock_nr_pages;
1565 }
1566}
1567
fef903ef 1568/*
9c0415eb
VB
1569 * When we are falling back to another migratetype during allocation, try to
1570 * steal extra free pages from the same pageblocks to satisfy further
1571 * allocations, instead of polluting multiple pageblocks.
1572 *
1573 * If we are stealing a relatively large buddy page, it is likely there will
1574 * be more free pages in the pageblock, so try to steal them all. For
1575 * reclaimable and unmovable allocations, we steal regardless of page size,
1576 * as fragmentation caused by those allocations polluting movable pageblocks
1577 * is worse than movable allocations stealing from unmovable and reclaimable
1578 * pageblocks.
fef903ef 1579 */
4eb7dce6
JK
1580static bool can_steal_fallback(unsigned int order, int start_mt)
1581{
1582 /*
1583 * Leaving this order check is intended, although there is
1584 * relaxed order check in next check. The reason is that
1585 * we can actually steal whole pageblock if this condition met,
1586 * but, below check doesn't guarantee it and that is just heuristic
1587 * so could be changed anytime.
1588 */
1589 if (order >= pageblock_order)
1590 return true;
1591
1592 if (order >= pageblock_order / 2 ||
1593 start_mt == MIGRATE_RECLAIMABLE ||
1594 start_mt == MIGRATE_UNMOVABLE ||
1595 page_group_by_mobility_disabled)
1596 return true;
1597
1598 return false;
1599}
1600
1601/*
1602 * This function implements actual steal behaviour. If order is large enough,
1603 * we can steal whole pageblock. If not, we first move freepages in this
1604 * pageblock and check whether half of pages are moved or not. If half of
1605 * pages are moved, we can change migratetype of pageblock and permanently
1606 * use it's pages as requested migratetype in the future.
1607 */
1608static void steal_suitable_fallback(struct zone *zone, struct page *page,
1609 int start_type)
fef903ef 1610{
d00181b9 1611 unsigned int current_order = page_order(page);
4eb7dce6 1612 int pages;
fef903ef 1613
fef903ef
SB
1614 /* Take ownership for orders >= pageblock_order */
1615 if (current_order >= pageblock_order) {
1616 change_pageblock_range(page, current_order, start_type);
3a1086fb 1617 return;
fef903ef
SB
1618 }
1619
4eb7dce6 1620 pages = move_freepages_block(zone, page, start_type);
fef903ef 1621
4eb7dce6
JK
1622 /* Claim the whole block if over half of it is free */
1623 if (pages >= (1 << (pageblock_order-1)) ||
1624 page_group_by_mobility_disabled)
1625 set_pageblock_migratetype(page, start_type);
1626}
1627
2149cdae
JK
1628/*
1629 * Check whether there is a suitable fallback freepage with requested order.
1630 * If only_stealable is true, this function returns fallback_mt only if
1631 * we can steal other freepages all together. This would help to reduce
1632 * fragmentation due to mixed migratetype pages in one pageblock.
1633 */
1634int find_suitable_fallback(struct free_area *area, unsigned int order,
1635 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
1636{
1637 int i;
1638 int fallback_mt;
1639
1640 if (area->nr_free == 0)
1641 return -1;
1642
1643 *can_steal = false;
1644 for (i = 0;; i++) {
1645 fallback_mt = fallbacks[migratetype][i];
974a786e 1646 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
1647 break;
1648
1649 if (list_empty(&area->free_list[fallback_mt]))
1650 continue;
fef903ef 1651
4eb7dce6
JK
1652 if (can_steal_fallback(order, migratetype))
1653 *can_steal = true;
1654
2149cdae
JK
1655 if (!only_stealable)
1656 return fallback_mt;
1657
1658 if (*can_steal)
1659 return fallback_mt;
fef903ef 1660 }
4eb7dce6
JK
1661
1662 return -1;
fef903ef
SB
1663}
1664
0aaa29a5
MG
1665/*
1666 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1667 * there are no empty page blocks that contain a page with a suitable order
1668 */
1669static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
1670 unsigned int alloc_order)
1671{
1672 int mt;
1673 unsigned long max_managed, flags;
1674
1675 /*
1676 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1677 * Check is race-prone but harmless.
1678 */
1679 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
1680 if (zone->nr_reserved_highatomic >= max_managed)
1681 return;
1682
1683 spin_lock_irqsave(&zone->lock, flags);
1684
1685 /* Recheck the nr_reserved_highatomic limit under the lock */
1686 if (zone->nr_reserved_highatomic >= max_managed)
1687 goto out_unlock;
1688
1689 /* Yoink! */
1690 mt = get_pageblock_migratetype(page);
1691 if (mt != MIGRATE_HIGHATOMIC &&
1692 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
1693 zone->nr_reserved_highatomic += pageblock_nr_pages;
1694 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1695 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
1696 }
1697
1698out_unlock:
1699 spin_unlock_irqrestore(&zone->lock, flags);
1700}
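
/*
 * Worked example (not part of the original file): for a zone with
 * managed_pages == 1048576 (4GB of 4KB pages) and
 * pageblock_nr_pages == 512, the cap above evaluates to
 *
 *   max_managed = 1048576 / 100 + 512 = 10997 pages,
 *
 * i.e. roughly 1% of the zone plus one pageblock, or about 21
 * pageblocks' worth of high-order atomic reserve at most.
 */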
1701
1702/*
1703 * Used when an allocation is about to fail under memory pressure. This
1704 * potentially hurts the reliability of high-order allocations when under
1705 * intense memory pressure but failed atomic allocations should be easier
1706 * to recover from than an OOM.
1707 */
1708static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
1709{
1710 struct zonelist *zonelist = ac->zonelist;
1711 unsigned long flags;
1712 struct zoneref *z;
1713 struct zone *zone;
1714 struct page *page;
1715 int order;
1716
1717 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
1718 ac->nodemask) {
1719 /* Preserve at least one pageblock */
1720 if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
1721 continue;
1722
1723 spin_lock_irqsave(&zone->lock, flags);
1724 for (order = 0; order < MAX_ORDER; order++) {
1725 struct free_area *area = &(zone->free_area[order]);
1726
a16601c5
GT
1727 page = list_first_entry_or_null(
1728 &area->free_list[MIGRATE_HIGHATOMIC],
1729 struct page, lru);
1730 if (!page)
0aaa29a5
MG
1731 continue;
1732
0aaa29a5
MG
1733 /*
1734 * It should never happen but changes to locking could
1735 * inadvertently allow a per-cpu drain to add pages
1736 * to MIGRATE_HIGHATOMIC while unreserving so be safe
1737 * and watch for underflows.
1738 */
1739 zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
1740 zone->nr_reserved_highatomic);
1741
1742 /*
1743 * Convert to ac->migratetype and avoid the normal
1744 * pageblock stealing heuristics. Minimally, the caller
1745 * is doing the work and needs the pages. More
1746 * importantly, if the block was always converted to
1747 * MIGRATE_UNMOVABLE or another type then the number
1748 * of pageblocks that cannot be completely freed
1749 * may increase.
1750 */
1751 set_pageblock_migratetype(page, ac->migratetype);
1752 move_freepages_block(zone, page, ac->migratetype);
1753 spin_unlock_irqrestore(&zone->lock, flags);
1754 return;
1755 }
1756 spin_unlock_irqrestore(&zone->lock, flags);
1757 }
1758}
1759
b2a0ac88 1760/* Remove an element from the buddy allocator from the fallback list */
0ac3a409 1761static inline struct page *
7aeb09f9 1762__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
b2a0ac88 1763{
b8af2941 1764 struct free_area *area;
7aeb09f9 1765 unsigned int current_order;
b2a0ac88 1766 struct page *page;
4eb7dce6
JK
1767 int fallback_mt;
1768 bool can_steal;
b2a0ac88
MG
1769
1770 /* Find the largest possible block of pages in the other list */
7aeb09f9
MG
1771 for (current_order = MAX_ORDER-1;
1772 current_order >= order && current_order <= MAX_ORDER-1;
1773 --current_order) {
4eb7dce6
JK
1774 area = &(zone->free_area[current_order]);
1775 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 1776 start_migratetype, false, &can_steal);
4eb7dce6
JK
1777 if (fallback_mt == -1)
1778 continue;
b2a0ac88 1779
a16601c5 1780 page = list_first_entry(&area->free_list[fallback_mt],
4eb7dce6
JK
1781 struct page, lru);
1782 if (can_steal)
1783 steal_suitable_fallback(zone, page, start_migratetype);
b2a0ac88 1784
4eb7dce6
JK
1785 /* Remove the page from the freelists */
1786 area->nr_free--;
1787 list_del(&page->lru);
1788 rmv_page_order(page);
3a1086fb 1789
4eb7dce6
JK
1790 expand(zone, page, order, current_order, area,
1791 start_migratetype);
1792 /*
bb14c2c7 1793 * The pcppage_migratetype may differ from pageblock's
4eb7dce6 1794 * migratetype depending on the decisions in
bb14c2c7
VB
1795 * find_suitable_fallback(). This is OK as long as it does not
1796 * differ for MIGRATE_CMA pageblocks. Those can be used as
1797 * fallback only via special __rmqueue_cma_fallback() function
4eb7dce6 1798 */
bb14c2c7 1799 set_pcppage_migratetype(page, start_migratetype);
e0fff1bd 1800
4eb7dce6
JK
1801 trace_mm_page_alloc_extfrag(page, order, current_order,
1802 start_migratetype, fallback_mt);
e0fff1bd 1803
4eb7dce6 1804 return page;
b2a0ac88
MG
1805 }
1806
728ec980 1807 return NULL;
b2a0ac88
MG
1808}
1809
56fd56b8 1810/*
1da177e4
LT
1811 * Do the hard work of removing an element from the buddy allocator.
1812 * Call me with the zone->lock already held.
1813 */
b2a0ac88 1814static struct page *__rmqueue(struct zone *zone, unsigned int order,
6ac0206b 1815 int migratetype)
1da177e4 1816{
1da177e4
LT
1817 struct page *page;
1818
56fd56b8 1819 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 1820 if (unlikely(!page)) {
dc67647b
JK
1821 if (migratetype == MIGRATE_MOVABLE)
1822 page = __rmqueue_cma_fallback(zone, order);
1823
1824 if (!page)
1825 page = __rmqueue_fallback(zone, order, migratetype);
728ec980
MG
1826 }
1827
0d3d062a 1828 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 1829 return page;
1da177e4
LT
1830}
1831
5f63b720 1832/*
1da177e4
LT
1833 * Obtain a specified number of elements from the buddy allocator, all under
1834 * a single hold of the lock, for efficiency. Add them to the supplied list.
1835 * Returns the number of new pages which were placed at *list.
1836 */
5f63b720 1837static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 1838 unsigned long count, struct list_head *list,
b745bc85 1839 int migratetype, bool cold)
1da177e4 1840{
5bcc9f86 1841 int i;
5f63b720 1842
c54ad30c 1843 spin_lock(&zone->lock);
1da177e4 1844 for (i = 0; i < count; ++i) {
6ac0206b 1845 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 1846 if (unlikely(page == NULL))
1da177e4 1847 break;
81eabcbe
MG
1848
1849 /*
1850 * Split buddy pages returned by expand() are received here
 1851		 * in physical page order. The page is added to the caller's
 1852		 * list and the list head then moves forward. From the caller's
1853 * perspective, the linked list is ordered by page number in
1854 * some conditions. This is useful for IO devices that can
1855 * merge IO requests if the physical pages are ordered
1856 * properly.
1857 */
b745bc85 1858 if (likely(!cold))
e084b2d9
MG
1859 list_add(&page->lru, list);
1860 else
1861 list_add_tail(&page->lru, list);
81eabcbe 1862 list = &page->lru;
bb14c2c7 1863 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
1864 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1865 -(1 << order));
1da177e4 1866 }
f2260e6b 1867 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
c54ad30c 1868 spin_unlock(&zone->lock);
085cc7d5 1869 return i;
1da177e4
LT
1870}
1871
4ae7c039 1872#ifdef CONFIG_NUMA
8fce4d8e 1873/*
4037d452
CL
1874 * Called from the vmstat counter updater to drain pagesets of this
1875 * currently executing processor on remote nodes after they have
1876 * expired.
1877 *
879336c3
CL
1878 * Note that this function must be called with the thread pinned to
1879 * a single processor.
8fce4d8e 1880 */
4037d452 1881void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 1882{
4ae7c039 1883 unsigned long flags;
7be12fc9 1884 int to_drain, batch;
4ae7c039 1885
4037d452 1886 local_irq_save(flags);
4db0c3c2 1887 batch = READ_ONCE(pcp->batch);
7be12fc9 1888 to_drain = min(pcp->count, batch);
2a13515c
KM
1889 if (to_drain > 0) {
1890 free_pcppages_bulk(zone, to_drain, pcp);
1891 pcp->count -= to_drain;
1892 }
4037d452 1893 local_irq_restore(flags);
4ae7c039
CL
1894}
1895#endif
1896
9f8f2172 1897/*
93481ff0 1898 * Drain pcplists of the indicated processor and zone.
9f8f2172
CL
1899 *
1900 * The processor must either be the current processor and the
1901 * thread pinned to the current processor or a processor that
1902 * is not online.
1903 */
93481ff0 1904static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 1905{
c54ad30c 1906 unsigned long flags;
93481ff0
VB
1907 struct per_cpu_pageset *pset;
1908 struct per_cpu_pages *pcp;
1da177e4 1909
93481ff0
VB
1910 local_irq_save(flags);
1911 pset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 1912
93481ff0
VB
1913 pcp = &pset->pcp;
1914 if (pcp->count) {
1915 free_pcppages_bulk(zone, pcp->count, pcp);
1916 pcp->count = 0;
1917 }
1918 local_irq_restore(flags);
1919}
3dfa5721 1920
93481ff0
VB
1921/*
1922 * Drain pcplists of all zones on the indicated processor.
1923 *
1924 * The processor must either be the current processor and the
1925 * thread pinned to the current processor or a processor that
1926 * is not online.
1927 */
1928static void drain_pages(unsigned int cpu)
1929{
1930 struct zone *zone;
1931
1932 for_each_populated_zone(zone) {
1933 drain_pages_zone(cpu, zone);
1da177e4
LT
1934 }
1935}
1da177e4 1936
9f8f2172
CL
1937/*
1938 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
93481ff0
VB
1939 *
1940 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1941 * the single zone's pages.
9f8f2172 1942 */
93481ff0 1943void drain_local_pages(struct zone *zone)
9f8f2172 1944{
93481ff0
VB
1945 int cpu = smp_processor_id();
1946
1947 if (zone)
1948 drain_pages_zone(cpu, zone);
1949 else
1950 drain_pages(cpu);
9f8f2172
CL
1951}
1952
1953/*
74046494
GBY
1954 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1955 *
93481ff0
VB
1956 * When zone parameter is non-NULL, spill just the single zone's pages.
1957 *
74046494
GBY
1958 * Note that this code is protected against sending an IPI to an offline
1959 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1960 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1961 * nothing keeps CPUs from showing up after we populated the cpumask and
1962 * before the call to on_each_cpu_mask().
9f8f2172 1963 */
93481ff0 1964void drain_all_pages(struct zone *zone)
9f8f2172 1965{
74046494 1966 int cpu;
74046494
GBY
1967
1968 /*
 1969	 * Allocate in the BSS so we won't require allocation in
 1970	 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1971 */
1972 static cpumask_t cpus_with_pcps;
1973
1974 /*
 1975	 * We don't care about racing with a CPU hotplug event
 1976	 * as the offline notification will cause the notified
 1977	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
 1978	 * disables preemption as part of its processing
1979 */
1980 for_each_online_cpu(cpu) {
93481ff0
VB
1981 struct per_cpu_pageset *pcp;
1982 struct zone *z;
74046494 1983 bool has_pcps = false;
93481ff0
VB
1984
1985 if (zone) {
74046494 1986 pcp = per_cpu_ptr(zone->pageset, cpu);
93481ff0 1987 if (pcp->pcp.count)
74046494 1988 has_pcps = true;
93481ff0
VB
1989 } else {
1990 for_each_populated_zone(z) {
1991 pcp = per_cpu_ptr(z->pageset, cpu);
1992 if (pcp->pcp.count) {
1993 has_pcps = true;
1994 break;
1995 }
74046494
GBY
1996 }
1997 }
93481ff0 1998
74046494
GBY
1999 if (has_pcps)
2000 cpumask_set_cpu(cpu, &cpus_with_pcps);
2001 else
2002 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2003 }
93481ff0
VB
2004 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2005 zone, 1);
9f8f2172
CL
2006}
2007
296699de 2008#ifdef CONFIG_HIBERNATION
1da177e4
LT
2009
2010void mark_free_pages(struct zone *zone)
2011{
f623f0db
RW
2012 unsigned long pfn, max_zone_pfn;
2013 unsigned long flags;
7aeb09f9 2014 unsigned int order, t;
86760a2c 2015 struct page *page;
1da177e4 2016
8080fc03 2017 if (zone_is_empty(zone))
1da177e4
LT
2018 return;
2019
2020 spin_lock_irqsave(&zone->lock, flags);
f623f0db 2021
108bcc96 2022 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
2023 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2024 if (pfn_valid(pfn)) {
86760a2c 2025 page = pfn_to_page(pfn);
7be98234
RW
2026 if (!swsusp_page_is_forbidden(page))
2027 swsusp_unset_page_free(page);
f623f0db 2028 }
1da177e4 2029
b2a0ac88 2030 for_each_migratetype_order(order, t) {
86760a2c
GT
2031 list_for_each_entry(page,
2032 &zone->free_area[order].free_list[t], lru) {
f623f0db 2033 unsigned long i;
1da177e4 2034
86760a2c 2035 pfn = page_to_pfn(page);
f623f0db 2036 for (i = 0; i < (1UL << order); i++)
7be98234 2037 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 2038 }
b2a0ac88 2039 }
1da177e4
LT
2040 spin_unlock_irqrestore(&zone->lock, flags);
2041}
e2c55dc8 2042#endif /* CONFIG_PM */
1da177e4 2043
1da177e4
LT
2044/*
2045 * Free a 0-order page
b745bc85 2046 * cold == true ? free a cold page : free a hot page
1da177e4 2047 */
b745bc85 2048void free_hot_cold_page(struct page *page, bool cold)
1da177e4
LT
2049{
2050 struct zone *zone = page_zone(page);
2051 struct per_cpu_pages *pcp;
2052 unsigned long flags;
dc4b0caf 2053 unsigned long pfn = page_to_pfn(page);
5f8dcc21 2054 int migratetype;
1da177e4 2055
ec95f53a 2056 if (!free_pages_prepare(page, 0))
689bcebf
HD
2057 return;
2058
dc4b0caf 2059 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 2060 set_pcppage_migratetype(page, migratetype);
1da177e4 2061 local_irq_save(flags);
f8891e5e 2062 __count_vm_event(PGFREE);
da456f14 2063
5f8dcc21
MG
2064 /*
2065 * We only track unmovable, reclaimable and movable on pcp lists.
2066 * Free ISOLATE pages back to the allocator because they are being
2067 * offlined but treat RESERVE as movable pages so we can get those
2068 * areas back if necessary. Otherwise, we may have to free
2069 * excessively into the page allocator
2070 */
2071 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 2072 if (unlikely(is_migrate_isolate(migratetype))) {
dc4b0caf 2073 free_one_page(zone, page, pfn, 0, migratetype);
5f8dcc21
MG
2074 goto out;
2075 }
2076 migratetype = MIGRATE_MOVABLE;
2077 }
2078
99dcc3e5 2079 pcp = &this_cpu_ptr(zone->pageset)->pcp;
b745bc85 2080 if (!cold)
5f8dcc21 2081 list_add(&page->lru, &pcp->lists[migratetype]);
b745bc85
MG
2082 else
2083 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1da177e4 2084 pcp->count++;
48db57f8 2085 if (pcp->count >= pcp->high) {
4db0c3c2 2086 unsigned long batch = READ_ONCE(pcp->batch);
998d39cb
CS
2087 free_pcppages_bulk(zone, batch, pcp);
2088 pcp->count -= batch;
48db57f8 2089 }
5f8dcc21
MG
2090
2091out:
1da177e4 2092 local_irq_restore(flags);
1da177e4
LT
2093}
2094
cc59850e
KK
2095/*
2096 * Free a list of 0-order pages
2097 */
b745bc85 2098void free_hot_cold_page_list(struct list_head *list, bool cold)
cc59850e
KK
2099{
2100 struct page *page, *next;
2101
2102 list_for_each_entry_safe(page, next, list, lru) {
b413d48a 2103 trace_mm_page_free_batched(page, cold);
cc59850e
KK
2104 free_hot_cold_page(page, cold);
2105 }
2106}
2107
8dfcc9ba
NP
2108/*
2109 * split_page takes a non-compound higher-order page, and splits it into
 2110 * n (1<<order) sub-pages: page[0] .. page[n-1]
2111 * Each sub-page must be freed individually.
2112 *
2113 * Note: this is probably too low level an operation for use in drivers.
2114 * Please consult with lkml before using this in your driver.
2115 */
2116void split_page(struct page *page, unsigned int order)
2117{
2118 int i;
e2cfc911 2119 gfp_t gfp_mask;
8dfcc9ba 2120
309381fe
SL
2121 VM_BUG_ON_PAGE(PageCompound(page), page);
2122 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67
VN
2123
2124#ifdef CONFIG_KMEMCHECK
2125 /*
2126 * Split shadow pages too, because free(page[0]) would
2127 * otherwise free the whole shadow.
2128 */
2129 if (kmemcheck_page_is_tracked(page))
2130 split_page(virt_to_page(page[0].shadow), order);
2131#endif
2132
e2cfc911
JK
2133 gfp_mask = get_page_owner_gfp(page);
2134 set_page_owner(page, 0, gfp_mask);
48c96a36 2135 for (i = 1; i < (1 << order); i++) {
7835e98b 2136 set_page_refcounted(page + i);
e2cfc911 2137 set_page_owner(page + i, 0, gfp_mask);
48c96a36 2138 }
8dfcc9ba 2139}
5853ff23 2140EXPORT_SYMBOL_GPL(split_page);
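
/*
 * Minimal usage sketch (not part of the original file; split_page_demo
 * is a hypothetical caller): allocate a non-compound order-2 block,
 * split it, then free the four sub-pages individually as the comment
 * above requires.
 */
static inline void split_page_demo(void)
{
	unsigned int order = 2;
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return;

	split_page(page, order);		/* page[0..3] are now independent */
	for (i = 0; i < (1 << order); i++)
		__free_pages(page + i, 0);	/* each sub-page freed on its own */
}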
8dfcc9ba 2141
3c605096 2142int __isolate_free_page(struct page *page, unsigned int order)
748446bb 2143{
748446bb
MG
2144 unsigned long watermark;
2145 struct zone *zone;
2139cbe6 2146 int mt;
748446bb
MG
2147
2148 BUG_ON(!PageBuddy(page));
2149
2150 zone = page_zone(page);
2e30abd1 2151 mt = get_pageblock_migratetype(page);
748446bb 2152
194159fb 2153 if (!is_migrate_isolate(mt)) {
2e30abd1
MS
2154 /* Obey watermarks as if the page was being allocated */
2155 watermark = low_wmark_pages(zone) + (1 << order);
2156 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2157 return 0;
2158
8fb74b9f 2159 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 2160 }
748446bb
MG
2161
2162 /* Remove page from free list */
2163 list_del(&page->lru);
2164 zone->free_area[order].nr_free--;
2165 rmv_page_order(page);
2139cbe6 2166
e2cfc911 2167 set_page_owner(page, order, __GFP_MOVABLE);
f3a14ced 2168
8fb74b9f 2169 /* Set the pageblock if the isolated page is at least a pageblock */
748446bb
MG
2170 if (order >= pageblock_order - 1) {
2171 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
2172 for (; page < endpage; page += pageblock_nr_pages) {
2173 int mt = get_pageblock_migratetype(page);
194159fb 2174 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
47118af0
MN
2175 set_pageblock_migratetype(page,
2176 MIGRATE_MOVABLE);
2177 }
748446bb
MG
2178 }
2179
f3a14ced 2180
8fb74b9f 2181 return 1UL << order;
1fb3f8ca
MG
2182}
2183
2184/*
2185 * Similar to split_page except the page is already free. As this is only
2186 * being used for migration, the migratetype of the block also changes.
2187 * As this is called with interrupts disabled, the caller is responsible
 2188 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2189 * are enabled.
2190 *
2191 * Note: this is probably too low level an operation for use in drivers.
2192 * Please consult with lkml before using this in your driver.
2193 */
2194int split_free_page(struct page *page)
2195{
2196 unsigned int order;
2197 int nr_pages;
2198
1fb3f8ca
MG
2199 order = page_order(page);
2200
8fb74b9f 2201 nr_pages = __isolate_free_page(page, order);
1fb3f8ca
MG
2202 if (!nr_pages)
2203 return 0;
2204
2205 /* Split into individual pages */
2206 set_page_refcounted(page);
2207 split_page(page, order);
2208 return nr_pages;
748446bb
MG
2209}
2210
1da177e4 2211/*
75379191 2212 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1da177e4 2213 */
0a15c3e9
MG
2214static inline
2215struct page *buffered_rmqueue(struct zone *preferred_zone,
7aeb09f9 2216 struct zone *zone, unsigned int order,
0aaa29a5 2217 gfp_t gfp_flags, int alloc_flags, int migratetype)
1da177e4
LT
2218{
2219 unsigned long flags;
689bcebf 2220 struct page *page;
b745bc85 2221 bool cold = ((gfp_flags & __GFP_COLD) != 0);
1da177e4 2222
48db57f8 2223 if (likely(order == 0)) {
1da177e4 2224 struct per_cpu_pages *pcp;
5f8dcc21 2225 struct list_head *list;
1da177e4 2226
1da177e4 2227 local_irq_save(flags);
99dcc3e5
CL
2228 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2229 list = &pcp->lists[migratetype];
5f8dcc21 2230 if (list_empty(list)) {
535131e6 2231 pcp->count += rmqueue_bulk(zone, 0,
5f8dcc21 2232 pcp->batch, list,
e084b2d9 2233 migratetype, cold);
5f8dcc21 2234 if (unlikely(list_empty(list)))
6fb332fa 2235 goto failed;
535131e6 2236 }
b92a6edd 2237
5f8dcc21 2238 if (cold)
a16601c5 2239 page = list_last_entry(list, struct page, lru);
5f8dcc21 2240 else
a16601c5 2241 page = list_first_entry(list, struct page, lru);
5f8dcc21 2242
b92a6edd
MG
2243 list_del(&page->lru);
2244 pcp->count--;
7fb1d9fc 2245 } else {
dab48dab
AM
2246 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
2247 /*
2248 * __GFP_NOFAIL is not to be used in new code.
2249 *
2250 * All __GFP_NOFAIL callers should be fixed so that they
2251 * properly detect and handle allocation failures.
2252 *
2253 * We most definitely don't want callers attempting to
4923abf9 2254 * allocate greater than order-1 page units with
dab48dab
AM
2255 * __GFP_NOFAIL.
2256 */
4923abf9 2257 WARN_ON_ONCE(order > 1);
dab48dab 2258 }
1da177e4 2259 spin_lock_irqsave(&zone->lock, flags);
0aaa29a5
MG
2260
2261 page = NULL;
2262 if (alloc_flags & ALLOC_HARDER) {
2263 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2264 if (page)
2265 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2266 }
2267 if (!page)
6ac0206b 2268 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
2269 spin_unlock(&zone->lock);
2270 if (!page)
2271 goto failed;
d1ce749a 2272 __mod_zone_freepage_state(zone, -(1 << order),
bb14c2c7 2273 get_pcppage_migratetype(page));
1da177e4
LT
2274 }
2275
3a025760 2276 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
abe5f972 2277 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
57054651
JW
2278 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2279 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
27329369 2280
f8891e5e 2281 __count_zone_vm_events(PGALLOC, zone, 1 << order);
78afd561 2282 zone_statistics(preferred_zone, zone, gfp_flags);
a74609fa 2283 local_irq_restore(flags);
1da177e4 2284
309381fe 2285 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 2286 return page;
a74609fa
NP
2287
2288failed:
2289 local_irq_restore(flags);
a74609fa 2290 return NULL;
1da177e4
LT
2291}
2292
933e312e
AM
2293#ifdef CONFIG_FAIL_PAGE_ALLOC
2294
b2588c4b 2295static struct {
933e312e
AM
2296 struct fault_attr attr;
2297
621a5f7a 2298 bool ignore_gfp_highmem;
71baba4b 2299 bool ignore_gfp_reclaim;
54114994 2300 u32 min_order;
933e312e
AM
2301} fail_page_alloc = {
2302 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 2303 .ignore_gfp_reclaim = true,
621a5f7a 2304 .ignore_gfp_highmem = true,
54114994 2305 .min_order = 1,
933e312e
AM
2306};
2307
2308static int __init setup_fail_page_alloc(char *str)
2309{
2310 return setup_fault_attr(&fail_page_alloc.attr, str);
2311}
2312__setup("fail_page_alloc=", setup_fail_page_alloc);
2313
deaf386e 2314static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2315{
54114994 2316 if (order < fail_page_alloc.min_order)
deaf386e 2317 return false;
933e312e 2318 if (gfp_mask & __GFP_NOFAIL)
deaf386e 2319 return false;
933e312e 2320 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 2321 return false;
71baba4b
MG
2322 if (fail_page_alloc.ignore_gfp_reclaim &&
2323 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 2324 return false;
933e312e
AM
2325
2326 return should_fail(&fail_page_alloc.attr, 1 << order);
2327}
2328
2329#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2330
2331static int __init fail_page_alloc_debugfs(void)
2332{
f4ae40a6 2333 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
933e312e 2334 struct dentry *dir;
933e312e 2335
dd48c085
AM
2336 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2337 &fail_page_alloc.attr);
2338 if (IS_ERR(dir))
2339 return PTR_ERR(dir);
933e312e 2340
b2588c4b 2341 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
71baba4b 2342 &fail_page_alloc.ignore_gfp_reclaim))
b2588c4b
AM
2343 goto fail;
2344 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2345 &fail_page_alloc.ignore_gfp_highmem))
2346 goto fail;
2347 if (!debugfs_create_u32("min-order", mode, dir,
2348 &fail_page_alloc.min_order))
2349 goto fail;
2350
2351 return 0;
2352fail:
dd48c085 2353 debugfs_remove_recursive(dir);
933e312e 2354
b2588c4b 2355 return -ENOMEM;
933e312e
AM
2356}
2357
2358late_initcall(fail_page_alloc_debugfs);
2359
2360#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
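
/*
 * Usage sketch (not part of the original file): with
 * CONFIG_FAIL_PAGE_ALLOC and CONFIG_FAULT_INJECTION_DEBUG_FS enabled,
 * the knobs above appear under debugfs (typically /sys/kernel/debug).
 * For example, to fail roughly 10% of order-1 and larger allocations,
 * including ones that may sleep:
 *
 *   echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *   echo 1000 > /sys/kernel/debug/fail_page_alloc/times
 *   echo 1 > /sys/kernel/debug/fail_page_alloc/min-order
 *   echo N > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
 *
 * probability and times come from the generic fault_attr code;
 * min-order, ignore-gfp-wait and ignore-gfp-highmem are created just
 * above. The generic fault_attr values can also be preset at boot via
 * the "fail_page_alloc=" parameter parsed by setup_fail_page_alloc().
 */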
2361
2362#else /* CONFIG_FAIL_PAGE_ALLOC */
2363
deaf386e 2364static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2365{
deaf386e 2366 return false;
933e312e
AM
2367}
2368
2369#endif /* CONFIG_FAIL_PAGE_ALLOC */
2370
1da177e4 2371/*
97a16fc8
MG
2372 * Return true if free base pages are above 'mark'. For high-order checks it
 2373 * will return true if the order-0 watermark is reached and there is at least
2374 * one free page of a suitable size. Checking now avoids taking the zone lock
2375 * to check in the allocation paths if no pages are free.
1da177e4 2376 */
7aeb09f9
MG
2377static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2378 unsigned long mark, int classzone_idx, int alloc_flags,
2379 long free_pages)
1da177e4 2380{
d23ad423 2381 long min = mark;
1da177e4 2382 int o;
97a16fc8 2383 const int alloc_harder = (alloc_flags & ALLOC_HARDER);
1da177e4 2384
0aaa29a5 2385 /* free_pages may go negative - that's OK */
df0a6daa 2386 free_pages -= (1 << order) - 1;
0aaa29a5 2387
7fb1d9fc 2388 if (alloc_flags & ALLOC_HIGH)
1da177e4 2389 min -= min / 2;
0aaa29a5
MG
2390
2391 /*
2392 * If the caller does not have rights to ALLOC_HARDER then subtract
2393 * the high-atomic reserves. This will over-estimate the size of the
2394 * atomic reserve but it avoids a search.
2395 */
97a16fc8 2396 if (likely(!alloc_harder))
0aaa29a5
MG
2397 free_pages -= z->nr_reserved_highatomic;
2398 else
1da177e4 2399 min -= min / 4;
e2b19197 2400
d95ea5d1
BZ
2401#ifdef CONFIG_CMA
2402 /* If allocation can't use CMA areas don't use free CMA pages */
2403 if (!(alloc_flags & ALLOC_CMA))
97a16fc8 2404 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
d95ea5d1 2405#endif
026b0814 2406
97a16fc8
MG
2407 /*
2408 * Check watermarks for an order-0 allocation request. If these
2409 * are not met, then a high-order request also cannot go ahead
2410 * even if a suitable page happened to be free.
2411 */
2412 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
88f5acf8 2413 return false;
1da177e4 2414
97a16fc8
MG
2415 /* If this is an order-0 request then the watermark is fine */
2416 if (!order)
2417 return true;
2418
2419 /* For a high-order request, check at least one suitable page is free */
2420 for (o = order; o < MAX_ORDER; o++) {
2421 struct free_area *area = &z->free_area[o];
2422 int mt;
2423
2424 if (!area->nr_free)
2425 continue;
2426
2427 if (alloc_harder)
2428 return true;
1da177e4 2429
97a16fc8
MG
2430 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2431 if (!list_empty(&area->free_list[mt]))
2432 return true;
2433 }
2434
2435#ifdef CONFIG_CMA
2436 if ((alloc_flags & ALLOC_CMA) &&
2437 !list_empty(&area->free_list[MIGRATE_CMA])) {
2438 return true;
2439 }
2440#endif
1da177e4 2441 }
97a16fc8 2442 return false;
88f5acf8
MG
2443}
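
/*
 * Worked example (not part of the original file): take mark == 4096
 * pages, lowmem_reserve[classzone_idx] == 0, and a GFP_ATOMIC caller
 * that carries both ALLOC_HIGH and ALLOC_HARDER. Then
 *
 *   min = 4096;  min -= min / 2;  -> 2048   (ALLOC_HIGH)
 *                min -= min / 4;  -> 1536   (ALLOC_HARDER)
 *
 * For an order-2 request, free_pages is first reduced by
 * (1 << 2) - 1 == 3 and must still exceed 1536 to pass the order-0
 * check. After that, at least one free_area at order >= 2 must be
 * non-empty (any migratetype counts for ALLOC_HARDER) for the
 * watermark check to succeed.
 */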
2444
7aeb09f9 2445bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
88f5acf8
MG
2446 int classzone_idx, int alloc_flags)
2447{
2448 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2449 zone_page_state(z, NR_FREE_PAGES));
2450}
2451
7aeb09f9 2452bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
e2b19197 2453 unsigned long mark, int classzone_idx)
88f5acf8
MG
2454{
2455 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2456
2457 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2458 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2459
e2b19197 2460 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
88f5acf8 2461 free_pages);
1da177e4
LT
2462}
2463
9276b1bc 2464#ifdef CONFIG_NUMA
81c0a2bb
JW
2465static bool zone_local(struct zone *local_zone, struct zone *zone)
2466{
fff4068c 2467 return local_zone->node == zone->node;
81c0a2bb
JW
2468}
2469
957f822a
DR
2470static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2471{
5f7a75ac
MG
2472 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2473 RECLAIM_DISTANCE;
957f822a 2474}
9276b1bc 2475#else /* CONFIG_NUMA */
81c0a2bb
JW
2476static bool zone_local(struct zone *local_zone, struct zone *zone)
2477{
2478 return true;
2479}
2480
957f822a
DR
2481static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2482{
2483 return true;
2484}
9276b1bc
PJ
2485#endif /* CONFIG_NUMA */
2486
4ffeaf35
MG
2487static void reset_alloc_batches(struct zone *preferred_zone)
2488{
2489 struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2490
2491 do {
2492 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2493 high_wmark_pages(zone) - low_wmark_pages(zone) -
2494 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
57054651 2495 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
4ffeaf35
MG
2496 } while (zone++ != preferred_zone);
2497}
2498
7fb1d9fc 2499/*
0798e519 2500 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
2501 * a page.
2502 */
2503static struct page *
a9263751
VB
2504get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2505 const struct alloc_context *ac)
753ee728 2506{
a9263751 2507 struct zonelist *zonelist = ac->zonelist;
dd1a239f 2508 struct zoneref *z;
7fb1d9fc 2509 struct page *page = NULL;
5117f45d 2510 struct zone *zone;
4ffeaf35
MG
2511 int nr_fair_skipped = 0;
2512 bool zonelist_rescan;
54a6eb5c 2513
9276b1bc 2514zonelist_scan:
4ffeaf35
MG
2515 zonelist_rescan = false;
2516
7fb1d9fc 2517 /*
9276b1bc 2518 * Scan zonelist, looking for a zone with enough free.
344736f2 2519 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
7fb1d9fc 2520 */
a9263751
VB
2521 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2522 ac->nodemask) {
e085dbc5
JW
2523 unsigned long mark;
2524
664eedde
MG
2525 if (cpusets_enabled() &&
2526 (alloc_flags & ALLOC_CPUSET) &&
344736f2 2527 !cpuset_zone_allowed(zone, gfp_mask))
cd38b115 2528 continue;
81c0a2bb
JW
2529 /*
2530 * Distribute pages in proportion to the individual
2531 * zone size to ensure fair page aging. The zone a
2532 * page was allocated in should have no effect on the
2533 * time the page has in memory before being reclaimed.
81c0a2bb 2534 */
3a025760 2535 if (alloc_flags & ALLOC_FAIR) {
a9263751 2536 if (!zone_local(ac->preferred_zone, zone))
f7b5d647 2537 break;
57054651 2538 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
4ffeaf35 2539 nr_fair_skipped++;
3a025760 2540 continue;
4ffeaf35 2541 }
81c0a2bb 2542 }
a756cf59
JW
2543 /*
2544 * When allocating a page cache page for writing, we
2545 * want to get it from a zone that is within its dirty
2546 * limit, such that no single zone holds more than its
2547 * proportional share of globally allowed dirty pages.
2548 * The dirty limits take into account the zone's
2549 * lowmem reserves and high watermark so that kswapd
2550 * should be able to balance it without having to
2551 * write pages from its LRU list.
2552 *
2553 * This may look like it could increase pressure on
2554 * lower zones by failing allocations in higher zones
2555 * before they are full. But the pages that do spill
2556 * over are limited as the lower zones are protected
2557 * by this very same mechanism. It should not become
2558 * a practical burden to them.
2559 *
2560 * XXX: For now, allow allocations to potentially
2561 * exceed the per-zone dirty limit in the slowpath
c9ab0c4f 2562 * (spread_dirty_pages unset) before going into reclaim,
a756cf59
JW
2563 * which is important when on a NUMA setup the allowed
2564 * zones are together not big enough to reach the
2565 * global limit. The proper fix for these situations
2566 * will require awareness of zones in the
2567 * dirty-throttling and the flusher threads.
2568 */
c9ab0c4f 2569 if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
800a1e75 2570 continue;
7fb1d9fc 2571
e085dbc5
JW
2572 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2573 if (!zone_watermark_ok(zone, order, mark,
a9263751 2574 ac->classzone_idx, alloc_flags)) {
fa5e084e
MG
2575 int ret;
2576
5dab2911
MG
2577 /* Checked here to keep the fast path fast */
2578 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2579 if (alloc_flags & ALLOC_NO_WATERMARKS)
2580 goto try_this_zone;
2581
957f822a 2582 if (zone_reclaim_mode == 0 ||
a9263751 2583 !zone_allows_reclaim(ac->preferred_zone, zone))
cd38b115
MG
2584 continue;
2585
fa5e084e
MG
2586 ret = zone_reclaim(zone, gfp_mask, order);
2587 switch (ret) {
2588 case ZONE_RECLAIM_NOSCAN:
2589 /* did not scan */
cd38b115 2590 continue;
fa5e084e
MG
2591 case ZONE_RECLAIM_FULL:
2592 /* scanned but unreclaimable */
cd38b115 2593 continue;
fa5e084e
MG
2594 default:
2595 /* did we reclaim enough */
fed2719e 2596 if (zone_watermark_ok(zone, order, mark,
a9263751 2597 ac->classzone_idx, alloc_flags))
fed2719e
MG
2598 goto try_this_zone;
2599
fed2719e 2600 continue;
0798e519 2601 }
7fb1d9fc
RS
2602 }
2603
fa5e084e 2604try_this_zone:
a9263751 2605 page = buffered_rmqueue(ac->preferred_zone, zone, order,
0aaa29a5 2606 gfp_mask, alloc_flags, ac->migratetype);
75379191
VB
2607 if (page) {
2608 if (prep_new_page(page, order, gfp_mask, alloc_flags))
2609 goto try_this_zone;
0aaa29a5
MG
2610
2611 /*
2612 * If this is a high-order atomic allocation then check
2613 * if the pageblock should be reserved for the future
2614 */
2615 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
2616 reserve_highatomic_pageblock(page, zone, order);
2617
75379191
VB
2618 return page;
2619 }
54a6eb5c 2620 }
9276b1bc 2621
4ffeaf35
MG
2622 /*
2623 * The first pass makes sure allocations are spread fairly within the
2624 * local node. However, the local node might have free pages left
2625 * after the fairness batches are exhausted, and remote zones haven't
2626 * even been considered yet. Try once more without fairness, and
2627 * include remote zones now, before entering the slowpath and waking
2628 * kswapd: prefer spilling to a remote zone over swapping locally.
2629 */
2630 if (alloc_flags & ALLOC_FAIR) {
2631 alloc_flags &= ~ALLOC_FAIR;
2632 if (nr_fair_skipped) {
2633 zonelist_rescan = true;
a9263751 2634 reset_alloc_batches(ac->preferred_zone);
4ffeaf35
MG
2635 }
2636 if (nr_online_nodes > 1)
2637 zonelist_rescan = true;
2638 }
2639
4ffeaf35
MG
2640 if (zonelist_rescan)
2641 goto zonelist_scan;
2642
2643 return NULL;
753ee728
MH
2644}
2645
29423e77
DR
2646/*
2647 * Large machines with many possible nodes should not always dump per-node
2648 * meminfo in irq context.
2649 */
2650static inline bool should_suppress_show_mem(void)
2651{
2652 bool ret = false;
2653
2654#if NODES_SHIFT > 8
2655 ret = in_interrupt();
2656#endif
2657 return ret;
2658}
2659
a238ab5b
DH
2660static DEFINE_RATELIMIT_STATE(nopage_rs,
2661 DEFAULT_RATELIMIT_INTERVAL,
2662 DEFAULT_RATELIMIT_BURST);
2663
d00181b9 2664void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
a238ab5b 2665{
a238ab5b
DH
2666 unsigned int filter = SHOW_MEM_FILTER_NODES;
2667
c0a32fc5
SG
2668 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2669 debug_guardpage_minorder() > 0)
a238ab5b
DH
2670 return;
2671
2672 /*
2673 * This documents exceptions given to allocations in certain
2674 * contexts that are allowed to allocate outside current's set
2675 * of allowed nodes.
2676 */
2677 if (!(gfp_mask & __GFP_NOMEMALLOC))
2678 if (test_thread_flag(TIF_MEMDIE) ||
2679 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2680 filter &= ~SHOW_MEM_FILTER_NODES;
d0164adc 2681 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
2682 filter &= ~SHOW_MEM_FILTER_NODES;
2683
2684 if (fmt) {
3ee9a4f0
JP
2685 struct va_format vaf;
2686 va_list args;
2687
a238ab5b 2688 va_start(args, fmt);
3ee9a4f0
JP
2689
2690 vaf.fmt = fmt;
2691 vaf.va = &args;
2692
2693 pr_warn("%pV", &vaf);
2694
a238ab5b
DH
2695 va_end(args);
2696 }
2697
d00181b9 2698 pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
3ee9a4f0 2699 current->comm, order, gfp_mask);
a238ab5b
DH
2700
2701 dump_stack();
2702 if (!should_suppress_show_mem())
2703 show_mem(filter);
2704}
2705
11e33f6a
MG
2706static inline struct page *
2707__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 2708 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 2709{
6e0fc46d
DR
2710 struct oom_control oc = {
2711 .zonelist = ac->zonelist,
2712 .nodemask = ac->nodemask,
2713 .gfp_mask = gfp_mask,
2714 .order = order,
6e0fc46d 2715 };
11e33f6a
MG
2716 struct page *page;
2717
9879de73
JW
2718 *did_some_progress = 0;
2719
9879de73 2720 /*
dc56401f
JW
2721 * Acquire the oom lock. If that fails, somebody else is
2722 * making progress for us.
9879de73 2723 */
dc56401f 2724 if (!mutex_trylock(&oom_lock)) {
9879de73 2725 *did_some_progress = 1;
11e33f6a 2726 schedule_timeout_uninterruptible(1);
1da177e4
LT
2727 return NULL;
2728 }
6b1de916 2729
11e33f6a
MG
2730 /*
2731 * Go through the zonelist yet one more time, keep very high watermark
2732 * here, this is only to catch a parallel oom killing, we must fail if
2733 * we're still under heavy pressure.
2734 */
a9263751
VB
2735 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2736 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 2737 if (page)
11e33f6a
MG
2738 goto out;
2739
4365a567 2740 if (!(gfp_mask & __GFP_NOFAIL)) {
9879de73
JW
2741 /* Coredumps can quickly deplete all memory reserves */
2742 if (current->flags & PF_DUMPCORE)
2743 goto out;
4365a567
KH
2744 /* The OOM killer will not help higher order allocs */
2745 if (order > PAGE_ALLOC_COSTLY_ORDER)
2746 goto out;
03668b3c 2747 /* The OOM killer does not needlessly kill tasks for lowmem */
a9263751 2748 if (ac->high_zoneidx < ZONE_NORMAL)
03668b3c 2749 goto out;
9083905a 2750 /* The OOM killer does not compensate for IO-less reclaim */
cc873177
JW
2751 if (!(gfp_mask & __GFP_FS)) {
2752 /*
2753 * XXX: Page reclaim didn't yield anything,
2754 * and the OOM killer can't be invoked, but
9083905a 2755 * keep looping as per tradition.
cc873177
JW
2756 */
2757 *did_some_progress = 1;
9879de73 2758 goto out;
cc873177 2759 }
9083905a
JW
2760 if (pm_suspended_storage())
2761 goto out;
4167e9b2 2762 /* The OOM killer may not free memory on a specific node */
4365a567
KH
2763 if (gfp_mask & __GFP_THISNODE)
2764 goto out;
2765 }
11e33f6a 2766 /* Exhausted what can be done so it's blamo time */
5020e285 2767 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
c32b3cbe 2768 *did_some_progress = 1;
5020e285
MH
2769
2770 if (gfp_mask & __GFP_NOFAIL) {
2771 page = get_page_from_freelist(gfp_mask, order,
2772 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
2773 /*
2774 * fallback to ignore cpuset restriction if our nodes
2775 * are depleted
2776 */
2777 if (!page)
2778 page = get_page_from_freelist(gfp_mask, order,
2779 ALLOC_NO_WATERMARKS, ac);
2780 }
2781 }
11e33f6a 2782out:
dc56401f 2783 mutex_unlock(&oom_lock);
11e33f6a
MG
2784 return page;
2785}
2786
56de7263
MG
2787#ifdef CONFIG_COMPACTION
2788/* Try memory compaction for high-order allocations before reclaim */
2789static struct page *
2790__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2791 int alloc_flags, const struct alloc_context *ac,
2792 enum migrate_mode mode, int *contended_compaction,
2793 bool *deferred_compaction)
56de7263 2794{
53853e2d 2795 unsigned long compact_result;
98dd3b48 2796 struct page *page;
53853e2d
VB
2797
2798 if (!order)
66199712 2799 return NULL;
66199712 2800
c06b1fca 2801 current->flags |= PF_MEMALLOC;
1a6d53a1
VB
2802 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2803 mode, contended_compaction);
c06b1fca 2804 current->flags &= ~PF_MEMALLOC;
56de7263 2805
98dd3b48
VB
2806 switch (compact_result) {
2807 case COMPACT_DEFERRED:
53853e2d 2808 *deferred_compaction = true;
98dd3b48
VB
2809 /* fall-through */
2810 case COMPACT_SKIPPED:
2811 return NULL;
2812 default:
2813 break;
2814 }
53853e2d 2815
98dd3b48
VB
2816 /*
2817 * At least in one zone compaction wasn't deferred or skipped, so let's
2818 * count a compaction stall
2819 */
2820 count_vm_event(COMPACTSTALL);
8fb74b9f 2821
a9263751
VB
2822 page = get_page_from_freelist(gfp_mask, order,
2823 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
53853e2d 2824
98dd3b48
VB
2825 if (page) {
2826 struct zone *zone = page_zone(page);
53853e2d 2827
98dd3b48
VB
2828 zone->compact_blockskip_flush = false;
2829 compaction_defer_reset(zone, order, true);
2830 count_vm_event(COMPACTSUCCESS);
2831 return page;
2832 }
56de7263 2833
98dd3b48
VB
2834 /*
2835 * It's bad if compaction run occurs and fails. The most likely reason
2836 * is that pages exist, but not enough to satisfy watermarks.
2837 */
2838 count_vm_event(COMPACTFAIL);
66199712 2839
98dd3b48 2840 cond_resched();
56de7263
MG
2841
2842 return NULL;
2843}
2844#else
2845static inline struct page *
2846__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2847 int alloc_flags, const struct alloc_context *ac,
2848 enum migrate_mode mode, int *contended_compaction,
2849 bool *deferred_compaction)
56de7263
MG
2850{
2851 return NULL;
2852}
2853#endif /* CONFIG_COMPACTION */
2854
bba90710
MS
2855/* Perform direct synchronous page reclaim */
2856static int
a9263751
VB
2857__perform_reclaim(gfp_t gfp_mask, unsigned int order,
2858 const struct alloc_context *ac)
11e33f6a 2859{
11e33f6a 2860 struct reclaim_state reclaim_state;
bba90710 2861 int progress;
11e33f6a
MG
2862
2863 cond_resched();
2864
2865 /* We now go into synchronous reclaim */
2866 cpuset_memory_pressure_bump();
c06b1fca 2867 current->flags |= PF_MEMALLOC;
11e33f6a
MG
2868 lockdep_set_current_reclaim_state(gfp_mask);
2869 reclaim_state.reclaimed_slab = 0;
c06b1fca 2870 current->reclaim_state = &reclaim_state;
11e33f6a 2871
a9263751
VB
2872 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2873 ac->nodemask);
11e33f6a 2874
c06b1fca 2875 current->reclaim_state = NULL;
11e33f6a 2876 lockdep_clear_current_reclaim_state();
c06b1fca 2877 current->flags &= ~PF_MEMALLOC;
11e33f6a
MG
2878
2879 cond_resched();
2880
bba90710
MS
2881 return progress;
2882}
2883
2884/* The really slow allocator path where we enter direct reclaim */
2885static inline struct page *
2886__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2887 int alloc_flags, const struct alloc_context *ac,
2888 unsigned long *did_some_progress)
bba90710
MS
2889{
2890 struct page *page = NULL;
2891 bool drained = false;
2892
a9263751 2893 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce
MG
2894 if (unlikely(!(*did_some_progress)))
2895 return NULL;
11e33f6a 2896
9ee493ce 2897retry:
a9263751
VB
2898 page = get_page_from_freelist(gfp_mask, order,
2899 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
9ee493ce
MG
2900
2901 /*
2902 * If an allocation failed after direct reclaim, it could be because
0aaa29a5
MG
2903 * pages are pinned on the per-cpu lists or in high alloc reserves.
 2904	 * Shrink them and try again
9ee493ce
MG
2905 */
2906 if (!page && !drained) {
0aaa29a5 2907 unreserve_highatomic_pageblock(ac);
93481ff0 2908 drain_all_pages(NULL);
9ee493ce
MG
2909 drained = true;
2910 goto retry;
2911 }
2912
11e33f6a
MG
2913 return page;
2914}
2915
a9263751 2916static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3a025760
JW
2917{
2918 struct zoneref *z;
2919 struct zone *zone;
2920
a9263751
VB
2921 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2922 ac->high_zoneidx, ac->nodemask)
2923 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
3a025760
JW
2924}
2925
341ce06f
PZ
2926static inline int
2927gfp_to_alloc_flags(gfp_t gfp_mask)
2928{
341ce06f 2929 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 2930
a56f57ff 2931 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 2932 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 2933
341ce06f
PZ
2934 /*
2935 * The caller may dip into page reserves a bit more if the caller
2936 * cannot run direct reclaim, or if the caller has realtime scheduling
2937 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 2938 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 2939 */
e6223a3b 2940 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 2941
d0164adc 2942 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 2943 /*
b104a35d
DR
2944 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2945 * if it can't schedule.
5c3240d9 2946 */
b104a35d 2947 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 2948 alloc_flags |= ALLOC_HARDER;
523b9458 2949 /*
b104a35d 2950 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 2951 * comment for __cpuset_node_allowed().
523b9458 2952 */
341ce06f 2953 alloc_flags &= ~ALLOC_CPUSET;
c06b1fca 2954 } else if (unlikely(rt_task(current)) && !in_interrupt())
341ce06f
PZ
2955 alloc_flags |= ALLOC_HARDER;
2956
b37f1dd0
MG
2957 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2958 if (gfp_mask & __GFP_MEMALLOC)
2959 alloc_flags |= ALLOC_NO_WATERMARKS;
907aed48
MG
2960 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2961 alloc_flags |= ALLOC_NO_WATERMARKS;
2962 else if (!in_interrupt() &&
2963 ((current->flags & PF_MEMALLOC) ||
2964 unlikely(test_thread_flag(TIF_MEMDIE))))
341ce06f 2965 alloc_flags |= ALLOC_NO_WATERMARKS;
1da177e4 2966 }
d95ea5d1 2967#ifdef CONFIG_CMA
43e7a34d 2968 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
d95ea5d1
BZ
2969 alloc_flags |= ALLOC_CMA;
2970#endif
341ce06f
PZ
2971 return alloc_flags;
2972}
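
/*
 * Worked example (not part of the original file): a plain GFP_ATOMIC
 * request carries __GFP_HIGH and __GFP_ATOMIC, so it starts from
 * ALLOC_WMARK_MIN | ALLOC_CPUSET, picks up ALLOC_HIGH from __GFP_HIGH,
 * gains ALLOC_HARDER (no __GFP_NOMEMALLOC set) and has ALLOC_CPUSET
 * cleared, ending up as ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 * A GFP_KERNEL request takes none of those branches and keeps the
 * plain ALLOC_WMARK_MIN | ALLOC_CPUSET (plus ALLOC_CMA for movable
 * allocations when CONFIG_CMA is enabled).
 */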
2973
072bb0aa
MG
2974bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2975{
b37f1dd0 2976 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
072bb0aa
MG
2977}
2978
d0164adc
MG
2979static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
2980{
2981 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
2982}
2983
11e33f6a
MG
2984static inline struct page *
2985__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 2986 struct alloc_context *ac)
11e33f6a 2987{
d0164adc 2988 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
11e33f6a
MG
2989 struct page *page = NULL;
2990 int alloc_flags;
2991 unsigned long pages_reclaimed = 0;
2992 unsigned long did_some_progress;
e0b9daeb 2993 enum migrate_mode migration_mode = MIGRATE_ASYNC;
66199712 2994 bool deferred_compaction = false;
1f9efdef 2995 int contended_compaction = COMPACT_CONTENDED_NONE;
1da177e4 2996
72807a74
MG
2997 /*
2998 * In the slowpath, we sanity check order to avoid ever trying to
2999 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3000 * be using allocators in order of preference for an area that is
3001 * too large.
3002 */
1fc28b70
MG
3003 if (order >= MAX_ORDER) {
3004 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 3005 return NULL;
1fc28b70 3006 }
1da177e4 3007
d0164adc
MG
3008 /*
3009 * We also sanity check to catch abuse of atomic reserves being used by
3010 * callers that are not in atomic context.
3011 */
3012 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3013 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3014 gfp_mask &= ~__GFP_ATOMIC;
3015
952f3b51 3016 /*
4167e9b2
DR
3017 * If this allocation cannot block and it is for a specific node, then
3018 * fail early. There's no need to wakeup kswapd or retry for a
3019 * speculative node-specific allocation.
952f3b51 3020 */
d0164adc 3021 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
952f3b51
CL
3022 goto nopage;
3023
9879de73 3024retry:
d0164adc 3025 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
a9263751 3026 wake_all_kswapds(order, ac);
1da177e4 3027
9bf2229f 3028 /*
7fb1d9fc
RS
3029 * OK, we're below the kswapd watermark and have kicked background
3030 * reclaim. Now things get more complex, so set up alloc_flags according
3031 * to how we want to proceed.
9bf2229f 3032 */
341ce06f 3033 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 3034
f33261d7
DR
3035 /*
3036 * Find the true preferred zone if the allocation is unconstrained by
3037 * cpusets.
3038 */
a9263751 3039 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
d8846374 3040 struct zoneref *preferred_zoneref;
a9263751
VB
3041 preferred_zoneref = first_zones_zonelist(ac->zonelist,
3042 ac->high_zoneidx, NULL, &ac->preferred_zone);
3043 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
d8846374 3044 }
f33261d7 3045
341ce06f 3046 /* This is the last chance, in general, before the goto nopage. */
a9263751
VB
3047 page = get_page_from_freelist(gfp_mask, order,
3048 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
7fb1d9fc
RS
3049 if (page)
3050 goto got_pg;
1da177e4 3051
11e33f6a 3052 /* Allocate without watermarks if the context allows */
341ce06f 3053 if (alloc_flags & ALLOC_NO_WATERMARKS) {
183f6371
MG
3054 /*
3055 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3056 * the allocation is high priority and these type of
3057 * allocations are system rather than user orientated
3058 */
a9263751 3059 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
33d53103
MH
3060 page = get_page_from_freelist(gfp_mask, order,
3061 ALLOC_NO_WATERMARKS, ac);
3062 if (page)
3063 goto got_pg;
1da177e4
LT
3064 }
3065
d0164adc
MG
3066 /* Caller is not willing to reclaim, we can't balance anything */
3067 if (!can_direct_reclaim) {
aed0a0e3 3068 /*
33d53103
MH
3069 * All existing users of the __GFP_NOFAIL are blockable, so warn
3070 * of any new users that actually allow this type of allocation
3071 * to fail.
aed0a0e3
DR
3072 */
3073 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
1da177e4 3074 goto nopage;
aed0a0e3 3075 }
1da177e4 3076
341ce06f 3077 /* Avoid recursion of direct reclaim */
33d53103
MH
3078 if (current->flags & PF_MEMALLOC) {
3079 /*
3080 * __GFP_NOFAIL request from this context is rather bizarre
3081 * because we cannot reclaim anything and only can loop waiting
 3082 * for somebody to do the work for us.
3083 */
3084 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3085 cond_resched();
3086 goto retry;
3087 }
341ce06f 3088 goto nopage;
33d53103 3089 }
341ce06f 3090
6583bb64
DR
3091 /* Avoid allocations with no watermarks from looping endlessly */
3092 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3093 goto nopage;
3094
77f1fe6b
MG
3095 /*
3096 * Try direct compaction. The first pass is asynchronous. Subsequent
3097 * attempts after direct reclaim are synchronous
3098 */
a9263751
VB
3099 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3100 migration_mode,
3101 &contended_compaction,
53853e2d 3102 &deferred_compaction);
56de7263
MG
3103 if (page)
3104 goto got_pg;
75f30861 3105
1f9efdef 3106 /* Checks for THP-specific high-order allocations */
d0164adc 3107 if (is_thp_gfp_mask(gfp_mask)) {
1f9efdef
VB
3108 /*
3109 * If compaction is deferred for high-order allocations, it is
3110 * because sync compaction recently failed. If this is the case
3111 * and the caller requested a THP allocation, we do not want
3112 * to heavily disrupt the system, so we fail the allocation
3113 * instead of entering direct reclaim.
3114 */
3115 if (deferred_compaction)
3116 goto nopage;
3117
3118 /*
3119 * In all zones where compaction was attempted (and not
3120 * deferred or skipped), lock contention has been detected.
3121 * For THP allocation we do not want to disrupt the others
3122 * so we fallback to base pages instead.
3123 */
3124 if (contended_compaction == COMPACT_CONTENDED_LOCK)
3125 goto nopage;
3126
3127 /*
3128 * If compaction was aborted due to need_resched(), we do not
3129 * want to further increase allocation latency, unless it is
3130 * khugepaged trying to collapse.
3131 */
3132 if (contended_compaction == COMPACT_CONTENDED_SCHED
3133 && !(current->flags & PF_KTHREAD))
3134 goto nopage;
3135 }
66199712 3136
8fe78048
DR
3137 /*
3138 * It can become very expensive to allocate transparent hugepages at
3139 * fault, so use asynchronous memory compaction for THP unless it is
3140 * khugepaged trying to collapse.
3141 */
d0164adc 3142 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
8fe78048
DR
3143 migration_mode = MIGRATE_SYNC_LIGHT;
3144
11e33f6a 3145 /* Try direct reclaim and then allocating */
a9263751
VB
3146 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3147 &did_some_progress);
11e33f6a
MG
3148 if (page)
3149 goto got_pg;
1da177e4 3150
9083905a
JW
3151 /* Do not loop if specifically requested */
3152 if (gfp_mask & __GFP_NORETRY)
3153 goto noretry;
3154
3155 /* Keep reclaiming pages as long as there is reasonable progress */
a41f24ea 3156 pages_reclaimed += did_some_progress;
9083905a
JW
3157 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
3158 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
11e33f6a 3159 /* Wait for some write requests to complete then retry */
a9263751 3160 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
9879de73 3161 goto retry;
1da177e4
LT
3162 }
3163
9083905a
JW
3164 /* Reclaim has failed us, start killing things */
3165 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3166 if (page)
3167 goto got_pg;
3168
3169 /* Retry as long as the OOM killer is making progress */
3170 if (did_some_progress)
3171 goto retry;
3172
3173noretry:
3174 /*
3175 * High-order allocations do not necessarily loop after
3176 * direct reclaim and reclaim/compaction depends on compaction
3177 * being called after reclaim so call directly if necessary
3178 */
3179 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3180 ac, migration_mode,
3181 &contended_compaction,
3182 &deferred_compaction);
3183 if (page)
3184 goto got_pg;
1da177e4 3185nopage:
a238ab5b 3186 warn_alloc_failed(gfp_mask, order, NULL);
1da177e4 3187got_pg:
072bb0aa 3188 return page;
1da177e4 3189}
11e33f6a
MG
3190
3191/*
3192 * This is the 'heart' of the zoned buddy allocator.
3193 */
3194struct page *
3195__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3196 struct zonelist *zonelist, nodemask_t *nodemask)
3197{
d8846374 3198 struct zoneref *preferred_zoneref;
cc9a6c87 3199 struct page *page = NULL;
cc9a6c87 3200 unsigned int cpuset_mems_cookie;
3a025760 3201 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
91fbdc0f 3202 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
a9263751
VB
3203 struct alloc_context ac = {
3204 .high_zoneidx = gfp_zone(gfp_mask),
3205 .nodemask = nodemask,
3206 .migratetype = gfpflags_to_migratetype(gfp_mask),
3207 };
11e33f6a 3208
dcce284a
BH
3209 gfp_mask &= gfp_allowed_mask;
3210
11e33f6a
MG
3211 lockdep_trace_alloc(gfp_mask);
3212
d0164adc 3213 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
11e33f6a
MG
3214
3215 if (should_fail_alloc_page(gfp_mask, order))
3216 return NULL;
3217
3218 /*
3219 * Check the zones suitable for the gfp_mask contain at least one
3220 * valid zone. It's possible to have an empty zonelist as a result
4167e9b2 3221 * of __GFP_THISNODE and a memoryless node
11e33f6a
MG
3222 */
3223 if (unlikely(!zonelist->_zonerefs->zone))
3224 return NULL;
3225
a9263751 3226 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
21bb9bd1
VB
3227 alloc_flags |= ALLOC_CMA;
3228
cc9a6c87 3229retry_cpuset:
d26914d1 3230 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 3231
a9263751
VB
3232 /* We set it here, as __alloc_pages_slowpath might have changed it */
3233 ac.zonelist = zonelist;
c9ab0c4f
MG
3234
3235 /* Dirty zone balancing only done in the fast path */
3236 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3237
5117f45d 3238 /* The preferred zone is used for statistics later */
a9263751
VB
3239 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3240 ac.nodemask ? : &cpuset_current_mems_allowed,
3241 &ac.preferred_zone);
3242 if (!ac.preferred_zone)
cc9a6c87 3243 goto out;
a9263751 3244 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
5117f45d
MG
3245
3246 /* First allocation attempt */
91fbdc0f 3247 alloc_mask = gfp_mask|__GFP_HARDWALL;
a9263751 3248 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
21caf2fc
ML
3249 if (unlikely(!page)) {
3250 /*
3251 * Runtime PM, block IO and its error handling path
3252 * can deadlock because I/O on the device might not
3253 * complete.
3254 */
91fbdc0f 3255 alloc_mask = memalloc_noio_flags(gfp_mask);
c9ab0c4f 3256 ac.spread_dirty_pages = false;
91fbdc0f 3257
a9263751 3258 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
21caf2fc 3259 }
11e33f6a 3260
23f086f9
XQ
3261 if (kmemcheck_enabled && page)
3262 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3263
a9263751 3264 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
cc9a6c87
MG
3265
3266out:
3267 /*
3268 * When updating a task's mems_allowed, it is possible to race with
3269 * parallel threads in such a way that an allocation can fail while
3270 * the mask is being updated. If a page allocation is about to fail,
3271 * check if the cpuset changed during allocation and if so, retry.
3272 */
d26914d1 3273 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87
MG
3274 goto retry_cpuset;
3275
11e33f6a 3276 return page;
1da177e4 3277}
d239171e 3278EXPORT_SYMBOL(__alloc_pages_nodemask);
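/*
 * Illustrative sketch, not part of the original file: the familiar entry
 * points are thin wrappers that funnel into __alloc_pages_nodemask().
 * A driver wanting four contiguous pages might simply do:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 * On NUMA kernels of this vintage, alloc_pages() resolves (roughly) to
 * alloc_pages_current(), which supplies the current task's
 * mempolicy-derived zonelist and nodemask to __alloc_pages_nodemask().
 */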
1da177e4
LT
3279
3280/*
3281 * Common helper functions.
3282 */
920c7a5d 3283unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 3284{
945a1113
AM
3285 struct page *page;
3286
3287 /*
3288 * __get_free_pages() returns a 32-bit address, which cannot represent
3289 * a highmem page
3290 */
3291 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3292
1da177e4
LT
3293 page = alloc_pages(gfp_mask, order);
3294 if (!page)
3295 return 0;
3296 return (unsigned long) page_address(page);
3297}
1da177e4
LT
3298EXPORT_SYMBOL(__get_free_pages);
3299
920c7a5d 3300unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 3301{
945a1113 3302 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 3303}
1da177e4
LT
3304EXPORT_SYMBOL(get_zeroed_page);
3305
920c7a5d 3306void __free_pages(struct page *page, unsigned int order)
1da177e4 3307{
b5810039 3308 if (put_page_testzero(page)) {
1da177e4 3309 if (order == 0)
b745bc85 3310 free_hot_cold_page(page, false);
1da177e4
LT
3311 else
3312 __free_pages_ok(page, order);
3313 }
3314}
3315
3316EXPORT_SYMBOL(__free_pages);
3317
920c7a5d 3318void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
3319{
3320 if (addr != 0) {
725d704e 3321 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
3322 __free_pages(virt_to_page((void *)addr), order);
3323 }
3324}
3325
3326EXPORT_SYMBOL(free_pages);
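/*
 * Illustrative sketch, not part of the original file:
 * __get_free_pages()/free_pages() deal in kernel virtual addresses rather
 * than struct page pointers, which is convenient for small lowmem buffers:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	(two pages)
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 1);
 *
 * get_zeroed_page(GFP_KERNEL) is the order-0, pre-zeroed form of the same
 * pattern. __GFP_HIGHMEM is rejected above because a highmem page has no
 * permanent kernel mapping whose address could be returned.
 */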
3327
b63ae8ca
AD
3328/*
3329 * Page Fragment:
3330 * An arbitrary-length arbitrary-offset area of memory which resides
3331 * within a 0 or higher order page. Multiple fragments within that page
3332 * are individually refcounted, in the page's reference counter.
3333 *
3334 * The page_frag functions below provide a simple allocation framework for
3335 * page fragments. This is used by the network stack and network device
3336 * drivers to provide a backing region of memory for use as either an
3337 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3338 */
3339static struct page *__page_frag_refill(struct page_frag_cache *nc,
3340 gfp_t gfp_mask)
3341{
3342 struct page *page = NULL;
3343 gfp_t gfp = gfp_mask;
3344
3345#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3346 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3347 __GFP_NOMEMALLOC;
3348 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3349 PAGE_FRAG_CACHE_MAX_ORDER);
3350 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3351#endif
3352 if (unlikely(!page))
3353 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3354
3355 nc->va = page ? page_address(page) : NULL;
3356
3357 return page;
3358}
3359
3360void *__alloc_page_frag(struct page_frag_cache *nc,
3361 unsigned int fragsz, gfp_t gfp_mask)
3362{
3363 unsigned int size = PAGE_SIZE;
3364 struct page *page;
3365 int offset;
3366
3367 if (unlikely(!nc->va)) {
3368refill:
3369 page = __page_frag_refill(nc, gfp_mask);
3370 if (!page)
3371 return NULL;
3372
3373#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3374 /* if size can vary use size else just use PAGE_SIZE */
3375 size = nc->size;
3376#endif
3377 /* Even if we own the page, we do not use atomic_set().
3378 * This would break get_page_unless_zero() users.
3379 */
3380 atomic_add(size - 1, &page->_count);
3381
3382 /* reset page count bias and offset to start of new frag */
2f064f34 3383 nc->pfmemalloc = page_is_pfmemalloc(page);
b63ae8ca
AD
3384 nc->pagecnt_bias = size;
3385 nc->offset = size;
3386 }
3387
3388 offset = nc->offset - fragsz;
3389 if (unlikely(offset < 0)) {
3390 page = virt_to_page(nc->va);
3391
3392 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3393 goto refill;
3394
3395#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3396 /* if size can vary use size else just use PAGE_SIZE */
3397 size = nc->size;
3398#endif
3399 /* OK, page count is 0, we can safely set it */
3400 atomic_set(&page->_count, size);
3401
3402 /* reset page count bias and offset to start of new frag */
3403 nc->pagecnt_bias = size;
3404 offset = size - fragsz;
3405 }
3406
3407 nc->pagecnt_bias--;
3408 nc->offset = offset;
3409
3410 return nc->va + offset;
3411}
3412EXPORT_SYMBOL(__alloc_page_frag);
3413
3414/*
3415 * Frees a page fragment allocated out of either a compound or order 0 page.
3416 */
3417void __free_page_frag(void *addr)
3418{
3419 struct page *page = virt_to_head_page(addr);
3420
3421 if (unlikely(put_page_testzero(page)))
3422 __free_pages_ok(page, compound_order(page));
3423}
3424EXPORT_SYMBOL(__free_page_frag);
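/*
 * Illustrative sketch, not part of the original file: a typical
 * networking-style user keeps a per-CPU page_frag_cache (the name below is
 * hypothetical) and carves small buffers out of it:
 *
 *	static DEFINE_PER_CPU(struct page_frag_cache, frag_cache);
 *
 *	void *data = __alloc_page_frag(this_cpu_ptr(&frag_cache),
 *				       256, GFP_ATOMIC);
 *	if (!data)
 *		return NULL;
 *	...
 *	__free_page_frag(data);
 *
 * Every fragment holds one reference on the backing page (paid for up front
 * via pagecnt_bias), so the page only goes back to the buddy allocator once
 * the last fragment has been freed.
 */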
3425
6a1a0d3b 3426/*
52383431 3427 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
a9bb7e62
VD
 3428 * of the current memory cgroup if __GFP_ACCOUNT is set; other than that, it is
 3429 * equivalent to alloc_pages().
6a1a0d3b 3430 *
52383431
VD
3431 * It should be used when the caller would like to use kmalloc, but since the
3432 * allocation is large, it has to fall back to the page allocator.
3433 */
3434struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3435{
3436 struct page *page;
52383431 3437
52383431 3438 page = alloc_pages(gfp_mask, order);
d05e83a6
VD
3439 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3440 __free_pages(page, order);
3441 page = NULL;
3442 }
52383431
VD
3443 return page;
3444}
3445
3446struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3447{
3448 struct page *page;
52383431 3449
52383431 3450 page = alloc_pages_node(nid, gfp_mask, order);
d05e83a6
VD
3451 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3452 __free_pages(page, order);
3453 page = NULL;
3454 }
52383431
VD
3455 return page;
3456}
3457
3458/*
3459 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3460 * alloc_kmem_pages.
6a1a0d3b 3461 */
52383431 3462void __free_kmem_pages(struct page *page, unsigned int order)
6a1a0d3b 3463{
d05e83a6 3464 memcg_kmem_uncharge(page, order);
6a1a0d3b
GC
3465 __free_pages(page, order);
3466}
3467
52383431 3468void free_kmem_pages(unsigned long addr, unsigned int order)
6a1a0d3b
GC
3469{
3470 if (addr != 0) {
3471 VM_BUG_ON(!virt_addr_valid((void *)addr));
52383431 3472 __free_kmem_pages(virt_to_page((void *)addr), order);
6a1a0d3b
GC
3473 }
3474}
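/*
 * Illustrative sketch, not part of the original file: alloc_kmem_pages() is
 * the page-allocator fallback for kmalloc-style, cgroup-accounted
 * allocations that are too large for the slab caches:
 *
 *	struct page *page = alloc_kmem_pages(GFP_KERNEL | __GFP_ACCOUNT, 4);
 *
 *	if (!page)
 *		return NULL;
 *	...
 *	__free_kmem_pages(page, 4);
 *
 * The only difference from alloc_pages()/__free_pages() is the
 * memcg_kmem_charge()/memcg_kmem_uncharge() pair wrapped around them.
 */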
3475
d00181b9
KS
3476static void *make_alloc_exact(unsigned long addr, unsigned int order,
3477 size_t size)
ee85c2e1
AK
3478{
3479 if (addr) {
3480 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3481 unsigned long used = addr + PAGE_ALIGN(size);
3482
3483 split_page(virt_to_page((void *)addr), order);
3484 while (used < alloc_end) {
3485 free_page(used);
3486 used += PAGE_SIZE;
3487 }
3488 }
3489 return (void *)addr;
3490}
3491
2be0ffe2
TT
3492/**
 3493 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3494 * @size: the number of bytes to allocate
3495 * @gfp_mask: GFP flags for the allocation
3496 *
3497 * This function is similar to alloc_pages(), except that it allocates the
3498 * minimum number of pages to satisfy the request. alloc_pages() can only
3499 * allocate memory in power-of-two pages.
3500 *
3501 * This function is also limited by MAX_ORDER.
3502 *
3503 * Memory allocated by this function must be released by free_pages_exact().
3504 */
3505void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3506{
3507 unsigned int order = get_order(size);
3508 unsigned long addr;
3509
3510 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 3511 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
3512}
3513EXPORT_SYMBOL(alloc_pages_exact);
3514
ee85c2e1
AK
3515/**
3516 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3517 * pages on a node.
b5e6ab58 3518 * @nid: the preferred node ID where memory should be allocated
ee85c2e1
AK
3519 * @size: the number of bytes to allocate
3520 * @gfp_mask: GFP flags for the allocation
3521 *
 3522 * Like alloc_pages_exact(), but tries to allocate on node nid first before falling
3523 * back.
ee85c2e1 3524 */
e1931811 3525void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 3526{
d00181b9 3527 unsigned int order = get_order(size);
ee85c2e1
AK
3528 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3529 if (!p)
3530 return NULL;
3531 return make_alloc_exact((unsigned long)page_address(p), order, size);
3532}
ee85c2e1 3533
2be0ffe2
TT
3534/**
3535 * free_pages_exact - release memory allocated via alloc_pages_exact()
3536 * @virt: the value returned by alloc_pages_exact.
3537 * @size: size of allocation, same value as passed to alloc_pages_exact().
3538 *
3539 * Release the memory allocated by a previous call to alloc_pages_exact.
3540 */
3541void free_pages_exact(void *virt, size_t size)
3542{
3543 unsigned long addr = (unsigned long)virt;
3544 unsigned long end = addr + PAGE_ALIGN(size);
3545
3546 while (addr < end) {
3547 free_page(addr);
3548 addr += PAGE_SIZE;
3549 }
3550}
3551EXPORT_SYMBOL(free_pages_exact);
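/*
 * Illustrative sketch, not part of the original file: alloc_pages_exact()
 * rounds the request up to whole pages rather than to a power-of-two order,
 * so (with 4KB pages) a 20KB buffer costs five pages instead of the eight
 * an order-3 alloc_pages() call would pin:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 *
 * Internally an order-3 block is allocated, split with split_page(), and
 * the three unused tail pages are handed straight back by make_alloc_exact()
 * above.
 */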
3552
e0fb5815
ZY
3553/**
3554 * nr_free_zone_pages - count number of pages beyond high watermark
3555 * @offset: The zone index of the highest zone
3556 *
 3557 * nr_free_zone_pages() counts the number of pages which are beyond the
3558 * high watermark within all zones at or below a given zone index. For each
3559 * zone, the number of pages is calculated as:
834405c3 3560 * managed_pages - high_pages
e0fb5815 3561 */
ebec3862 3562static unsigned long nr_free_zone_pages(int offset)
1da177e4 3563{
dd1a239f 3564 struct zoneref *z;
54a6eb5c
MG
3565 struct zone *zone;
3566
e310fd43 3567 /* Just pick one node, since fallback list is circular */
ebec3862 3568 unsigned long sum = 0;
1da177e4 3569
0e88460d 3570 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 3571
54a6eb5c 3572 for_each_zone_zonelist(zone, z, zonelist, offset) {
b40da049 3573 unsigned long size = zone->managed_pages;
41858966 3574 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
3575 if (size > high)
3576 sum += size - high;
1da177e4
LT
3577 }
3578
3579 return sum;
3580}
3581
e0fb5815
ZY
3582/**
3583 * nr_free_buffer_pages - count number of pages beyond high watermark
3584 *
3585 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3586 * watermark within ZONE_DMA and ZONE_NORMAL.
1da177e4 3587 */
ebec3862 3588unsigned long nr_free_buffer_pages(void)
1da177e4 3589{
af4ca457 3590 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 3591}
c2f1a551 3592EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 3593
e0fb5815
ZY
3594/**
3595 * nr_free_pagecache_pages - count number of pages beyond high watermark
3596 *
3597 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3598 * high watermark within all zones.
1da177e4 3599 */
ebec3862 3600unsigned long nr_free_pagecache_pages(void)
1da177e4 3601{
2a1e274a 3602 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 3603}
08e0f6a9
CL
3604
3605static inline void show_node(struct zone *zone)
1da177e4 3606{
e5adfffc 3607 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 3608 printk("Node %d ", zone_to_nid(zone));
1da177e4 3609}
1da177e4 3610
1da177e4
LT
3611void si_meminfo(struct sysinfo *val)
3612{
3613 val->totalram = totalram_pages;
cc7452b6 3614 val->sharedram = global_page_state(NR_SHMEM);
d23ad423 3615 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 3616 val->bufferram = nr_blockdev_pages();
1da177e4
LT
3617 val->totalhigh = totalhigh_pages;
3618 val->freehigh = nr_free_highpages();
1da177e4
LT
3619 val->mem_unit = PAGE_SIZE;
3620}
3621
3622EXPORT_SYMBOL(si_meminfo);
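/*
 * Illustrative sketch, not part of the original file: si_meminfo() is how
 * sys_sysinfo() and similar consumers take a cheap snapshot of the global
 * counters:
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	pr_debug("free %lu kB of %lu kB\n",
 *		 si.freeram * (si.mem_unit / 1024),
 *		 si.totalram * (si.mem_unit / 1024));
 *
 * All fields are expressed in units of si.mem_unit (PAGE_SIZE here), so
 * callers must scale before reporting byte or kilobyte values.
 */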
3623
3624#ifdef CONFIG_NUMA
3625void si_meminfo_node(struct sysinfo *val, int nid)
3626{
cdd91a77
JL
3627 int zone_type; /* needs to be signed */
3628 unsigned long managed_pages = 0;
1da177e4
LT
3629 pg_data_t *pgdat = NODE_DATA(nid);
3630
cdd91a77
JL
3631 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3632 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3633 val->totalram = managed_pages;
cc7452b6 3634 val->sharedram = node_page_state(nid, NR_SHMEM);
d23ad423 3635 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 3636#ifdef CONFIG_HIGHMEM
b40da049 3637 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
d23ad423
CL
3638 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3639 NR_FREE_PAGES);
98d2b0eb
CL
3640#else
3641 val->totalhigh = 0;
3642 val->freehigh = 0;
3643#endif
1da177e4
LT
3644 val->mem_unit = PAGE_SIZE;
3645}
3646#endif
3647
ddd588b5 3648/*
7bf02ea2
DR
3649 * Determine whether the node should be displayed or not, depending on whether
3650 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 3651 */
7bf02ea2 3652bool skip_free_areas_node(unsigned int flags, int nid)
ddd588b5
DR
3653{
3654 bool ret = false;
cc9a6c87 3655 unsigned int cpuset_mems_cookie;
ddd588b5
DR
3656
3657 if (!(flags & SHOW_MEM_FILTER_NODES))
3658 goto out;
3659
cc9a6c87 3660 do {
d26914d1 3661 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 3662 ret = !node_isset(nid, cpuset_current_mems_allowed);
d26914d1 3663 } while (read_mems_allowed_retry(cpuset_mems_cookie));
ddd588b5
DR
3664out:
3665 return ret;
3666}
3667
1da177e4
LT
3668#define K(x) ((x) << (PAGE_SHIFT-10))
3669
377e4f16
RV
3670static void show_migration_types(unsigned char type)
3671{
3672 static const char types[MIGRATE_TYPES] = {
3673 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 3674 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
3675 [MIGRATE_RECLAIMABLE] = 'E',
3676 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
3677#ifdef CONFIG_CMA
3678 [MIGRATE_CMA] = 'C',
3679#endif
194159fb 3680#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 3681 [MIGRATE_ISOLATE] = 'I',
194159fb 3682#endif
377e4f16
RV
3683 };
3684 char tmp[MIGRATE_TYPES + 1];
3685 char *p = tmp;
3686 int i;
3687
3688 for (i = 0; i < MIGRATE_TYPES; i++) {
3689 if (type & (1 << i))
3690 *p++ = types[i];
3691 }
3692
3693 *p = '\0';
3694 printk("(%s) ", tmp);
3695}
3696
1da177e4
LT
3697/*
3698 * Show free area list (used inside shift_scroll-lock stuff)
3699 * We also calculate the percentage fragmentation. We do this by counting the
3700 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
3701 *
3702 * Bits in @filter:
3703 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3704 * cpuset.
1da177e4 3705 */
7bf02ea2 3706void show_free_areas(unsigned int filter)
1da177e4 3707{
d1bfcdb8 3708 unsigned long free_pcp = 0;
c7241913 3709 int cpu;
1da177e4
LT
3710 struct zone *zone;
3711
ee99c71c 3712 for_each_populated_zone(zone) {
7bf02ea2 3713 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3714 continue;
d1bfcdb8 3715
761b0677
KK
3716 for_each_online_cpu(cpu)
3717 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
1da177e4
LT
3718 }
3719
a731286d
KM
3720 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3721 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
d1bfcdb8
KK
3722 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3723 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 3724 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
d1bfcdb8 3725 " free:%lu free_pcp:%lu free_cma:%lu\n",
4f98a2fe 3726 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 3727 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
3728 global_page_state(NR_ISOLATED_ANON),
3729 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 3730 global_page_state(NR_INACTIVE_FILE),
a731286d 3731 global_page_state(NR_ISOLATED_FILE),
7b854121 3732 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 3733 global_page_state(NR_FILE_DIRTY),
ce866b34 3734 global_page_state(NR_WRITEBACK),
fd39fc85 3735 global_page_state(NR_UNSTABLE_NFS),
3701b033
KM
3736 global_page_state(NR_SLAB_RECLAIMABLE),
3737 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 3738 global_page_state(NR_FILE_MAPPED),
4b02108a 3739 global_page_state(NR_SHMEM),
a25700a5 3740 global_page_state(NR_PAGETABLE),
d1ce749a 3741 global_page_state(NR_BOUNCE),
d1bfcdb8
KK
3742 global_page_state(NR_FREE_PAGES),
3743 free_pcp,
d1ce749a 3744 global_page_state(NR_FREE_CMA_PAGES));
1da177e4 3745
ee99c71c 3746 for_each_populated_zone(zone) {
1da177e4
LT
3747 int i;
3748
7bf02ea2 3749 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3750 continue;
d1bfcdb8
KK
3751
3752 free_pcp = 0;
3753 for_each_online_cpu(cpu)
3754 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3755
1da177e4
LT
3756 show_node(zone);
3757 printk("%s"
3758 " free:%lukB"
3759 " min:%lukB"
3760 " low:%lukB"
3761 " high:%lukB"
4f98a2fe
RR
3762 " active_anon:%lukB"
3763 " inactive_anon:%lukB"
3764 " active_file:%lukB"
3765 " inactive_file:%lukB"
7b854121 3766 " unevictable:%lukB"
a731286d
KM
3767 " isolated(anon):%lukB"
3768 " isolated(file):%lukB"
1da177e4 3769 " present:%lukB"
9feedc9d 3770 " managed:%lukB"
4a0aa73f
KM
3771 " mlocked:%lukB"
3772 " dirty:%lukB"
3773 " writeback:%lukB"
3774 " mapped:%lukB"
4b02108a 3775 " shmem:%lukB"
4a0aa73f
KM
3776 " slab_reclaimable:%lukB"
3777 " slab_unreclaimable:%lukB"
c6a7f572 3778 " kernel_stack:%lukB"
4a0aa73f
KM
3779 " pagetables:%lukB"
3780 " unstable:%lukB"
3781 " bounce:%lukB"
d1bfcdb8
KK
3782 " free_pcp:%lukB"
3783 " local_pcp:%ukB"
d1ce749a 3784 " free_cma:%lukB"
4a0aa73f 3785 " writeback_tmp:%lukB"
1da177e4
LT
3786 " pages_scanned:%lu"
3787 " all_unreclaimable? %s"
3788 "\n",
3789 zone->name,
88f5acf8 3790 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
3791 K(min_wmark_pages(zone)),
3792 K(low_wmark_pages(zone)),
3793 K(high_wmark_pages(zone)),
4f98a2fe
RR
3794 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3795 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3796 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3797 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 3798 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
3799 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3800 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 3801 K(zone->present_pages),
9feedc9d 3802 K(zone->managed_pages),
4a0aa73f
KM
3803 K(zone_page_state(zone, NR_MLOCK)),
3804 K(zone_page_state(zone, NR_FILE_DIRTY)),
3805 K(zone_page_state(zone, NR_WRITEBACK)),
3806 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 3807 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
3808 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3809 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
3810 zone_page_state(zone, NR_KERNEL_STACK) *
3811 THREAD_SIZE / 1024,
4a0aa73f
KM
3812 K(zone_page_state(zone, NR_PAGETABLE)),
3813 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3814 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8
KK
3815 K(free_pcp),
3816 K(this_cpu_read(zone->pageset->pcp.count)),
d1ce749a 3817 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
4a0aa73f 3818 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
0d5d823a 3819 K(zone_page_state(zone, NR_PAGES_SCANNED)),
6e543d57 3820 (!zone_reclaimable(zone) ? "yes" : "no")
1da177e4
LT
3821 );
3822 printk("lowmem_reserve[]:");
3823 for (i = 0; i < MAX_NR_ZONES; i++)
3484b2de 3824 printk(" %ld", zone->lowmem_reserve[i]);
1da177e4
LT
3825 printk("\n");
3826 }
3827
ee99c71c 3828 for_each_populated_zone(zone) {
d00181b9
KS
3829 unsigned int order;
3830 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 3831 unsigned char types[MAX_ORDER];
1da177e4 3832
7bf02ea2 3833 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3834 continue;
1da177e4
LT
3835 show_node(zone);
3836 printk("%s: ", zone->name);
1da177e4
LT
3837
3838 spin_lock_irqsave(&zone->lock, flags);
3839 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
3840 struct free_area *area = &zone->free_area[order];
3841 int type;
3842
3843 nr[order] = area->nr_free;
8f9de51a 3844 total += nr[order] << order;
377e4f16
RV
3845
3846 types[order] = 0;
3847 for (type = 0; type < MIGRATE_TYPES; type++) {
3848 if (!list_empty(&area->free_list[type]))
3849 types[order] |= 1 << type;
3850 }
1da177e4
LT
3851 }
3852 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 3853 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a 3854 printk("%lu*%lukB ", nr[order], K(1UL) << order);
377e4f16
RV
3855 if (nr[order])
3856 show_migration_types(types[order]);
3857 }
1da177e4
LT
3858 printk("= %lukB\n", K(total));
3859 }
3860
949f7ec5
DR
3861 hugetlb_show_meminfo();
3862
e6f3602d
LW
3863 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3864
1da177e4
LT
3865 show_swap_cache_info();
3866}
3867
19770b32
MG
3868static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3869{
3870 zoneref->zone = zone;
3871 zoneref->zone_idx = zone_idx(zone);
3872}
3873
1da177e4
LT
3874/*
3875 * Builds allocation fallback zone lists.
1a93205b
CL
3876 *
3877 * Add all populated zones of a node to the zonelist.
1da177e4 3878 */
f0c0b2b8 3879static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
bc732f1d 3880 int nr_zones)
1da177e4 3881{
1a93205b 3882 struct zone *zone;
bc732f1d 3883 enum zone_type zone_type = MAX_NR_ZONES;
02a68a5e
CL
3884
3885 do {
2f6726e5 3886 zone_type--;
070f8032 3887 zone = pgdat->node_zones + zone_type;
1a93205b 3888 if (populated_zone(zone)) {
dd1a239f
MG
3889 zoneref_set_zone(zone,
3890 &zonelist->_zonerefs[nr_zones++]);
070f8032 3891 check_highest_zone(zone_type);
1da177e4 3892 }
2f6726e5 3893 } while (zone_type);
bc732f1d 3894
070f8032 3895 return nr_zones;
1da177e4
LT
3896}
3897
f0c0b2b8
KH
3898
3899/*
3900 * zonelist_order:
3901 * 0 = automatic detection of better ordering.
3902 * 1 = order by ([node] distance, -zonetype)
3903 * 2 = order by (-zonetype, [node] distance)
3904 *
3905 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3906 * the same zonelist. So only NUMA can configure this param.
3907 */
3908#define ZONELIST_ORDER_DEFAULT 0
3909#define ZONELIST_ORDER_NODE 1
3910#define ZONELIST_ORDER_ZONE 2
3911
3912/* zonelist order in the kernel.
3913 * set_zonelist_order() will set this to NODE or ZONE.
3914 */
3915static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3916static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3917
3918
1da177e4 3919#ifdef CONFIG_NUMA
f0c0b2b8
KH
3920/* The value user specified ....changed by config */
3921static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3922/* string for sysctl */
3923#define NUMA_ZONELIST_ORDER_LEN 16
3924char numa_zonelist_order[16] = "default";
3925
3926/*
 3927 * Interface for configuring zonelist ordering.
 3928 * command line option "numa_zonelist_order"
 3929 * = "[dD]efault" - default, automatic configuration.
 3930 * = "[nN]ode" - order by node locality, then by zone within node
 3931 * = "[zZ]one" - order by zone, then by locality within zone
3932 */
3933
3934static int __parse_numa_zonelist_order(char *s)
3935{
3936 if (*s == 'd' || *s == 'D') {
3937 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3938 } else if (*s == 'n' || *s == 'N') {
3939 user_zonelist_order = ZONELIST_ORDER_NODE;
3940 } else if (*s == 'z' || *s == 'Z') {
3941 user_zonelist_order = ZONELIST_ORDER_ZONE;
3942 } else {
3943 printk(KERN_WARNING
3944 "Ignoring invalid numa_zonelist_order value: "
3945 "%s\n", s);
3946 return -EINVAL;
3947 }
3948 return 0;
3949}
3950
3951static __init int setup_numa_zonelist_order(char *s)
3952{
ecb256f8
VL
3953 int ret;
3954
3955 if (!s)
3956 return 0;
3957
3958 ret = __parse_numa_zonelist_order(s);
3959 if (ret == 0)
3960 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3961
3962 return ret;
f0c0b2b8
KH
3963}
3964early_param("numa_zonelist_order", setup_numa_zonelist_order);
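/*
 * Illustrative sketch, not part of the original file: the ordering can be
 * chosen on the kernel command line or at runtime, e.g.
 *
 *	numa_zonelist_order=zone	(boot parameter)
 *	echo node > /proc/sys/vm/numa_zonelist_order
 *
 * Only the first character is significant, so "n", "N" and "node" all
 * select ZONELIST_ORDER_NODE; the sysctl handler below rebuilds the
 * zonelists whenever the effective order changes.
 */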
3965
3966/*
3967 * sysctl handler for numa_zonelist_order
3968 */
cccad5b9 3969int numa_zonelist_order_handler(struct ctl_table *table, int write,
8d65af78 3970 void __user *buffer, size_t *length,
f0c0b2b8
KH
3971 loff_t *ppos)
3972{
3973 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3974 int ret;
443c6f14 3975 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 3976
443c6f14 3977 mutex_lock(&zl_order_mutex);
dacbde09
CG
3978 if (write) {
3979 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3980 ret = -EINVAL;
3981 goto out;
3982 }
3983 strcpy(saved_string, (char *)table->data);
3984 }
8d65af78 3985 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 3986 if (ret)
443c6f14 3987 goto out;
f0c0b2b8
KH
3988 if (write) {
3989 int oldval = user_zonelist_order;
dacbde09
CG
3990
3991 ret = __parse_numa_zonelist_order((char *)table->data);
3992 if (ret) {
f0c0b2b8
KH
3993 /*
3994 * bogus value. restore saved string
3995 */
dacbde09 3996 strncpy((char *)table->data, saved_string,
f0c0b2b8
KH
3997 NUMA_ZONELIST_ORDER_LEN);
3998 user_zonelist_order = oldval;
4eaf3f64
HL
3999 } else if (oldval != user_zonelist_order) {
4000 mutex_lock(&zonelists_mutex);
9adb62a5 4001 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
4002 mutex_unlock(&zonelists_mutex);
4003 }
f0c0b2b8 4004 }
443c6f14
AK
4005out:
4006 mutex_unlock(&zl_order_mutex);
4007 return ret;
f0c0b2b8
KH
4008}
4009
4010
62bc62a8 4011#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
4012static int node_load[MAX_NUMNODES];
4013
1da177e4 4014/**
4dc3b16b 4015 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
4016 * @node: node whose fallback list we're appending
4017 * @used_node_mask: nodemask_t of already used nodes
4018 *
4019 * We use a number of factors to determine which is the next node that should
4020 * appear on a given node's fallback list. The node should not have appeared
4021 * already in @node's fallback list, and it should be the next closest node
4022 * according to the distance array (which contains arbitrary distance values
4023 * from each node to each node in the system), and should also prefer nodes
4024 * with no CPUs, since presumably they'll have very little allocation pressure
4025 * on them otherwise.
4026 * It returns -1 if no node is found.
4027 */
f0c0b2b8 4028static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 4029{
4cf808eb 4030 int n, val;
1da177e4 4031 int min_val = INT_MAX;
00ef2d2f 4032 int best_node = NUMA_NO_NODE;
a70f7302 4033 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 4034
4cf808eb
LT
4035 /* Use the local node if we haven't already */
4036 if (!node_isset(node, *used_node_mask)) {
4037 node_set(node, *used_node_mask);
4038 return node;
4039 }
1da177e4 4040
4b0ef1fe 4041 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
4042
4043 /* Don't want a node to appear more than once */
4044 if (node_isset(n, *used_node_mask))
4045 continue;
4046
1da177e4
LT
4047 /* Use the distance array to find the distance */
4048 val = node_distance(node, n);
4049
4cf808eb
LT
4050 /* Penalize nodes under us ("prefer the next node") */
4051 val += (n < node);
4052
1da177e4 4053 /* Give preference to headless and unused nodes */
a70f7302
RR
4054 tmp = cpumask_of_node(n);
4055 if (!cpumask_empty(tmp))
1da177e4
LT
4056 val += PENALTY_FOR_NODE_WITH_CPUS;
4057
4058 /* Slight preference for less loaded node */
4059 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4060 val += node_load[n];
4061
4062 if (val < min_val) {
4063 min_val = val;
4064 best_node = n;
4065 }
4066 }
4067
4068 if (best_node >= 0)
4069 node_set(best_node, *used_node_mask);
4070
4071 return best_node;
4072}
4073
f0c0b2b8
KH
4074
4075/*
4076 * Build zonelists ordered by node and zones within node.
4077 * This results in maximum locality--normal zone overflows into local
4078 * DMA zone, if any--but risks exhausting DMA zone.
4079 */
4080static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 4081{
f0c0b2b8 4082 int j;
1da177e4 4083 struct zonelist *zonelist;
f0c0b2b8 4084
54a6eb5c 4085 zonelist = &pgdat->node_zonelists[0];
dd1a239f 4086 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c 4087 ;
bc732f1d 4088 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
dd1a239f
MG
4089 zonelist->_zonerefs[j].zone = NULL;
4090 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
4091}
4092
523b9458
CL
4093/*
4094 * Build gfp_thisnode zonelists
4095 */
4096static void build_thisnode_zonelists(pg_data_t *pgdat)
4097{
523b9458
CL
4098 int j;
4099 struct zonelist *zonelist;
4100
54a6eb5c 4101 zonelist = &pgdat->node_zonelists[1];
bc732f1d 4102 j = build_zonelists_node(pgdat, zonelist, 0);
dd1a239f
MG
4103 zonelist->_zonerefs[j].zone = NULL;
4104 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
4105}
4106
f0c0b2b8
KH
4107/*
4108 * Build zonelists ordered by zone and nodes within zones.
4109 * This results in conserving DMA zone[s] until all Normal memory is
4110 * exhausted, but results in overflowing to remote node while memory
4111 * may still exist in local DMA zone.
4112 */
4113static int node_order[MAX_NUMNODES];
4114
4115static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4116{
f0c0b2b8
KH
4117 int pos, j, node;
4118 int zone_type; /* needs to be signed */
4119 struct zone *z;
4120 struct zonelist *zonelist;
4121
54a6eb5c
MG
4122 zonelist = &pgdat->node_zonelists[0];
4123 pos = 0;
4124 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4125 for (j = 0; j < nr_nodes; j++) {
4126 node = node_order[j];
4127 z = &NODE_DATA(node)->node_zones[zone_type];
4128 if (populated_zone(z)) {
dd1a239f
MG
4129 zoneref_set_zone(z,
4130 &zonelist->_zonerefs[pos++]);
54a6eb5c 4131 check_highest_zone(zone_type);
f0c0b2b8
KH
4132 }
4133 }
f0c0b2b8 4134 }
dd1a239f
MG
4135 zonelist->_zonerefs[pos].zone = NULL;
4136 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
4137}
4138
3193913c
MG
4139#if defined(CONFIG_64BIT)
4140/*
4141 * Devices that require DMA32/DMA are relatively rare and do not justify a
4142 * penalty to every machine in case the specialised case applies. Default
4143 * to Node-ordering on 64-bit NUMA machines
4144 */
4145static int default_zonelist_order(void)
4146{
4147 return ZONELIST_ORDER_NODE;
4148}
4149#else
4150/*
4151 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4152 * by the kernel. If processes running on node 0 deplete the low memory zone
 4153 * then reclaim will occur more frequently, increasing stalls and potentially
 4154 * making it easier to OOM if a large percentage of the zone is under writeback or
4155 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4156 * Hence, default to zone ordering on 32-bit.
4157 */
f0c0b2b8
KH
4158static int default_zonelist_order(void)
4159{
f0c0b2b8
KH
4160 return ZONELIST_ORDER_ZONE;
4161}
3193913c 4162#endif /* CONFIG_64BIT */
f0c0b2b8
KH
4163
4164static void set_zonelist_order(void)
4165{
4166 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4167 current_zonelist_order = default_zonelist_order();
4168 else
4169 current_zonelist_order = user_zonelist_order;
4170}
4171
4172static void build_zonelists(pg_data_t *pgdat)
4173{
c00eb15a 4174 int i, node, load;
1da177e4 4175 nodemask_t used_mask;
f0c0b2b8
KH
4176 int local_node, prev_node;
4177 struct zonelist *zonelist;
d00181b9 4178 unsigned int order = current_zonelist_order;
1da177e4
LT
4179
4180 /* initialize zonelists */
523b9458 4181 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 4182 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
4183 zonelist->_zonerefs[0].zone = NULL;
4184 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
4185 }
4186
4187 /* NUMA-aware ordering of nodes */
4188 local_node = pgdat->node_id;
62bc62a8 4189 load = nr_online_nodes;
1da177e4
LT
4190 prev_node = local_node;
4191 nodes_clear(used_mask);
f0c0b2b8 4192
f0c0b2b8 4193 memset(node_order, 0, sizeof(node_order));
c00eb15a 4194 i = 0;
f0c0b2b8 4195
1da177e4
LT
4196 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4197 /*
4198 * We don't want to pressure a particular node.
4199 * So adding penalty to the first node in same
4200 * distance group to make it round-robin.
4201 */
957f822a
DR
4202 if (node_distance(local_node, node) !=
4203 node_distance(local_node, prev_node))
f0c0b2b8
KH
4204 node_load[node] = load;
4205
1da177e4
LT
4206 prev_node = node;
4207 load--;
f0c0b2b8
KH
4208 if (order == ZONELIST_ORDER_NODE)
4209 build_zonelists_in_node_order(pgdat, node);
4210 else
c00eb15a 4211 node_order[i++] = node; /* remember order */
f0c0b2b8 4212 }
1da177e4 4213
f0c0b2b8
KH
4214 if (order == ZONELIST_ORDER_ZONE) {
4215 /* calculate node order -- i.e., DMA last! */
c00eb15a 4216 build_zonelists_in_zone_order(pgdat, i);
1da177e4 4217 }
523b9458
CL
4218
4219 build_thisnode_zonelists(pgdat);
1da177e4
LT
4220}
4221
7aac7898
LS
4222#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4223/*
4224 * Return node id of node used for "local" allocations.
4225 * I.e., first node id of first zone in arg node's generic zonelist.
4226 * Used for initializing percpu 'numa_mem', which is used primarily
4227 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4228 */
4229int local_memory_node(int node)
4230{
4231 struct zone *zone;
4232
4233 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4234 gfp_zone(GFP_KERNEL),
4235 NULL,
4236 &zone);
4237 return zone->node;
4238}
4239#endif
f0c0b2b8 4240
1da177e4
LT
4241#else /* CONFIG_NUMA */
4242
f0c0b2b8
KH
4243static void set_zonelist_order(void)
4244{
4245 current_zonelist_order = ZONELIST_ORDER_ZONE;
4246}
4247
4248static void build_zonelists(pg_data_t *pgdat)
1da177e4 4249{
19655d34 4250 int node, local_node;
54a6eb5c
MG
4251 enum zone_type j;
4252 struct zonelist *zonelist;
1da177e4
LT
4253
4254 local_node = pgdat->node_id;
1da177e4 4255
54a6eb5c 4256 zonelist = &pgdat->node_zonelists[0];
bc732f1d 4257 j = build_zonelists_node(pgdat, zonelist, 0);
1da177e4 4258
54a6eb5c
MG
4259 /*
4260 * Now we build the zonelist so that it contains the zones
4261 * of all the other nodes.
4262 * We don't want to pressure a particular node, so when
4263 * building the zones for node N, we make sure that the
4264 * zones coming right after the local ones are those from
4265 * node N+1 (modulo N)
4266 */
4267 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4268 if (!node_online(node))
4269 continue;
bc732f1d 4270 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
1da177e4 4271 }
54a6eb5c
MG
4272 for (node = 0; node < local_node; node++) {
4273 if (!node_online(node))
4274 continue;
bc732f1d 4275 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
54a6eb5c
MG
4276 }
4277
dd1a239f
MG
4278 zonelist->_zonerefs[j].zone = NULL;
4279 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
4280}
4281
4282#endif /* CONFIG_NUMA */
4283
99dcc3e5
CL
4284/*
4285 * Boot pageset table. One per cpu which is going to be used for all
4286 * zones and all nodes. The parameters will be set in such a way
4287 * that an item put on a list will immediately be handed over to
4288 * the buddy list. This is safe since pageset manipulation is done
4289 * with interrupts disabled.
4290 *
4291 * The boot_pagesets must be kept even after bootup is complete for
4292 * unused processors and/or zones. They do play a role for bootstrapping
4293 * hotplugged processors.
4294 *
4295 * zoneinfo_show() and maybe other functions do
4296 * not check if the processor is online before following the pageset pointer.
4297 * Other parts of the kernel may not check if the zone is available.
4298 */
4299static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4300static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 4301static void setup_zone_pageset(struct zone *zone);
99dcc3e5 4302
4eaf3f64
HL
4303/*
4304 * Global mutex to protect against size modification of zonelists
4305 * as well as to serialize pageset setup for the new populated zone.
4306 */
4307DEFINE_MUTEX(zonelists_mutex);
4308
9b1a4d38 4309/* return values int ....just for stop_machine() */
4ed7e022 4310static int __build_all_zonelists(void *data)
1da177e4 4311{
6811378e 4312 int nid;
99dcc3e5 4313 int cpu;
9adb62a5 4314 pg_data_t *self = data;
9276b1bc 4315
7f9cfb31
BL
4316#ifdef CONFIG_NUMA
4317 memset(node_load, 0, sizeof(node_load));
4318#endif
9adb62a5
JL
4319
4320 if (self && !node_online(self->node_id)) {
4321 build_zonelists(self);
9adb62a5
JL
4322 }
4323
9276b1bc 4324 for_each_online_node(nid) {
7ea1530a
CL
4325 pg_data_t *pgdat = NODE_DATA(nid);
4326
4327 build_zonelists(pgdat);
9276b1bc 4328 }
99dcc3e5
CL
4329
4330 /*
4331 * Initialize the boot_pagesets that are going to be used
4332 * for bootstrapping processors. The real pagesets for
4333 * each zone will be allocated later when the per cpu
4334 * allocator is available.
4335 *
4336 * boot_pagesets are used also for bootstrapping offline
4337 * cpus if the system is already booted because the pagesets
4338 * are needed to initialize allocators on a specific cpu too.
4339 * F.e. the percpu allocator needs the page allocator which
4340 * needs the percpu allocator in order to allocate its pagesets
4341 * (a chicken-egg dilemma).
4342 */
7aac7898 4343 for_each_possible_cpu(cpu) {
99dcc3e5
CL
4344 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4345
7aac7898
LS
4346#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4347 /*
4348 * We now know the "local memory node" for each node--
4349 * i.e., the node of the first zone in the generic zonelist.
4350 * Set up numa_mem percpu variable for on-line cpus. During
4351 * boot, only the boot cpu should be on-line; we'll init the
4352 * secondary cpus' numa_mem as they come on-line. During
4353 * node/memory hotplug, we'll fixup all on-line cpus.
4354 */
4355 if (cpu_online(cpu))
4356 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4357#endif
4358 }
4359
6811378e
YG
4360 return 0;
4361}
4362
061f67bc
RV
4363static noinline void __init
4364build_all_zonelists_init(void)
4365{
4366 __build_all_zonelists(NULL);
4367 mminit_verify_zonelist();
4368 cpuset_init_current_mems_allowed();
4369}
4370
4eaf3f64
HL
4371/*
4372 * Called with zonelists_mutex held always
4373 * unless system_state == SYSTEM_BOOTING.
061f67bc
RV
4374 *
4375 * __ref due to (1) call of __meminit annotated setup_zone_pageset
4376 * [we're only called with non-NULL zone through __meminit paths] and
4377 * (2) call of __init annotated helper build_all_zonelists_init
4378 * [protected by SYSTEM_BOOTING].
4eaf3f64 4379 */
9adb62a5 4380void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 4381{
f0c0b2b8
KH
4382 set_zonelist_order();
4383
6811378e 4384 if (system_state == SYSTEM_BOOTING) {
061f67bc 4385 build_all_zonelists_init();
6811378e 4386 } else {
e9959f0f 4387#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
4388 if (zone)
4389 setup_zone_pageset(zone);
e9959f0f 4390#endif
dd1895e2
CS
 4391 /* We have to stop all CPUs to guarantee there is no user
 4392 of the zonelist */
9adb62a5 4393 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
4394 /* cpuset refresh routine should be here */
4395 }
bd1e22b8 4396 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
4397 /*
4398 * Disable grouping by mobility if the number of pages in the
4399 * system is too low to allow the mechanism to work. It would be
4400 * more accurate, but expensive to check per-zone. This check is
4401 * made on memory-hotadd so a system can start with mobility
4402 * disabled and enable it later
4403 */
d9c23400 4404 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
4405 page_group_by_mobility_disabled = 1;
4406 else
4407 page_group_by_mobility_disabled = 0;
4408
f88dfff5 4409 pr_info("Built %i zonelists in %s order, mobility grouping %s. "
9ef9acb0 4410 "Total pages: %ld\n",
62bc62a8 4411 nr_online_nodes,
f0c0b2b8 4412 zonelist_order_name[current_zonelist_order],
9ef9acb0 4413 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
4414 vm_total_pages);
4415#ifdef CONFIG_NUMA
f88dfff5 4416 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 4417#endif
1da177e4
LT
4418}
4419
4420/*
4421 * Helper functions to size the waitqueue hash table.
4422 * Essentially these want to choose hash table sizes sufficiently
4423 * large so that collisions trying to wait on pages are rare.
4424 * But in fact, the number of active page waitqueues on typical
4425 * systems is ridiculously low, less than 200. So this is even
4426 * conservative, even though it seems large.
4427 *
4428 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4429 * waitqueues, i.e. the size of the waitq table given the number of pages.
4430 */
4431#define PAGES_PER_WAITQUEUE 256
4432
cca448fe 4433#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 4434static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
4435{
4436 unsigned long size = 1;
4437
4438 pages /= PAGES_PER_WAITQUEUE;
4439
4440 while (size < pages)
4441 size <<= 1;
4442
4443 /*
4444 * Once we have dozens or even hundreds of threads sleeping
4445 * on IO we've got bigger problems than wait queue collision.
4446 * Limit the size of the wait table to a reasonable size.
4447 */
4448 size = min(size, 4096UL);
4449
4450 return max(size, 4UL);
4451}
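/*
 * Illustrative worked example, not part of the original file: a 4GB zone
 * with 4KB pages has 1048576 pages, so
 *
 *	1048576 / PAGES_PER_WAITQUEUE = 1048576 / 256 = 4096
 *
 * and the smallest power of two >= 4096 is 4096, which is also the upper
 * clamp, giving a 4096-entry table. A tiny 1MB zone (256 pages) works out
 * to 1 and is raised to the 4-entry minimum.
 */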
cca448fe
YG
4452#else
4453/*
4454 * A zone's size might be changed by hot-add, so it is not possible to determine
4455 * a suitable size for its wait_table. So we use the maximum size now.
4456 *
4457 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
4458 *
4459 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
4460 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4461 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
4462 *
4463 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4464 * or more by the traditional way. (See above). It equals:
4465 *
4466 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
4467 * ia64(16K page size) : = ( 8G + 4M)byte.
4468 * powerpc (64K page size) : = (32G +16M)byte.
4469 */
4470static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4471{
4472 return 4096UL;
4473}
4474#endif
1da177e4
LT
4475
4476/*
4477 * This is an integer logarithm so that shifts can be used later
4478 * to extract the more random high bits from the multiplicative
4479 * hash function before the remainder is taken.
4480 */
4481static inline unsigned long wait_table_bits(unsigned long size)
4482{
4483 return ffz(~size);
4484}
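/*
 * Illustrative worked example, not part of the original file: the table
 * size is always a power of two, so ffz(~size) is simply log2(size), e.g.
 * wait_table_bits(4096) == 12. Those 12 bits are the width the
 * page-waitqueue hash uses when indexing the table.
 */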
4485
1da177e4
LT
4486/*
4487 * Initially all pages are reserved - free ones are freed
4488 * up by free_all_bootmem() once the early boot process is
4489 * done. Non-atomic initialization, single-pass.
4490 */
c09b4240 4491void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 4492 unsigned long start_pfn, enum memmap_context context)
1da177e4 4493{
4b94ffdc 4494 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
29751f69 4495 unsigned long end_pfn = start_pfn + size;
4b94ffdc 4496 pg_data_t *pgdat = NODE_DATA(nid);
29751f69 4497 unsigned long pfn;
3a80a7fa 4498 unsigned long nr_initialised = 0;
342332e6
TI
4499#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4500 struct memblock_region *r = NULL, *tmp;
4501#endif
1da177e4 4502
22b31eec
HD
4503 if (highest_memmap_pfn < end_pfn - 1)
4504 highest_memmap_pfn = end_pfn - 1;
4505
4b94ffdc
DW
4506 /*
4507 * Honor reservation requested by the driver for this ZONE_DEVICE
4508 * memory
4509 */
4510 if (altmap && start_pfn == altmap->base_pfn)
4511 start_pfn += altmap->reserve;
4512
cbe8dd4a 4513 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02 4514 /*
b72d0ffb
AM
4515 * There can be holes in boot-time mem_map[]s handed to this
4516 * function. They do not exist on hotplugged memory.
a2f3aa02 4517 */
b72d0ffb
AM
4518 if (context != MEMMAP_EARLY)
4519 goto not_early;
4520
4521 if (!early_pfn_valid(pfn))
4522 continue;
4523 if (!early_pfn_in_nid(pfn, nid))
4524 continue;
4525 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
4526 break;
342332e6
TI
4527
4528#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
b72d0ffb
AM
4529 /*
4530 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
4531 * from zone_movable_pfn[nid] to end of each node should be
4532 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
4533 */
4534 if (!mirrored_kernelcore && zone_movable_pfn[nid])
4535 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
4536 continue;
342332e6 4537
b72d0ffb
AM
4538 /*
4539 * Check given memblock attribute by firmware which can affect
4540 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
4541 * mirrored, it's an overlapped memmap init. skip it.
4542 */
4543 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
4544 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
4545 for_each_memblock(memory, tmp)
4546 if (pfn < memblock_region_memory_end_pfn(tmp))
4547 break;
4548 r = tmp;
4549 }
4550 if (pfn >= memblock_region_memory_base_pfn(r) &&
4551 memblock_is_mirror(r)) {
4552 /* already initialized as NORMAL */
4553 pfn = memblock_region_memory_end_pfn(r);
4554 continue;
342332e6 4555 }
a2f3aa02 4556 }
b72d0ffb 4557#endif
ac5d2539 4558
b72d0ffb 4559not_early:
ac5d2539
MG
4560 /*
4561 * Mark the block movable so that blocks are reserved for
4562 * movable at startup. This will force kernel allocations
4563 * to reserve their blocks rather than leaking throughout
4564 * the address space during boot when many long-lived
974a786e 4565 * kernel allocations are made.
ac5d2539
MG
4566 *
4567 * bitmap is created for zone's valid pfn range. but memmap
4568 * can be created for invalid pages (for alignment)
4569 * check here not to call set_pageblock_migratetype() against
4570 * pfn out of zone.
4571 */
4572 if (!(pfn & (pageblock_nr_pages - 1))) {
4573 struct page *page = pfn_to_page(pfn);
4574
4575 __init_single_page(page, pfn, zone, nid);
4576 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4577 } else {
4578 __init_single_pfn(pfn, zone, nid);
4579 }
1da177e4
LT
4580 }
4581}
4582
1e548deb 4583static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 4584{
7aeb09f9 4585 unsigned int order, t;
b2a0ac88
MG
4586 for_each_migratetype_order(order, t) {
4587 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
4588 zone->free_area[order].nr_free = 0;
4589 }
4590}
4591
4592#ifndef __HAVE_ARCH_MEMMAP_INIT
4593#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 4594 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
4595#endif
4596
7cd2b0a3 4597static int zone_batchsize(struct zone *zone)
e7c8d5c9 4598{
3a6be87f 4599#ifdef CONFIG_MMU
e7c8d5c9
CL
4600 int batch;
4601
4602 /*
 4603 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 4604 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
4605 *
4606 * OK, so we don't know how big the cache is. So guess.
4607 */
b40da049 4608 batch = zone->managed_pages / 1024;
ba56e91c
SR
4609 if (batch * PAGE_SIZE > 512 * 1024)
4610 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
4611 batch /= 4; /* We effectively *= 4 below */
4612 if (batch < 1)
4613 batch = 1;
4614
4615 /*
0ceaacc9
NP
4616 * Clamp the batch to a 2^n - 1 value. Having a power
4617 * of 2 value was found to be more likely to have
4618 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 4619 *
0ceaacc9
NP
4620 * For example if 2 tasks are alternately allocating
4621 * batches of pages, one task can end up with a lot
4622 * of pages of one half of the possible page colors
4623 * and the other with pages of the other colors.
e7c8d5c9 4624 */
9155203a 4625 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 4626
e7c8d5c9 4627 return batch;
3a6be87f
DH
4628
4629#else
4630 /* The deferral and batching of frees should be suppressed under NOMMU
4631 * conditions.
4632 *
4633 * The problem is that NOMMU needs to be able to allocate large chunks
4634 * of contiguous memory as there's no hardware page translation to
4635 * assemble apparent contiguous memory from discontiguous pages.
4636 *
4637 * Queueing large contiguous runs of pages for batching, however,
4638 * causes the pages to actually be freed in smaller chunks. As there
4639 * can be a significant delay between the individual batches being
4640 * recycled, this leads to the once large chunks of space being
4641 * fragmented and becoming unavailable for high-order allocations.
4642 */
4643 return 0;
4644#endif
e7c8d5c9
CL
4645}
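/*
 * Illustrative worked example, not part of the original file: a zone
 * managing 1GB with 4KB pages has 262144 pages, so
 *
 *	batch = 262144 / 1024 = 256	(over the 512KB cap)
 *	batch = (512 * 1024) / 4096 = 128
 *	batch = 128 / 4 = 32
 *	batch = rounddown_pow_of_two(32 + 16) - 1 = 31
 *
 * i.e. each per-cpu list is refilled from and drained to the buddy lists
 * in chunks of 31 pages.
 */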
4646
8d7a8fa9
CS
4647/*
4648 * pcp->high and pcp->batch values are related and dependent on one another:
 4649 * ->batch must never be higher than ->high.
4650 * The following function updates them in a safe manner without read side
4651 * locking.
4652 *
4653 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 4654 * those fields changing asynchronously (according to the above rule).
 4655 *
 4656 * mutex_is_locked(&pcp_batch_high_lock) is required when calling this function
4657 * outside of boot time (or some other assurance that no concurrent updaters
4658 * exist).
4659 */
4660static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4661 unsigned long batch)
4662{
4663 /* start with a fail safe value for batch */
4664 pcp->batch = 1;
4665 smp_wmb();
4666
4667 /* Update high, then batch, in order */
4668 pcp->high = high;
4669 smp_wmb();
4670
4671 pcp->batch = batch;
4672}
4673
3664033c 4674/* a companion to pageset_set_high() */
4008bab7
CS
4675static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4676{
8d7a8fa9 4677 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4008bab7
CS
4678}
4679
88c90dbc 4680static void pageset_init(struct per_cpu_pageset *p)
2caaad41
CL
4681{
4682 struct per_cpu_pages *pcp;
5f8dcc21 4683 int migratetype;
2caaad41 4684
1c6fe946
MD
4685 memset(p, 0, sizeof(*p));
4686
3dfa5721 4687 pcp = &p->pcp;
2caaad41 4688 pcp->count = 0;
5f8dcc21
MG
4689 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4690 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
4691}
4692
88c90dbc
CS
4693static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4694{
4695 pageset_init(p);
4696 pageset_set_batch(p, batch);
4697}
4698
8ad4b1fb 4699/*
3664033c 4700 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
8ad4b1fb
RS
4701 * to the value high for the pageset p.
4702 */
3664033c 4703static void pageset_set_high(struct per_cpu_pageset *p,
8ad4b1fb
RS
4704 unsigned long high)
4705{
8d7a8fa9
CS
4706 unsigned long batch = max(1UL, high / 4);
4707 if ((high / 4) > (PAGE_SHIFT * 8))
4708 batch = PAGE_SHIFT * 8;
8ad4b1fb 4709
8d7a8fa9 4710 pageset_update(&p->pcp, high, batch);
8ad4b1fb
RS
4711}
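/*
 * Illustrative worked example, not part of the original file: with
 * /proc/sys/vm/percpu_pagelist_fraction set to 8 on a zone managing
 * 262144 pages (and PAGE_SHIFT == 12), pageset_set_high_and_batch()
 * below arrives at
 *
 *	high  = 262144 / 8 = 32768
 *	batch = min(high / 4, PAGE_SHIFT * 8) = min(8192, 96) = 96
 *
 * so each per-cpu list may hold up to 32768 pages and is trimmed or
 * refilled 96 at a time.
 */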
4712
7cd2b0a3
DR
4713static void pageset_set_high_and_batch(struct zone *zone,
4714 struct per_cpu_pageset *pcp)
56cef2b8 4715{
56cef2b8 4716 if (percpu_pagelist_fraction)
3664033c 4717 pageset_set_high(pcp,
56cef2b8
CS
4718 (zone->managed_pages /
4719 percpu_pagelist_fraction));
4720 else
4721 pageset_set_batch(pcp, zone_batchsize(zone));
4722}
4723
169f6c19
CS
4724static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4725{
4726 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4727
4728 pageset_init(pcp);
4729 pageset_set_high_and_batch(zone, pcp);
4730}
4731
4ed7e022 4732static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
4733{
4734 int cpu;
319774e2 4735 zone->pageset = alloc_percpu(struct per_cpu_pageset);
56cef2b8
CS
4736 for_each_possible_cpu(cpu)
4737 zone_pageset_init(zone, cpu);
319774e2
WF
4738}
4739
2caaad41 4740/*
99dcc3e5
CL
4741 * Allocate per cpu pagesets and initialize them.
4742 * Before this call only boot pagesets were available.
e7c8d5c9 4743 */
99dcc3e5 4744void __init setup_per_cpu_pageset(void)
e7c8d5c9 4745{
99dcc3e5 4746 struct zone *zone;
e7c8d5c9 4747
319774e2
WF
4748 for_each_populated_zone(zone)
4749 setup_zone_pageset(zone);
e7c8d5c9
CL
4750}
4751
577a32f6 4752static noinline __init_refok
cca448fe 4753int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
4754{
4755 int i;
cca448fe 4756 size_t alloc_size;
ed8ece2e
DH
4757
4758 /*
4759 * The per-page waitqueue mechanism uses hashed waitqueues
4760 * per zone.
4761 */
02b694de
YG
4762 zone->wait_table_hash_nr_entries =
4763 wait_table_hash_nr_entries(zone_size_pages);
4764 zone->wait_table_bits =
4765 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
4766 alloc_size = zone->wait_table_hash_nr_entries
4767 * sizeof(wait_queue_head_t);
4768
cd94b9db 4769 if (!slab_is_available()) {
cca448fe 4770 zone->wait_table = (wait_queue_head_t *)
6782832e
SS
4771 memblock_virt_alloc_node_nopanic(
4772 alloc_size, zone->zone_pgdat->node_id);
cca448fe
YG
4773 } else {
4774 /*
4775 * This case means that a zone whose size was 0 gets new memory
4776 * via memory hot-add.
4777 * But it may be the case that a new node was hot-added. In
4778 * this case vmalloc() will not be able to use this new node's
4779 * memory - this wait_table must be initialized to use this new
4780 * node itself as well.
4781 * To use this new node's memory, further consideration will be
4782 * necessary.
4783 */
8691f3a7 4784 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
4785 }
4786 if (!zone->wait_table)
4787 return -ENOMEM;
ed8ece2e 4788
b8af2941 4789 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 4790 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
4791
4792 return 0;
ed8ece2e
DH
4793}
4794
c09b4240 4795static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 4796{
99dcc3e5
CL
4797 /*
4798 * per cpu subsystem is not up at this point. The following code
4799 * relies on the ability of the linker to provide the
4800 * offset of a (static) per cpu variable into the per cpu area.
4801 */
4802 zone->pageset = &boot_pageset;
ed8ece2e 4803
b38a8725 4804 if (populated_zone(zone))
99dcc3e5
CL
4805 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4806 zone->name, zone->present_pages,
4807 zone_batchsize(zone));
ed8ece2e
DH
4808}
4809
4ed7e022 4810int __meminit init_currently_empty_zone(struct zone *zone,
718127cc 4811 unsigned long zone_start_pfn,
b171e409 4812 unsigned long size)
ed8ece2e
DH
4813{
4814 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
4815 int ret;
4816 ret = zone_wait_table_init(zone, size);
4817 if (ret)
4818 return ret;
ed8ece2e
DH
4819 pgdat->nr_zones = zone_idx(zone) + 1;
4820
ed8ece2e
DH
4821 zone->zone_start_pfn = zone_start_pfn;
4822
708614e6
MG
4823 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4824 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4825 pgdat->node_id,
4826 (unsigned long)zone_idx(zone),
4827 zone_start_pfn, (zone_start_pfn + size));
4828
1e548deb 4829 zone_init_free_lists(zone);
718127cc
YG
4830
4831 return 0;
ed8ece2e
DH
4832}
4833
0ee332c1 4834#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 4835#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
8a942fde 4836
c713216d
MG
4837/*
4838 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
c713216d 4839 */
8a942fde
MG
4840int __meminit __early_pfn_to_nid(unsigned long pfn,
4841 struct mminit_pfnnid_cache *state)
c713216d 4842{
c13291a5 4843 unsigned long start_pfn, end_pfn;
e76b63f8 4844 int nid;
7c243c71 4845
8a942fde
MG
4846 if (state->last_start <= pfn && pfn < state->last_end)
4847 return state->last_nid;
c713216d 4848
e76b63f8
YL
4849 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4850 if (nid != -1) {
8a942fde
MG
4851 state->last_start = start_pfn;
4852 state->last_end = end_pfn;
4853 state->last_nid = nid;
e76b63f8
YL
4854 }
4855
4856 return nid;
c713216d
MG
4857}
4858#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4859
c713216d 4860/**
6782832e 4861 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
88ca3b94 4862 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6782832e 4863 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
c713216d 4864 *
7d018176
ZZ
4865 * If an architecture guarantees that all ranges registered contain no holes
 4866 * and may be freed, this function may be used instead of calling
4867 * memblock_free_early_nid() manually.
c713216d 4868 */
c13291a5 4869void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 4870{
c13291a5
TH
4871 unsigned long start_pfn, end_pfn;
4872 int i, this_nid;
edbe7d23 4873
c13291a5
TH
4874 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4875 start_pfn = min(start_pfn, max_low_pfn);
4876 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 4877
c13291a5 4878 if (start_pfn < end_pfn)
6782832e
SS
4879 memblock_free_early_nid(PFN_PHYS(start_pfn),
4880 (end_pfn - start_pfn) << PAGE_SHIFT,
4881 this_nid);
edbe7d23 4882 }
edbe7d23 4883}
edbe7d23 4884
c713216d
MG
4885/**
4886 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 4887 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d 4888 *
7d018176
ZZ
4889 * If an architecture guarantees that all ranges registered contain no holes and may
4890 * be freed, this function may be used instead of calling memory_present() manually.
c713216d
MG
4891 */
4892void __init sparse_memory_present_with_active_regions(int nid)
4893{
c13291a5
TH
4894 unsigned long start_pfn, end_pfn;
4895 int i, this_nid;
c713216d 4896
c13291a5
TH
4897 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4898 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
4899}
4900
4901/**
4902 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
4903 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4904 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4905 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
4906 *
4907 * It returns the start and end page frame of a node based on information
7d018176 4908 * provided by memblock_set_node(). If called for a node
c713216d 4909 * with no available memory, a warning is printed and the start and end
88ca3b94 4910 * PFNs will be 0.
c713216d 4911 */
a3142c8e 4912void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
4913 unsigned long *start_pfn, unsigned long *end_pfn)
4914{
c13291a5 4915 unsigned long this_start_pfn, this_end_pfn;
c713216d 4916 int i;
c13291a5 4917
c713216d
MG
4918 *start_pfn = -1UL;
4919 *end_pfn = 0;
4920
c13291a5
TH
4921 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4922 *start_pfn = min(*start_pfn, this_start_pfn);
4923 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
4924 }
4925
633c0666 4926 if (*start_pfn == -1UL)
c713216d 4927 *start_pfn = 0;
c713216d
MG
4928}
4929
2a1e274a
MG
4930/*
4931 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4932 * assumption is made that zones within a node are ordered in monotonically
4933 * increasing memory addresses so that the "highest" populated zone is used
4934 */
b69a7288 4935static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
4936{
4937 int zone_index;
4938 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4939 if (zone_index == ZONE_MOVABLE)
4940 continue;
4941
4942 if (arch_zone_highest_possible_pfn[zone_index] >
4943 arch_zone_lowest_possible_pfn[zone_index])
4944 break;
4945 }
4946
4947 VM_BUG_ON(zone_index == -1);
4948 movable_zone = zone_index;
4949}
4950
4951/*
4952 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 4953 * because it is sized independently of the architecture. Unlike the other zones,
2a1e274a
MG
4954 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4955 * in each node depending on the size of each node and how evenly kernelcore
4956 * is distributed. This helper function adjusts the zone ranges
4957 * provided by the architecture for a given node by using the end of the
4958 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4959 * zones within a node are in order of monotonically increasing memory addresses
4960 */
b69a7288 4961static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
4962 unsigned long zone_type,
4963 unsigned long node_start_pfn,
4964 unsigned long node_end_pfn,
4965 unsigned long *zone_start_pfn,
4966 unsigned long *zone_end_pfn)
4967{
4968 /* Only adjust if ZONE_MOVABLE is on this node */
4969 if (zone_movable_pfn[nid]) {
4970 /* Size ZONE_MOVABLE */
4971 if (zone_type == ZONE_MOVABLE) {
4972 *zone_start_pfn = zone_movable_pfn[nid];
4973 *zone_end_pfn = min(node_end_pfn,
4974 arch_zone_highest_possible_pfn[movable_zone]);
4975
2a1e274a
MG
4976 /* Check if this whole range is within ZONE_MOVABLE */
4977 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4978 *zone_start_pfn = *zone_end_pfn;
4979 }
4980}
4981
c713216d
MG
4982/*
4983 * Return the number of pages a zone spans in a node, including holes
4984 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4985 */
6ea6e688 4986static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 4987 unsigned long zone_type,
7960aedd
ZY
4988 unsigned long node_start_pfn,
4989 unsigned long node_end_pfn,
d91749c1
TI
4990 unsigned long *zone_start_pfn,
4991 unsigned long *zone_end_pfn,
c713216d
MG
4992 unsigned long *ignored)
4993{
b5685e92 4994 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
4995 if (!node_start_pfn && !node_end_pfn)
4996 return 0;
4997
7960aedd 4998 /* Get the start and end of the zone */
d91749c1
TI
4999 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5000 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
5001 adjust_zone_range_for_zone_movable(nid, zone_type,
5002 node_start_pfn, node_end_pfn,
d91749c1 5003 zone_start_pfn, zone_end_pfn);
c713216d
MG
5004
5005 /* Check that this node has pages within the zone's required range */
d91749c1 5006 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
5007 return 0;
5008
5009 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
5010 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5011 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
5012
5013 /* Return the spanned pages */
d91749c1 5014 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
5015}
5016
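/*
 * Worked example (hypothetical numbers, not taken from any real layout):
 * if a node covers PFNs [0x10000, 0x50000) and the architecture places
 * ZONE_NORMAL at PFNs [0x20000, 0x80000), the clamped span is
 * [0x20000, 0x50000), i.e. 0x30000 spanned pages.  If memblock reports
 * 0x1000 of those PFNs as holes, zone_absent_pages_in_node() returns
 * 0x1000 and present_pages becomes 0x30000 - 0x1000 = 0x2f000 pages.
 */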
5017/*
5018 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 5019 * then all holes in the requested range will be accounted for.
c713216d 5020 */
32996250 5021unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
5022 unsigned long range_start_pfn,
5023 unsigned long range_end_pfn)
5024{
96e907d1
TH
5025 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5026 unsigned long start_pfn, end_pfn;
5027 int i;
c713216d 5028
96e907d1
TH
5029 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5030 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5031 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5032 nr_absent -= end_pfn - start_pfn;
c713216d 5033 }
96e907d1 5034 return nr_absent;
c713216d
MG
5035}
5036
5037/**
5038 * absent_pages_in_range - Return number of page frames in holes within a range
5039 * @start_pfn: The start PFN to start searching for holes
5040 * @end_pfn: The end PFN to stop searching for holes
5041 *
88ca3b94 5042 * It returns the number of page frames in memory holes within a range.
c713216d
MG
5043 */
5044unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5045 unsigned long end_pfn)
5046{
5047 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5048}
5049
5050/* Return the number of page frames in holes in a zone on a node */
6ea6e688 5051static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5052 unsigned long zone_type,
7960aedd
ZY
5053 unsigned long node_start_pfn,
5054 unsigned long node_end_pfn,
c713216d
MG
5055 unsigned long *ignored)
5056{
96e907d1
TH
5057 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5058 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 5059 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 5060 unsigned long nr_absent;
9c7cd687 5061
b5685e92 5062 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5063 if (!node_start_pfn && !node_end_pfn)
5064 return 0;
5065
96e907d1
TH
5066 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5067 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 5068
2a1e274a
MG
5069 adjust_zone_range_for_zone_movable(nid, zone_type,
5070 node_start_pfn, node_end_pfn,
5071 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
5072 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5073
5074 /*
5075 * ZONE_MOVABLE handling.
5076 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5077 * and vice versa.
5078 */
5079 if (zone_movable_pfn[nid]) {
5080 if (mirrored_kernelcore) {
5081 unsigned long start_pfn, end_pfn;
5082 struct memblock_region *r;
5083
5084 for_each_memblock(memory, r) {
5085 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5086 zone_start_pfn, zone_end_pfn);
5087 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5088 zone_start_pfn, zone_end_pfn);
5089
5090 if (zone_type == ZONE_MOVABLE &&
5091 memblock_is_mirror(r))
5092 nr_absent += end_pfn - start_pfn;
5093
5094 if (zone_type == ZONE_NORMAL &&
5095 !memblock_is_mirror(r))
5096 nr_absent += end_pfn - start_pfn;
5097 }
5098 } else {
5099 if (zone_type == ZONE_NORMAL)
5100 nr_absent += node_end_pfn - zone_movable_pfn[nid];
5101 }
5102 }
5103
5104 return nr_absent;
c713216d 5105}
0e0b864e 5106
0ee332c1 5107#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 5108static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5109 unsigned long zone_type,
7960aedd
ZY
5110 unsigned long node_start_pfn,
5111 unsigned long node_end_pfn,
d91749c1
TI
5112 unsigned long *zone_start_pfn,
5113 unsigned long *zone_end_pfn,
c713216d
MG
5114 unsigned long *zones_size)
5115{
d91749c1
TI
5116 unsigned int zone;
5117
5118 *zone_start_pfn = node_start_pfn;
5119 for (zone = 0; zone < zone_type; zone++)
5120 *zone_start_pfn += zones_size[zone];
5121
5122 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5123
c713216d
MG
5124 return zones_size[zone_type];
5125}
5126
6ea6e688 5127static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5128 unsigned long zone_type,
7960aedd
ZY
5129 unsigned long node_start_pfn,
5130 unsigned long node_end_pfn,
c713216d
MG
5131 unsigned long *zholes_size)
5132{
5133 if (!zholes_size)
5134 return 0;
5135
5136 return zholes_size[zone_type];
5137}
20e6926d 5138
0ee332c1 5139#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5140
a3142c8e 5141static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd
ZY
5142 unsigned long node_start_pfn,
5143 unsigned long node_end_pfn,
5144 unsigned long *zones_size,
5145 unsigned long *zholes_size)
c713216d 5146{
febd5949 5147 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
5148 enum zone_type i;
5149
febd5949
GZ
5150 for (i = 0; i < MAX_NR_ZONES; i++) {
5151 struct zone *zone = pgdat->node_zones + i;
d91749c1 5152 unsigned long zone_start_pfn, zone_end_pfn;
febd5949 5153 unsigned long size, real_size;
c713216d 5154
febd5949
GZ
5155 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5156 node_start_pfn,
5157 node_end_pfn,
d91749c1
TI
5158 &zone_start_pfn,
5159 &zone_end_pfn,
febd5949
GZ
5160 zones_size);
5161 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
7960aedd
ZY
5162 node_start_pfn, node_end_pfn,
5163 zholes_size);
d91749c1
TI
5164 if (size)
5165 zone->zone_start_pfn = zone_start_pfn;
5166 else
5167 zone->zone_start_pfn = 0;
febd5949
GZ
5168 zone->spanned_pages = size;
5169 zone->present_pages = real_size;
5170
5171 totalpages += size;
5172 realtotalpages += real_size;
5173 }
5174
5175 pgdat->node_spanned_pages = totalpages;
c713216d
MG
5176 pgdat->node_present_pages = realtotalpages;
5177 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5178 realtotalpages);
5179}
5180
835c134e
MG
5181#ifndef CONFIG_SPARSEMEM
5182/*
5183 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
5184 * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
5185 * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
835c134e
MG
5186 * round what is now in bits to nearest long in bits, then return it in
5187 * bytes.
5188 */
7c45512d 5189static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
5190{
5191 unsigned long usemapsize;
5192
7c45512d 5193 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
5194 usemapsize = roundup(zonesize, pageblock_nr_pages);
5195 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
5196 usemapsize *= NR_PAGEBLOCK_BITS;
5197 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5198
5199 return usemapsize / 8;
5200}
5201
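/*
 * Worked example under assumed values (pageblock_order == 9, so
 * pageblock_nr_pages == 512, and NR_PAGEBLOCK_BITS == 4): a
 * pageblock-aligned 1GiB zone of 4KiB pages holds 262144 pages, i.e.
 * 262144 / 512 = 512 pageblocks.  That needs 512 * 4 = 2048 bits, which
 * is already a multiple of the bits in an unsigned long, so usemap_size()
 * returns 2048 / 8 = 256 bytes.
 */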
5202static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
5203 struct zone *zone,
5204 unsigned long zone_start_pfn,
5205 unsigned long zonesize)
835c134e 5206{
7c45512d 5207 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 5208 zone->pageblock_flags = NULL;
58a01a45 5209 if (usemapsize)
6782832e
SS
5210 zone->pageblock_flags =
5211 memblock_virt_alloc_node_nopanic(usemapsize,
5212 pgdat->node_id);
835c134e
MG
5213}
5214#else
7c45512d
LT
5215static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5216 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
5217#endif /* CONFIG_SPARSEMEM */
5218
d9c23400 5219#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 5220
d9c23400 5221/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
15ca220e 5222void __paginginit set_pageblock_order(void)
d9c23400 5223{
955c1cd7
AM
5224 unsigned int order;
5225
d9c23400
MG
5226 /* Check that pageblock_order has not already been set up */
5227 if (pageblock_order)
5228 return;
5229
955c1cd7
AM
5230 if (HPAGE_SHIFT > PAGE_SHIFT)
5231 order = HUGETLB_PAGE_ORDER;
5232 else
5233 order = MAX_ORDER - 1;
5234
d9c23400
MG
5235 /*
5236 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
5237 * This value may be variable depending on boot parameters on IA64 and
5238 * powerpc.
d9c23400
MG
5239 */
5240 pageblock_order = order;
5241}
5242#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5243
ba72cb8c
MG
5244/*
5245 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
5246 * is unused as pageblock_order is set at compile-time. See
5247 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5248 * the kernel config
ba72cb8c 5249 */
15ca220e 5250void __paginginit set_pageblock_order(void)
ba72cb8c 5251{
ba72cb8c 5252}
d9c23400
MG
5253
5254#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5255
01cefaef
JL
5256static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5257 unsigned long present_pages)
5258{
5259 unsigned long pages = spanned_pages;
5260
5261 /*
5262 * Provide a more accurate estimation if there are holes within
5263 * the zone and SPARSEMEM is in use. If there are holes within the
5264 * zone, each populated memory region may cost us one or two extra
5265 * memmap pages due to alignment because memmap pages for each
5266 * populated region may not be naturally aligned on a page boundary.
5267 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5268 */
5269 if (spanned_pages > present_pages + (present_pages >> 4) &&
5270 IS_ENABLED(CONFIG_SPARSEMEM))
5271 pages = present_pages;
5272
5273 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5274}
5275
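/*
 * Worked example under assumed sizes (4KiB pages and a 64-byte struct
 * page): a SPARSEMEM zone spanning 294912 pages of which only 262144 are
 * present satisfies 294912 > 262144 + 262144/16, so the present count is
 * used and the memmap costs 262144 * 64 / 4096 = 4096 pages, i.e. 16MiB.
 */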
1da177e4
LT
5276/*
5277 * Set up the zone data structures:
5278 * - mark all pages reserved
5279 * - mark all memory queues empty
5280 * - clear the memory bitmaps
6527af5d
MK
5281 *
5282 * NOTE: pgdat should get zeroed by caller.
1da177e4 5283 */
7f3eb55b 5284static void __paginginit free_area_init_core(struct pglist_data *pgdat)
1da177e4 5285{
2f1b6248 5286 enum zone_type j;
ed8ece2e 5287 int nid = pgdat->node_id;
718127cc 5288 int ret;
1da177e4 5289
208d54e5 5290 pgdat_resize_init(pgdat);
8177a420
AA
5291#ifdef CONFIG_NUMA_BALANCING
5292 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5293 pgdat->numabalancing_migrate_nr_pages = 0;
5294 pgdat->numabalancing_migrate_next_window = jiffies;
a3d0a918
KS
5295#endif
5296#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5297 spin_lock_init(&pgdat->split_queue_lock);
5298 INIT_LIST_HEAD(&pgdat->split_queue);
5299 pgdat->split_queue_len = 0;
8177a420 5300#endif
1da177e4 5301 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 5302 init_waitqueue_head(&pgdat->pfmemalloc_wait);
eefa864b 5303 pgdat_page_ext_init(pgdat);
5f63b720 5304
1da177e4
LT
5305 for (j = 0; j < MAX_NR_ZONES; j++) {
5306 struct zone *zone = pgdat->node_zones + j;
9feedc9d 5307 unsigned long size, realsize, freesize, memmap_pages;
d91749c1 5308 unsigned long zone_start_pfn = zone->zone_start_pfn;
1da177e4 5309
febd5949
GZ
5310 size = zone->spanned_pages;
5311 realsize = freesize = zone->present_pages;
1da177e4 5312
0e0b864e 5313 /*
9feedc9d 5314 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
5315 * is used by this zone for memmap. This affects the watermark
5316 * and per-cpu initialisations
5317 */
01cefaef 5318 memmap_pages = calc_memmap_size(size, realsize);
ba914f48
ZH
5319 if (!is_highmem_idx(j)) {
5320 if (freesize >= memmap_pages) {
5321 freesize -= memmap_pages;
5322 if (memmap_pages)
5323 printk(KERN_DEBUG
5324 " %s zone: %lu pages used for memmap\n",
5325 zone_names[j], memmap_pages);
5326 } else
5327 printk(KERN_WARNING
5328 " %s zone: %lu pages exceeds freesize %lu\n",
5329 zone_names[j], memmap_pages, freesize);
5330 }
0e0b864e 5331
6267276f 5332 /* Account for reserved pages */
9feedc9d
JL
5333 if (j == 0 && freesize > dma_reserve) {
5334 freesize -= dma_reserve;
d903ef9f 5335 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 5336 zone_names[0], dma_reserve);
0e0b864e
MG
5337 }
5338
98d2b0eb 5339 if (!is_highmem_idx(j))
9feedc9d 5340 nr_kernel_pages += freesize;
01cefaef
JL
5341 /* Charge for highmem memmap if there are enough kernel pages */
5342 else if (nr_kernel_pages > memmap_pages * 2)
5343 nr_kernel_pages -= memmap_pages;
9feedc9d 5344 nr_all_pages += freesize;
1da177e4 5345
9feedc9d
JL
5346 /*
5347 * Set an approximate value for lowmem here; it will be adjusted
5348 * when the bootmem allocator frees pages into the buddy system.
5349 * And all highmem pages will be managed by the buddy system.
5350 */
5351 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 5352#ifdef CONFIG_NUMA
d5f541ed 5353 zone->node = nid;
9feedc9d 5354 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
9614634f 5355 / 100;
9feedc9d 5356 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
9614634f 5357#endif
1da177e4
LT
5358 zone->name = zone_names[j];
5359 spin_lock_init(&zone->lock);
5360 spin_lock_init(&zone->lru_lock);
bdc8cb98 5361 zone_seqlock_init(zone);
1da177e4 5362 zone->zone_pgdat = pgdat;
ed8ece2e 5363 zone_pcp_init(zone);
81c0a2bb
JW
5364
5365 /* For bootup, initialized properly in watermark setup */
5366 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5367
bea8c150 5368 lruvec_init(&zone->lruvec);
1da177e4
LT
5369 if (!size)
5370 continue;
5371
955c1cd7 5372 set_pageblock_order();
7c45512d 5373 setup_usemap(pgdat, zone, zone_start_pfn, size);
b171e409 5374 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
718127cc 5375 BUG_ON(ret);
76cdd58e 5376 memmap_init(size, nid, j, zone_start_pfn);
1da177e4
LT
5377 }
5378}
5379
577a32f6 5380static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 5381{
b0aeba74 5382 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
5383 unsigned long __maybe_unused offset = 0;
5384
1da177e4
LT
5385 /* Skip empty nodes */
5386 if (!pgdat->node_spanned_pages)
5387 return;
5388
d41dee36 5389#ifdef CONFIG_FLAT_NODE_MEM_MAP
b0aeba74
TL
5390 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5391 offset = pgdat->node_start_pfn - start;
1da177e4
LT
5392 /* ia64 gets its own node_mem_map, before this, without bootmem */
5393 if (!pgdat->node_mem_map) {
b0aeba74 5394 unsigned long size, end;
d41dee36
AW
5395 struct page *map;
5396
e984bb43
BP
5397 /*
5398 * The zone's endpoints aren't required to be MAX_ORDER
5399 * aligned but the node_mem_map endpoints must be in order
5400 * for the buddy allocator to function correctly.
5401 */
108bcc96 5402 end = pgdat_end_pfn(pgdat);
e984bb43
BP
5403 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5404 size = (end - start) * sizeof(struct page);
6f167ec7
DH
5405 map = alloc_remap(pgdat->node_id, size);
5406 if (!map)
6782832e
SS
5407 map = memblock_virt_alloc_node_nopanic(size,
5408 pgdat->node_id);
a1c34a3b 5409 pgdat->node_mem_map = map + offset;
1da177e4 5410 }
12d810c1 5411#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
5412 /*
5413 * With no DISCONTIG, the global mem_map is just set as node 0's
5414 */
c713216d 5415 if (pgdat == NODE_DATA(0)) {
1da177e4 5416 mem_map = NODE_DATA(0)->node_mem_map;
a1c34a3b 5417#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
c713216d 5418 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 5419 mem_map -= offset;
0ee332c1 5420#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5421 }
1da177e4 5422#endif
d41dee36 5423#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
5424}
5425
9109fb7b
JW
5426void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5427 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 5428{
9109fb7b 5429 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
5430 unsigned long start_pfn = 0;
5431 unsigned long end_pfn = 0;
9109fb7b 5432
88fdf75d 5433 /* pg_data_t should be reset to zero when it's allocated */
8783b6e2 5434 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
88fdf75d 5435
3a80a7fa 5436 reset_deferred_meminit(pgdat);
1da177e4
LT
5437 pgdat->node_id = nid;
5438 pgdat->node_start_pfn = node_start_pfn;
7960aedd
ZY
5439#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5440 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8d29e18a 5441 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
4ada0c5a
ZL
5442 (u64)start_pfn << PAGE_SHIFT,
5443 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
d91749c1
TI
5444#else
5445 start_pfn = node_start_pfn;
7960aedd
ZY
5446#endif
5447 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5448 zones_size, zholes_size);
1da177e4
LT
5449
5450 alloc_node_mem_map(pgdat);
e8c27ac9
YL
5451#ifdef CONFIG_FLAT_NODE_MEM_MAP
5452 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5453 nid, (unsigned long)pgdat,
5454 (unsigned long)pgdat->node_mem_map);
5455#endif
1da177e4 5456
7f3eb55b 5457 free_area_init_core(pgdat);
1da177e4
LT
5458}
5459
0ee332c1 5460#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
5461
5462#if MAX_NUMNODES > 1
5463/*
5464 * Figure out the number of possible node ids.
5465 */
f9872caf 5466void __init setup_nr_node_ids(void)
418508c1 5467{
904a9553 5468 unsigned int highest;
418508c1 5469
904a9553 5470 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
5471 nr_node_ids = highest + 1;
5472}
418508c1
MS
5473#endif
5474
1e01979c
TH
5475/**
5476 * node_map_pfn_alignment - determine the maximum internode alignment
5477 *
5478 * This function should be called after node map is populated and sorted.
5479 * It calculates the maximum power of two alignment which can distinguish
5480 * all the nodes.
5481 *
5482 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5483 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
5484 * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
5485 * shifted, 1GiB is enough and this function will indicate so.
5486 *
5487 * This is used to test whether the pfn -> nid mapping of the chosen memory
5488 * model has fine enough granularity to avoid incorrect mapping for the
5489 * populated node map.
5490 *
5491 * Returns the determined alignment in pfn's. 0 if there is no alignment
5492 * requirement (single node).
5493 */
5494unsigned long __init node_map_pfn_alignment(void)
5495{
5496 unsigned long accl_mask = 0, last_end = 0;
c13291a5 5497 unsigned long start, end, mask;
1e01979c 5498 int last_nid = -1;
c13291a5 5499 int i, nid;
1e01979c 5500
c13291a5 5501 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
5502 if (!start || last_nid < 0 || last_nid == nid) {
5503 last_nid = nid;
5504 last_end = end;
5505 continue;
5506 }
5507
5508 /*
5509 * Start with a mask granular enough to pin-point to the
5510 * start pfn and tick off bits one-by-one until it becomes
5511 * too coarse to separate the current node from the last.
5512 */
5513 mask = ~((1 << __ffs(start)) - 1);
5514 while (mask && last_end <= (start & (mask << 1)))
5515 mask <<= 1;
5516
5517 /* accumulate all internode masks */
5518 accl_mask |= mask;
5519 }
5520
5521 /* convert mask to number of pages */
5522 return ~accl_mask + 1;
5523}
5524
a6af2bc3 5525/* Find the lowest pfn for a node */
b69a7288 5526static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 5527{
a6af2bc3 5528 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
5529 unsigned long start_pfn;
5530 int i;
1abbfb41 5531
c13291a5
TH
5532 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5533 min_pfn = min(min_pfn, start_pfn);
c713216d 5534
a6af2bc3
MG
5535 if (min_pfn == ULONG_MAX) {
5536 printk(KERN_WARNING
2bc0d261 5537 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
5538 return 0;
5539 }
5540
5541 return min_pfn;
c713216d
MG
5542}
5543
5544/**
5545 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5546 *
5547 * It returns the minimum PFN based on information provided via
7d018176 5548 * memblock_set_node().
c713216d
MG
5549 */
5550unsigned long __init find_min_pfn_with_active_regions(void)
5551{
5552 return find_min_pfn_for_node(MAX_NUMNODES);
5553}
5554
37b07e41
LS
5555/*
5556 * early_calculate_totalpages()
5557 * Sum pages in active regions for movable zone.
4b0ef1fe 5558 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 5559 */
484f51f8 5560static unsigned long __init early_calculate_totalpages(void)
7e63efef 5561{
7e63efef 5562 unsigned long totalpages = 0;
c13291a5
TH
5563 unsigned long start_pfn, end_pfn;
5564 int i, nid;
5565
5566 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5567 unsigned long pages = end_pfn - start_pfn;
7e63efef 5568
37b07e41
LS
5569 totalpages += pages;
5570 if (pages)
4b0ef1fe 5571 node_set_state(nid, N_MEMORY);
37b07e41 5572 }
b8af2941 5573 return totalpages;
7e63efef
MG
5574}
5575
2a1e274a
MG
5576/*
5577 * Find the PFN at which the Movable zone begins in each node. Kernel memory
5578 * is spread evenly between nodes as long as the nodes have enough
5579 * memory. When they don't, some nodes will have more kernelcore than
5580 * others
5581 */
b224ef85 5582static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
5583{
5584 int i, nid;
5585 unsigned long usable_startpfn;
5586 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 5587 /* save the state before borrow the nodemask */
4b0ef1fe 5588 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 5589 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 5590 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 5591 struct memblock_region *r;
b2f3eebe
TC
5592
5593 /* Need to find movable_zone earlier when movable_node is specified. */
5594 find_usable_zone_for_movable();
5595
5596 /*
5597 * If movable_node is specified, ignore kernelcore and movablecore
5598 * options.
5599 */
5600 if (movable_node_is_enabled()) {
136199f0
EM
5601 for_each_memblock(memory, r) {
5602 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
5603 continue;
5604
136199f0 5605 nid = r->nid;
b2f3eebe 5606
136199f0 5607 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
5608 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5609 min(usable_startpfn, zone_movable_pfn[nid]) :
5610 usable_startpfn;
5611 }
5612
5613 goto out2;
5614 }
2a1e274a 5615
342332e6
TI
5616 /*
5617 * If kernelcore=mirror is specified, ignore movablecore option
5618 */
5619 if (mirrored_kernelcore) {
5620 bool mem_below_4gb_not_mirrored = false;
5621
5622 for_each_memblock(memory, r) {
5623 if (memblock_is_mirror(r))
5624 continue;
5625
5626 nid = r->nid;
5627
5628 usable_startpfn = memblock_region_memory_base_pfn(r);
5629
5630 if (usable_startpfn < 0x100000) {
5631 mem_below_4gb_not_mirrored = true;
5632 continue;
5633 }
5634
5635 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5636 min(usable_startpfn, zone_movable_pfn[nid]) :
5637 usable_startpfn;
5638 }
5639
5640 if (mem_below_4gb_not_mirrored)
5641 pr_warn("This configuration results in unmirrored kernel memory.\n");
5642
5643 goto out2;
5644 }
5645
7e63efef 5646 /*
b2f3eebe 5647 * If movablecore=nn[KMG] was specified, calculate what size of
7e63efef
MG
5648 * kernelcore that corresponds to, so that memory usable for
5649 * any allocation type is evenly spread. If both kernelcore
5650 * and movablecore are specified, then the value of kernelcore
5651 * will be used for required_kernelcore if it's greater than
5652 * what movablecore would have allowed.
5653 */
5654 if (required_movablecore) {
7e63efef
MG
5655 unsigned long corepages;
5656
5657 /*
5658 * Round-up so that ZONE_MOVABLE is at least as large as what
5659 * was requested by the user
5660 */
5661 required_movablecore =
5662 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 5663 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
5664 corepages = totalpages - required_movablecore;
5665
5666 required_kernelcore = max(required_kernelcore, corepages);
5667 }
5668
bde304bd
XQ
5669 /*
5670 * If kernelcore was not specified or kernelcore size is larger
5671 * than totalpages, there is no ZONE_MOVABLE.
5672 */
5673 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 5674 goto out;
2a1e274a
MG
5675
5676 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
5677 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5678
5679restart:
5680 /* Spread kernelcore memory as evenly as possible throughout nodes */
5681 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 5682 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
5683 unsigned long start_pfn, end_pfn;
5684
2a1e274a
MG
5685 /*
5686 * Recalculate kernelcore_node if the division per node
5687 * now exceeds what is necessary to satisfy the requested
5688 * amount of memory for the kernel
5689 */
5690 if (required_kernelcore < kernelcore_node)
5691 kernelcore_node = required_kernelcore / usable_nodes;
5692
5693 /*
5694 * As the map is walked, we track how much memory is usable
5695 * by the kernel using kernelcore_remaining. When it is
5696 * 0, the rest of the node is usable by ZONE_MOVABLE
5697 */
5698 kernelcore_remaining = kernelcore_node;
5699
5700 /* Go through each range of PFNs within this node */
c13291a5 5701 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
5702 unsigned long size_pages;
5703
c13291a5 5704 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
5705 if (start_pfn >= end_pfn)
5706 continue;
5707
5708 /* Account for what is only usable for kernelcore */
5709 if (start_pfn < usable_startpfn) {
5710 unsigned long kernel_pages;
5711 kernel_pages = min(end_pfn, usable_startpfn)
5712 - start_pfn;
5713
5714 kernelcore_remaining -= min(kernel_pages,
5715 kernelcore_remaining);
5716 required_kernelcore -= min(kernel_pages,
5717 required_kernelcore);
5718
5719 /* Continue if range is now fully accounted */
5720 if (end_pfn <= usable_startpfn) {
5721
5722 /*
5723 * Push zone_movable_pfn to the end so
5724 * that if we have to rebalance
5725 * kernelcore across nodes, we will
5726 * not double account here
5727 */
5728 zone_movable_pfn[nid] = end_pfn;
5729 continue;
5730 }
5731 start_pfn = usable_startpfn;
5732 }
5733
5734 /*
5735 * The usable PFN range for ZONE_MOVABLE is from
5736 * start_pfn->end_pfn. Calculate size_pages as the
5737 * number of pages used as kernelcore
5738 */
5739 size_pages = end_pfn - start_pfn;
5740 if (size_pages > kernelcore_remaining)
5741 size_pages = kernelcore_remaining;
5742 zone_movable_pfn[nid] = start_pfn + size_pages;
5743
5744 /*
5745 * Some kernelcore has been met, update counts and
5746 * break if the kernelcore for this node has been
b8af2941 5747 * satisfied
2a1e274a
MG
5748 */
5749 required_kernelcore -= min(required_kernelcore,
5750 size_pages);
5751 kernelcore_remaining -= size_pages;
5752 if (!kernelcore_remaining)
5753 break;
5754 }
5755 }
5756
5757 /*
5758 * If there is still required_kernelcore, we do another pass with one
5759 * less node in the count. This will push zone_movable_pfn[nid] further
5760 * along on the nodes that still have memory until kernelcore is
b8af2941 5761 * satisfied
2a1e274a
MG
5762 */
5763 usable_nodes--;
5764 if (usable_nodes && required_kernelcore > usable_nodes)
5765 goto restart;
5766
b2f3eebe 5767out2:
2a1e274a
MG
5768 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5769 for (nid = 0; nid < MAX_NUMNODES; nid++)
5770 zone_movable_pfn[nid] =
5771 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 5772
20e6926d 5773out:
66918dcd 5774 /* restore the node_state */
4b0ef1fe 5775 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
5776}
5777
4b0ef1fe
LJ
5778/* Any regular or high memory on that node? */
5779static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 5780{
37b07e41
LS
5781 enum zone_type zone_type;
5782
4b0ef1fe
LJ
5783 if (N_MEMORY == N_NORMAL_MEMORY)
5784 return;
5785
5786 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 5787 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 5788 if (populated_zone(zone)) {
4b0ef1fe
LJ
5789 node_set_state(nid, N_HIGH_MEMORY);
5790 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5791 zone_type <= ZONE_NORMAL)
5792 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
5793 break;
5794 }
37b07e41 5795 }
37b07e41
LS
5796}
5797
c713216d
MG
5798/**
5799 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 5800 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
5801 *
5802 * This will call free_area_init_node() for each active node in the system.
7d018176 5803 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
5804 * zone in each node, and of its holes, is calculated. If the maximum PFNs
5805 * of two adjacent zones match, the higher zone is assumed to be empty.
5806 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5807 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5808 * starts where the previous one ended. For example, ZONE_DMA32 starts
5809 * at arch_max_dma_pfn.
5810 */
5811void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5812{
c13291a5
TH
5813 unsigned long start_pfn, end_pfn;
5814 int i, nid;
a6af2bc3 5815
c713216d
MG
5816 /* Record where the zone boundaries are */
5817 memset(arch_zone_lowest_possible_pfn, 0,
5818 sizeof(arch_zone_lowest_possible_pfn));
5819 memset(arch_zone_highest_possible_pfn, 0,
5820 sizeof(arch_zone_highest_possible_pfn));
5821 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5822 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5823 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
5824 if (i == ZONE_MOVABLE)
5825 continue;
c713216d
MG
5826 arch_zone_lowest_possible_pfn[i] =
5827 arch_zone_highest_possible_pfn[i-1];
5828 arch_zone_highest_possible_pfn[i] =
5829 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5830 }
2a1e274a
MG
5831 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5832 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5833
5834 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5835 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 5836 find_zone_movable_pfns_for_nodes();
c713216d 5837
c713216d 5838 /* Print out the zone ranges */
f88dfff5 5839 pr_info("Zone ranges:\n");
2a1e274a
MG
5840 for (i = 0; i < MAX_NR_ZONES; i++) {
5841 if (i == ZONE_MOVABLE)
5842 continue;
f88dfff5 5843 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
5844 if (arch_zone_lowest_possible_pfn[i] ==
5845 arch_zone_highest_possible_pfn[i])
f88dfff5 5846 pr_cont("empty\n");
72f0ba02 5847 else
8d29e18a
JG
5848 pr_cont("[mem %#018Lx-%#018Lx]\n",
5849 (u64)arch_zone_lowest_possible_pfn[i]
5850 << PAGE_SHIFT,
5851 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 5852 << PAGE_SHIFT) - 1);
2a1e274a
MG
5853 }
5854
5855 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 5856 pr_info("Movable zone start for each node\n");
2a1e274a
MG
5857 for (i = 0; i < MAX_NUMNODES; i++) {
5858 if (zone_movable_pfn[i])
8d29e18a
JG
5859 pr_info(" Node %d: %#018Lx\n", i,
5860 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 5861 }
c713216d 5862
f2d52fe5 5863 /* Print out the early node map */
f88dfff5 5864 pr_info("Early memory node ranges\n");
c13291a5 5865 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
8d29e18a
JG
5866 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5867 (u64)start_pfn << PAGE_SHIFT,
5868 ((u64)end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
5869
5870 /* Initialise every node */
708614e6 5871 mminit_verify_pageflags_layout();
8ef82866 5872 setup_nr_node_ids();
c713216d
MG
5873 for_each_online_node(nid) {
5874 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 5875 free_area_init_node(nid, NULL,
c713216d 5876 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
5877
5878 /* Any memory on that node */
5879 if (pgdat->node_present_pages)
4b0ef1fe
LJ
5880 node_set_state(nid, N_MEMORY);
5881 check_for_memory(pgdat, nid);
c713216d
MG
5882 }
5883}
2a1e274a 5884
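/*
 * A hypothetical caller sketch, modelled on the way an architecture's
 * zone_sizes_init() usually drives free_area_init_nodes(); MAX_DMA_PFN,
 * MAX_DMA32_PFN and max_pfn are provided by arch code and are used here
 * only for illustration:
 */
static void __init example_zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;		/* e.g. the 16MiB ISA limit */
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;	/* e.g. the 4GiB boundary */
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;		/* everything above that */

	free_area_init_nodes(max_zone_pfns);
}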
7e63efef 5885static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
5886{
5887 unsigned long long coremem;
5888 if (!p)
5889 return -EINVAL;
5890
5891 coremem = memparse(p, &p);
7e63efef 5892 *core = coremem >> PAGE_SHIFT;
2a1e274a 5893
7e63efef 5894 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
5895 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5896
5897 return 0;
5898}
ed7ed365 5899
7e63efef
MG
5900/*
5901 * kernelcore=size sets the amount of memory for use for allocations that
5902 * cannot be reclaimed or migrated.
5903 */
5904static int __init cmdline_parse_kernelcore(char *p)
5905{
342332e6
TI
5906 /* parse kernelcore=mirror */
5907 if (parse_option_str(p, "mirror")) {
5908 mirrored_kernelcore = true;
5909 return 0;
5910 }
5911
7e63efef
MG
5912 return cmdline_parse_core(p, &required_kernelcore);
5913}
5914
5915/*
5916 * movablecore=size sets the amount of memory for use for allocations that
5917 * can be reclaimed or migrated.
5918 */
5919static int __init cmdline_parse_movablecore(char *p)
5920{
5921 return cmdline_parse_core(p, &required_movablecore);
5922}
5923
ed7ed365 5924early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 5925early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 5926
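/*
 * Illustrative boot command lines (the sizes are examples only):
 *
 *	kernelcore=512M    keep roughly 512MiB usable for unmovable
 *	                   allocations; the rest of each node feeds
 *	                   ZONE_MOVABLE
 *	kernelcore=mirror  carve ZONE_MOVABLE out of non-mirrored memory so
 *	                   kernel allocations stay in mirrored memory
 *	movablecore=2G     request at least 2GiB of ZONE_MOVABLE in total
 */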
0ee332c1 5927#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5928
c3d5f5f0
JL
5929void adjust_managed_page_count(struct page *page, long count)
5930{
5931 spin_lock(&managed_page_count_lock);
5932 page_zone(page)->managed_pages += count;
5933 totalram_pages += count;
3dcc0571
JL
5934#ifdef CONFIG_HIGHMEM
5935 if (PageHighMem(page))
5936 totalhigh_pages += count;
5937#endif
c3d5f5f0
JL
5938 spin_unlock(&managed_page_count_lock);
5939}
3dcc0571 5940EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 5941
11199692 5942unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
69afade7 5943{
11199692
JL
5944 void *pos;
5945 unsigned long pages = 0;
69afade7 5946
11199692
JL
5947 start = (void *)PAGE_ALIGN((unsigned long)start);
5948 end = (void *)((unsigned long)end & PAGE_MASK);
5949 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
dbe67df4 5950 if ((unsigned int)poison <= 0xFF)
11199692
JL
5951 memset(pos, poison, PAGE_SIZE);
5952 free_reserved_page(virt_to_page(pos));
69afade7
JL
5953 }
5954
5955 if (pages && s)
11199692 5956 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
69afade7
JL
5957 s, pages << (PAGE_SHIFT - 10), start, end);
5958
5959 return pages;
5960}
11199692 5961EXPORT_SYMBOL(free_reserved_area);
69afade7 5962
cfa11e08
JL
5963#ifdef CONFIG_HIGHMEM
5964void free_highmem_page(struct page *page)
5965{
5966 __free_reserved_page(page);
5967 totalram_pages++;
7b4b2a0d 5968 page_zone(page)->managed_pages++;
cfa11e08
JL
5969 totalhigh_pages++;
5970}
5971#endif
5972
7ee3d4e8
JL
5973
5974void __init mem_init_print_info(const char *str)
5975{
5976 unsigned long physpages, codesize, datasize, rosize, bss_size;
5977 unsigned long init_code_size, init_data_size;
5978
5979 physpages = get_num_physpages();
5980 codesize = _etext - _stext;
5981 datasize = _edata - _sdata;
5982 rosize = __end_rodata - __start_rodata;
5983 bss_size = __bss_stop - __bss_start;
5984 init_data_size = __init_end - __init_begin;
5985 init_code_size = _einittext - _sinittext;
5986
5987 /*
5988 * Detect special cases and adjust section sizes accordingly:
5989 * 1) .init.* may be embedded into .data sections
5990 * 2) .init.text.* may be out of [__init_begin, __init_end],
5991 * please refer to arch/tile/kernel/vmlinux.lds.S.
5992 * 3) .rodata.* may be embedded into .text or .data sections.
5993 */
5994#define adj_init_size(start, end, size, pos, adj) \
b8af2941
PK
5995 do { \
5996 if (start <= pos && pos < end && size > adj) \
5997 size -= adj; \
5998 } while (0)
7ee3d4e8
JL
5999
6000 adj_init_size(__init_begin, __init_end, init_data_size,
6001 _sinittext, init_code_size);
6002 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6003 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6004 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6005 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6006
6007#undef adj_init_size
6008
f88dfff5 6009 pr_info("Memory: %luK/%luK available "
7ee3d4e8 6010 "(%luK kernel code, %luK rwdata, %luK rodata, "
e48322ab 6011 "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8
JL
6012#ifdef CONFIG_HIGHMEM
6013 ", %luK highmem"
6014#endif
6015 "%s%s)\n",
6016 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
6017 codesize >> 10, datasize >> 10, rosize >> 10,
6018 (init_data_size + init_code_size) >> 10, bss_size >> 10,
e48322ab
PK
6019 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
6020 totalcma_pages << (PAGE_SHIFT-10),
7ee3d4e8
JL
6021#ifdef CONFIG_HIGHMEM
6022 totalhigh_pages << (PAGE_SHIFT-10),
6023#endif
6024 str ? ", " : "", str ? str : "");
6025}
6026
0e0b864e 6027/**
88ca3b94
RD
6028 * set_dma_reserve - set the specified number of pages reserved in the first zone
6029 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 6030 *
013110a7 6031 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
6032 * In the DMA zone, a significant percentage may be consumed by kernel image
6033 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
6034 * function may optionally be used to account for unfreeable pages in the
6035 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6036 * smaller per-cpu batchsize.
0e0b864e
MG
6037 */
6038void __init set_dma_reserve(unsigned long new_dma_reserve)
6039{
6040 dma_reserve = new_dma_reserve;
6041}
6042
1da177e4
LT
6043void __init free_area_init(unsigned long *zones_size)
6044{
9109fb7b 6045 free_area_init_node(0, zones_size,
1da177e4
LT
6046 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6047}
1da177e4 6048
1da177e4
LT
6049static int page_alloc_cpu_notify(struct notifier_block *self,
6050 unsigned long action, void *hcpu)
6051{
6052 int cpu = (unsigned long)hcpu;
1da177e4 6053
8bb78442 6054 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
f0cb3c76 6055 lru_add_drain_cpu(cpu);
9f8f2172
CL
6056 drain_pages(cpu);
6057
6058 /*
6059 * Spill the event counters of the dead processor
6060 * into the current processor's event counters.
6061 * This artificially elevates the count of the current
6062 * processor.
6063 */
f8891e5e 6064 vm_events_fold_cpu(cpu);
9f8f2172
CL
6065
6066 /*
6067 * Zero the differential counters of the dead processor
6068 * so that the vm statistics are consistent.
6069 *
6070 * This is only okay since the processor is dead and cannot
6071 * race with what we are doing.
6072 */
2bb921e5 6073 cpu_vm_stats_fold(cpu);
1da177e4
LT
6074 }
6075 return NOTIFY_OK;
6076}
1da177e4
LT
6077
6078void __init page_alloc_init(void)
6079{
6080 hotcpu_notifier(page_alloc_cpu_notify, 0);
6081}
6082
cb45b0e9 6083/*
34b10060 6084 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
6085 * or min_free_kbytes changes.
6086 */
6087static void calculate_totalreserve_pages(void)
6088{
6089 struct pglist_data *pgdat;
6090 unsigned long reserve_pages = 0;
2f6726e5 6091 enum zone_type i, j;
cb45b0e9
HA
6092
6093 for_each_online_pgdat(pgdat) {
6094 for (i = 0; i < MAX_NR_ZONES; i++) {
6095 struct zone *zone = pgdat->node_zones + i;
3484b2de 6096 long max = 0;
cb45b0e9
HA
6097
6098 /* Find valid and maximum lowmem_reserve in the zone */
6099 for (j = i; j < MAX_NR_ZONES; j++) {
6100 if (zone->lowmem_reserve[j] > max)
6101 max = zone->lowmem_reserve[j];
6102 }
6103
41858966
MG
6104 /* we treat the high watermark as reserved pages. */
6105 max += high_wmark_pages(zone);
cb45b0e9 6106
b40da049
JL
6107 if (max > zone->managed_pages)
6108 max = zone->managed_pages;
a8d01437
JW
6109
6110 zone->totalreserve_pages = max;
6111
cb45b0e9
HA
6112 reserve_pages += max;
6113 }
6114 }
6115 totalreserve_pages = reserve_pages;
6116}
6117
1da177e4
LT
6118/*
6119 * setup_per_zone_lowmem_reserve - called whenever
34b10060 6120 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
6121 * has a correct pages reserved value, so an adequate number of
6122 * pages are left in the zone after a successful __alloc_pages().
6123 */
6124static void setup_per_zone_lowmem_reserve(void)
6125{
6126 struct pglist_data *pgdat;
2f6726e5 6127 enum zone_type j, idx;
1da177e4 6128
ec936fc5 6129 for_each_online_pgdat(pgdat) {
1da177e4
LT
6130 for (j = 0; j < MAX_NR_ZONES; j++) {
6131 struct zone *zone = pgdat->node_zones + j;
b40da049 6132 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
6133
6134 zone->lowmem_reserve[j] = 0;
6135
2f6726e5
CL
6136 idx = j;
6137 while (idx) {
1da177e4
LT
6138 struct zone *lower_zone;
6139
2f6726e5
CL
6140 idx--;
6141
1da177e4
LT
6142 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6143 sysctl_lowmem_reserve_ratio[idx] = 1;
6144
6145 lower_zone = pgdat->node_zones + idx;
b40da049 6146 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 6147 sysctl_lowmem_reserve_ratio[idx];
b40da049 6148 managed_pages += lower_zone->managed_pages;
1da177e4
LT
6149 }
6150 }
6151 }
cb45b0e9
HA
6152
6153 /* update totalreserve_pages */
6154 calculate_totalreserve_pages();
1da177e4
LT
6155}
6156
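/*
 * Worked example (assuming the default lowmem_reserve_ratio of 256 for
 * ZONE_DMA32): with a 3GiB ZONE_NORMAL of 4KiB pages (786432 managed
 * pages), ZONE_DMA32 gets lowmem_reserve[ZONE_NORMAL] = 786432 / 256 =
 * 3072 pages, so a ZONE_NORMAL-capable allocation may only fall back to
 * ZONE_DMA32 while that zone stays at least 3072 pages above its
 * watermark.
 */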
cfd3da1e 6157static void __setup_per_zone_wmarks(void)
1da177e4
LT
6158{
6159 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6160 unsigned long lowmem_pages = 0;
6161 struct zone *zone;
6162 unsigned long flags;
6163
6164 /* Calculate total number of !ZONE_HIGHMEM pages */
6165 for_each_zone(zone) {
6166 if (!is_highmem(zone))
b40da049 6167 lowmem_pages += zone->managed_pages;
1da177e4
LT
6168 }
6169
6170 for_each_zone(zone) {
ac924c60
AM
6171 u64 tmp;
6172
1125b4e3 6173 spin_lock_irqsave(&zone->lock, flags);
b40da049 6174 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 6175 do_div(tmp, lowmem_pages);
1da177e4
LT
6176 if (is_highmem(zone)) {
6177 /*
669ed175
NP
6178 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6179 * need highmem pages, so cap pages_min to a small
6180 * value here.
6181 *
41858966 6182 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
42ff2703 6183 * deltas control asynchronous page reclaim, and so should
669ed175 6184 * not be capped for highmem.
1da177e4 6185 */
90ae8d67 6186 unsigned long min_pages;
1da177e4 6187
b40da049 6188 min_pages = zone->managed_pages / 1024;
90ae8d67 6189 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 6190 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 6191 } else {
669ed175
NP
6192 /*
6193 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
6194 * proportionate to the zone's size.
6195 */
41858966 6196 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
6197 }
6198
41858966
MG
6199 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
6200 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
49f223a9 6201
81c0a2bb 6202 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
abe5f972
JW
6203 high_wmark_pages(zone) - low_wmark_pages(zone) -
6204 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
81c0a2bb 6205
1125b4e3 6206 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 6207 }
cb45b0e9
HA
6208
6209 /* update totalreserve_pages */
6210 calculate_totalreserve_pages();
1da177e4
LT
6211}
6212
cfd3da1e
MG
6213/**
6214 * setup_per_zone_wmarks - called when min_free_kbytes changes
6215 * or when memory is hot-{added|removed}
6216 *
6217 * Ensures that the watermark[min,low,high] values for each zone are set
6218 * correctly with respect to min_free_kbytes.
6219 */
6220void setup_per_zone_wmarks(void)
6221{
6222 mutex_lock(&zonelists_mutex);
6223 __setup_per_zone_wmarks();
6224 mutex_unlock(&zonelists_mutex);
6225}
6226
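/*
 * Worked example (assuming 4KiB pages, min_free_kbytes == 4096 and a
 * single lowmem zone owning all of lowmem): pages_min = 4096 >> 2 = 1024
 * pages and tmp == 1024, so the zone ends up with WMARK_MIN = 1024,
 * WMARK_LOW = 1024 + 256 = 1280 and WMARK_HIGH = 1024 + 512 = 1536 pages.
 */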
55a4462a 6227/*
556adecb
RR
6228 * The inactive anon list should be small enough that the VM never has to
6229 * do too much work, but large enough that each inactive page has a chance
6230 * to be referenced again before it is swapped out.
6231 *
6232 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6233 * INACTIVE_ANON pages on this zone's LRU, maintained by the
6234 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6235 * the anonymous pages are kept on the inactive list.
6236 *
6237 * total target max
6238 * memory ratio inactive anon
6239 * -------------------------------------
6240 * 10MB 1 5MB
6241 * 100MB 1 50MB
6242 * 1GB 3 250MB
6243 * 10GB 10 0.9GB
6244 * 100GB 31 3GB
6245 * 1TB 101 10GB
6246 * 10TB 320 32GB
6247 */
1b79acc9 6248static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
556adecb 6249{
96cb4df5 6250 unsigned int gb, ratio;
556adecb 6251
96cb4df5 6252 /* Zone size in gigabytes */
b40da049 6253 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
96cb4df5 6254 if (gb)
556adecb 6255 ratio = int_sqrt(10 * gb);
96cb4df5
MK
6256 else
6257 ratio = 1;
556adecb 6258
96cb4df5
MK
6259 zone->inactive_ratio = ratio;
6260}
556adecb 6261
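/*
 * Worked example: a zone managing 16GiB of 4KiB pages has gb = 16, so
 * ratio = int_sqrt(10 * 16) = 12, i.e. roughly one inactive anonymous
 * page is kept for every twelve active ones.
 */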
839a4fcc 6262static void __meminit setup_per_zone_inactive_ratio(void)
96cb4df5
MK
6263{
6264 struct zone *zone;
6265
6266 for_each_zone(zone)
6267 calculate_zone_inactive_ratio(zone);
556adecb
RR
6268}
6269
1da177e4
LT
6270/*
6271 * Initialise min_free_kbytes.
6272 *
6273 * For small machines we want it small (128k min). For large machines
6274 * we want it large (64MB max). But it is not linear, because network
6275 * bandwidth does not increase linearly with machine size. We use
6276 *
b8af2941 6277 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
6278 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6279 *
6280 * which yields
6281 *
6282 * 16MB: 512k
6283 * 32MB: 724k
6284 * 64MB: 1024k
6285 * 128MB: 1448k
6286 * 256MB: 2048k
6287 * 512MB: 2896k
6288 * 1024MB: 4096k
6289 * 2048MB: 5792k
6290 * 4096MB: 8192k
6291 * 8192MB: 11584k
6292 * 16384MB: 16384k
6293 */
1b79acc9 6294int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
6295{
6296 unsigned long lowmem_kbytes;
5f12733e 6297 int new_min_free_kbytes;
1da177e4
LT
6298
6299 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
6300 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6301
6302 if (new_min_free_kbytes > user_min_free_kbytes) {
6303 min_free_kbytes = new_min_free_kbytes;
6304 if (min_free_kbytes < 128)
6305 min_free_kbytes = 128;
6306 if (min_free_kbytes > 65536)
6307 min_free_kbytes = 65536;
6308 } else {
6309 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6310 new_min_free_kbytes, user_min_free_kbytes);
6311 }
bc75d33f 6312 setup_per_zone_wmarks();
a6cccdc3 6313 refresh_zone_stat_thresholds();
1da177e4 6314 setup_per_zone_lowmem_reserve();
556adecb 6315 setup_per_zone_inactive_ratio();
1da177e4
LT
6316 return 0;
6317}
bc75d33f 6318module_init(init_per_zone_wmark_min)
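/*
 * Worked example: a machine with 2GiB of lowmem has lowmem_kbytes ==
 * 2097152, so new_min_free_kbytes = int_sqrt(2097152 * 16) =
 * int_sqrt(33554432) = 5792, matching the 2048MB row in the table above.
 */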
1da177e4
LT
6319
6320/*
b8af2941 6321 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
1da177e4
LT
6322 * that we can call two helper functions whenever min_free_kbytes
6323 * changes.
6324 */
cccad5b9 6325int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6326 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 6327{
da8c757b
HP
6328 int rc;
6329
6330 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6331 if (rc)
6332 return rc;
6333
5f12733e
MH
6334 if (write) {
6335 user_min_free_kbytes = min_free_kbytes;
bc75d33f 6336 setup_per_zone_wmarks();
5f12733e 6337 }
1da177e4
LT
6338 return 0;
6339}
6340
9614634f 6341#ifdef CONFIG_NUMA
cccad5b9 6342int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6343 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
6344{
6345 struct zone *zone;
6346 int rc;
6347
8d65af78 6348 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
6349 if (rc)
6350 return rc;
6351
6352 for_each_zone(zone)
b40da049 6353 zone->min_unmapped_pages = (zone->managed_pages *
9614634f
CL
6354 sysctl_min_unmapped_ratio) / 100;
6355 return 0;
6356}
0ff38490 6357
cccad5b9 6358int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6359 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
6360{
6361 struct zone *zone;
6362 int rc;
6363
8d65af78 6364 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
6365 if (rc)
6366 return rc;
6367
6368 for_each_zone(zone)
b40da049 6369 zone->min_slab_pages = (zone->managed_pages *
0ff38490
CL
6370 sysctl_min_slab_ratio) / 100;
6371 return 0;
6372}
9614634f
CL
6373#endif
6374
1da177e4
LT
6375/*
6376 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6377 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6378 * whenever sysctl_lowmem_reserve_ratio changes.
6379 *
6380 * The reserve ratio obviously has absolutely no relation with the
41858966 6381 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
6382 * in relation to the boot-time zone sizes.
6383 */
cccad5b9 6384int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6385 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 6386{
8d65af78 6387 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
6388 setup_per_zone_lowmem_reserve();
6389 return 0;
6390}
6391
8ad4b1fb
RS
6392/*
6393 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
b8af2941
PK
6394 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6395 * pagelist can have before it gets flushed back to the buddy allocator.
8ad4b1fb 6396 */
cccad5b9 6397int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6398 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
6399{
6400 struct zone *zone;
7cd2b0a3 6401 int old_percpu_pagelist_fraction;
8ad4b1fb
RS
6402 int ret;
6403
7cd2b0a3
DR
6404 mutex_lock(&pcp_batch_high_lock);
6405 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6406
8d65af78 6407 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
6408 if (!write || ret < 0)
6409 goto out;
6410
6411 /* Sanity checking to avoid pcp imbalance */
6412 if (percpu_pagelist_fraction &&
6413 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6414 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6415 ret = -EINVAL;
6416 goto out;
6417 }
6418
6419 /* No change? */
6420 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6421 goto out;
c8e251fa 6422
364df0eb 6423 for_each_populated_zone(zone) {
7cd2b0a3
DR
6424 unsigned int cpu;
6425
22a7f12b 6426 for_each_possible_cpu(cpu)
7cd2b0a3
DR
6427 pageset_set_high_and_batch(zone,
6428 per_cpu_ptr(zone->pageset, cpu));
8ad4b1fb 6429 }
7cd2b0a3 6430out:
c8e251fa 6431 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 6432 return ret;
8ad4b1fb
RS
6433}
6434
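/*
 * Usage sketch (values are illustrative): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction caps each per-cpu pagelist at
 * roughly zone->managed_pages / 8 pages, writing 0 restores the default
 * batch-derived sizing, and a non-zero value below
 * MIN_PERCPU_PAGELIST_FRACTION is rejected with -EINVAL as checked above.
 */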
a9919c79 6435#ifdef CONFIG_NUMA
f034b5d4 6436int hashdist = HASHDIST_DEFAULT;
1da177e4 6437
1da177e4
LT
6438static int __init set_hashdist(char *str)
6439{
6440 if (!str)
6441 return 0;
6442 hashdist = simple_strtoul(str, &str, 0);
6443 return 1;
6444}
6445__setup("hashdist=", set_hashdist);
6446#endif
6447
6448/*
6449 * allocate a large system hash table from bootmem
6450 * - it is assumed that the hash table must contain an exact power-of-2
6451 * quantity of entries
6452 * - limit is the number of hash buckets, not the total allocation size
6453 */
6454void *__init alloc_large_system_hash(const char *tablename,
6455 unsigned long bucketsize,
6456 unsigned long numentries,
6457 int scale,
6458 int flags,
6459 unsigned int *_hash_shift,
6460 unsigned int *_hash_mask,
31fe62b9
TB
6461 unsigned long low_limit,
6462 unsigned long high_limit)
1da177e4 6463{
31fe62b9 6464 unsigned long long max = high_limit;
1da177e4
LT
6465 unsigned long log2qty, size;
6466 void *table = NULL;
6467
6468 /* allow the kernel cmdline to have a say */
6469 if (!numentries) {
6470 /* round applicable memory size up to nearest megabyte */
04903664 6471 numentries = nr_kernel_pages;
a7e83318
JZ
6472
6473 /* It isn't necessary when PAGE_SIZE >= 1MB */
6474 if (PAGE_SHIFT < 20)
6475 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
1da177e4
LT
6476
6477 /* limit to 1 bucket per 2^scale bytes of low memory */
6478 if (scale > PAGE_SHIFT)
6479 numentries >>= (scale - PAGE_SHIFT);
6480 else
6481 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
6482
 6483 /* Make sure we've got at least a 0-order allocation. */
2c85f51d
JB
6484 if (unlikely(flags & HASH_SMALL)) {
6485 /* Makes no sense without HASH_EARLY */
6486 WARN_ON(!(flags & HASH_EARLY));
6487 if (!(numentries >> *_hash_shift)) {
6488 numentries = 1UL << *_hash_shift;
6489 BUG_ON(!numentries);
6490 }
6491 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 6492 numentries = PAGE_SIZE / bucketsize;
1da177e4 6493 }
6e692ed3 6494 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
6495
6496 /* limit allocation size to 1/16 total memory by default */
6497 if (max == 0) {
6498 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6499 do_div(max, bucketsize);
6500 }
074b8517 6501 max = min(max, 0x80000000ULL);
1da177e4 6502
31fe62b9
TB
6503 if (numentries < low_limit)
6504 numentries = low_limit;
1da177e4
LT
6505 if (numentries > max)
6506 numentries = max;
6507
f0d1b0b3 6508 log2qty = ilog2(numentries);
1da177e4
LT
6509
6510 do {
6511 size = bucketsize << log2qty;
6512 if (flags & HASH_EARLY)
6782832e 6513 table = memblock_virt_alloc_nopanic(size, 0);
1da177e4
LT
6514 else if (hashdist)
6515 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6516 else {
1037b83b
ED
6517 /*
6518 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
 6519 * some pages at the end of the hash table, which
 6520 * alloc_pages_exact() does automatically.
1037b83b 6521 */
264ef8a9 6522 if (get_order(size) < MAX_ORDER) {
a1dd268c 6523 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
6524 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6525 }
1da177e4
LT
6526 }
6527 } while (!table && size > PAGE_SIZE && --log2qty);
6528
6529 if (!table)
6530 panic("Failed to allocate %s hash table\n", tablename);
6531
f241e660 6532 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
1da177e4 6533 tablename,
f241e660 6534 (1UL << log2qty),
f0d1b0b3 6535 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
6536 size);
6537
6538 if (_hash_shift)
6539 *_hash_shift = log2qty;
6540 if (_hash_mask)
6541 *_hash_mask = (1 << log2qty) - 1;
6542
6543 return table;
6544}
a117e66e 6545
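/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): a
 * typical early-boot caller, loosely modeled on how the dentry cache sizes
 * its table.  All "example_*" identifiers are hypothetical; only
 * alloc_large_system_hash() and the HASH_EARLY flag come from the code above.
 */
static struct hlist_head *example_hashtable;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hashtable =
		alloc_large_system_hash("Example cache",
					sizeof(struct hlist_head),
					0,		/* 0: size from nr_kernel_pages */
					14,		/* one bucket per 2^14 bytes */
					HASH_EARLY,	/* allocate from memblock */
					&example_hash_shift,
					&example_hash_mask,
					0, 0);		/* no explicit low/high limit */
}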
835c134e
MG
6546/* Return a pointer to the bitmap storing bits affecting a block of pages */
6547static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6548 unsigned long pfn)
6549{
6550#ifdef CONFIG_SPARSEMEM
6551 return __pfn_to_section(pfn)->pageblock_flags;
6552#else
6553 return zone->pageblock_flags;
6554#endif /* CONFIG_SPARSEMEM */
6555}
6556
6557static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6558{
6559#ifdef CONFIG_SPARSEMEM
6560 pfn &= (PAGES_PER_SECTION-1);
d9c23400 6561 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e 6562#else
c060f943 6563 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
d9c23400 6564 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
6565#endif /* CONFIG_SPARSEMEM */
6566}
6567
6568/**
1aab4d77 6569 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e 6570 * @page: The page within the block of interest
1aab4d77
RD
6571 * @pfn: The target page frame number
6572 * @end_bitidx: The last bit of interest to retrieve
6573 * @mask: mask of bits that the caller is interested in
6574 *
6575 * Return: pageblock_bits flags
835c134e 6576 */
dc4b0caf 6577unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
e58469ba
MG
6578 unsigned long end_bitidx,
6579 unsigned long mask)
835c134e
MG
6580{
6581 struct zone *zone;
6582 unsigned long *bitmap;
dc4b0caf 6583 unsigned long bitidx, word_bitidx;
e58469ba 6584 unsigned long word;
835c134e
MG
6585
6586 zone = page_zone(page);
835c134e
MG
6587 bitmap = get_pageblock_bitmap(zone, pfn);
6588 bitidx = pfn_to_bitidx(zone, pfn);
e58469ba
MG
6589 word_bitidx = bitidx / BITS_PER_LONG;
6590 bitidx &= (BITS_PER_LONG-1);
835c134e 6591
e58469ba
MG
6592 word = bitmap[word_bitidx];
6593 bitidx += end_bitidx;
6594 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
835c134e
MG
6595}
6596
6597/**
dc4b0caf 6598 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e 6599 * @page: The page within the block of interest
835c134e 6600 * @flags: The flags to set
1aab4d77
RD
6601 * @pfn: The target page frame number
6602 * @end_bitidx: The last bit of interest
6603 * @mask: mask of bits that the caller is interested in
835c134e 6604 */
dc4b0caf
MG
6605void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6606 unsigned long pfn,
e58469ba
MG
6607 unsigned long end_bitidx,
6608 unsigned long mask)
835c134e
MG
6609{
6610 struct zone *zone;
6611 unsigned long *bitmap;
dc4b0caf 6612 unsigned long bitidx, word_bitidx;
e58469ba
MG
6613 unsigned long old_word, word;
6614
6615 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
835c134e
MG
6616
6617 zone = page_zone(page);
835c134e
MG
6618 bitmap = get_pageblock_bitmap(zone, pfn);
6619 bitidx = pfn_to_bitidx(zone, pfn);
e58469ba
MG
6620 word_bitidx = bitidx / BITS_PER_LONG;
6621 bitidx &= (BITS_PER_LONG-1);
6622
309381fe 6623 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
835c134e 6624
e58469ba
MG
6625 bitidx += end_bitidx;
6626 mask <<= (BITS_PER_LONG - bitidx - 1);
6627 flags <<= (BITS_PER_LONG - bitidx - 1);
6628
4db0c3c2 6629 word = READ_ONCE(bitmap[word_bitidx]);
e58469ba
MG
6630 for (;;) {
6631 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6632 if (word == old_word)
6633 break;
6634 word = old_word;
6635 }
835c134e 6636}
a5d76b54
KH
6637
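/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): the two
 * helpers above are normally reached through the pageblock migratetype
 * wrappers.  A hedged example of reading and rewriting only the migratetype
 * bits of the block containing @page, assuming the PB_migrate_end and
 * MIGRATETYPE_MASK definitions used by those wrappers.
 */
static void example_mark_block_movable(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long mt;

	/* Read just the migratetype bits of this pageblock. */
	mt = get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
				     MIGRATETYPE_MASK);
	if (mt != MIGRATE_MOVABLE)
		/* Update those bits, leaving the rest of the bitmap word alone. */
		set_pfnblock_flags_mask(page, MIGRATE_MOVABLE, pfn,
					PB_migrate_end, MIGRATETYPE_MASK);
}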
6638/*
80934513
MK
 6639 * This function checks whether the pageblock includes unmovable pages or not.
 6640 * If @count is not zero, it is okay to include fewer than @count unmovable pages.
 6641 *
b8af2941 6642 * The PageLRU check without isolation or lru_lock could race, so a
80934513
MK
 6643 * MIGRATE_MOVABLE block might include unmovable pages. This means you can't
 6644 * expect this function to be exact.
a5d76b54 6645 */
b023f468
WC
6646bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6647 bool skip_hwpoisoned_pages)
49ac8255
KH
6648{
6649 unsigned long pfn, iter, found;
47118af0
MN
6650 int mt;
6651
49ac8255
KH
6652 /*
 6653 * To avoid noisy data, lru_add_drain_all() should be called beforehand.
80934513 6654 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
49ac8255
KH
6655 */
6656 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 6657 return false;
47118af0
MN
6658 mt = get_pageblock_migratetype(page);
6659 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 6660 return false;
49ac8255
KH
6661
6662 pfn = page_to_pfn(page);
6663 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6664 unsigned long check = pfn + iter;
6665
29723fcc 6666 if (!pfn_valid_within(check))
49ac8255 6667 continue;
29723fcc 6668
49ac8255 6669 page = pfn_to_page(check);
c8721bbb
NH
6670
6671 /*
6672 * Hugepages are not in LRU lists, but they're movable.
 6673 * We need not scan over tail pages because we don't
6674 * handle each tail page individually in migration.
6675 */
6676 if (PageHuge(page)) {
6677 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6678 continue;
6679 }
6680
97d255c8
MK
6681 /*
 6682 * We can't use page_count without pinning the page
 6683 * because another CPU can free the compound page.
 6684 * This check already skips compound tails of THP
 6685 * because their page->_count is zero at all times.
6686 */
6687 if (!atomic_read(&page->_count)) {
49ac8255
KH
6688 if (PageBuddy(page))
6689 iter += (1 << page_order(page)) - 1;
6690 continue;
6691 }
97d255c8 6692
b023f468
WC
6693 /*
 6694 * The HWPoisoned page may not be in the buddy system, and
6695 * page_count() is not 0.
6696 */
6697 if (skip_hwpoisoned_pages && PageHWPoison(page))
6698 continue;
6699
49ac8255
KH
6700 if (!PageLRU(page))
6701 found++;
6702 /*
6b4f7799
JW
 6703 * If there are RECLAIMABLE pages, we need to check
 6704 * them.  For now, memory offline itself doesn't call
 6705 * shrink_node_slabs(), and this still needs to be fixed.
49ac8255
KH
6706 */
6707 /*
 6708 * If the page is not RAM, page_count() should be 0, and
 6709 * we don't need any further checks.  This is a _used_, non-movable page.
 6710 *
 6711 * The problematic thing here is PG_reserved pages.  PG_reserved
 6712 * is set on both memory hole pages and _used_ kernel
 6713 * pages at boot.
6714 */
6715 if (found > count)
80934513 6716 return true;
49ac8255 6717 }
80934513 6718 return false;
49ac8255
KH
6719}
6720
6721bool is_pageblock_removable_nolock(struct page *page)
6722{
656a0706
MH
6723 struct zone *zone;
6724 unsigned long pfn;
687875fb
MH
6725
6726 /*
6727 * We have to be careful here because we are iterating over memory
6728 * sections which are not zone aware so we might end up outside of
6729 * the zone but still within the section.
656a0706
MH
 6730 * We also have to be careful about the node.  If the node is offline,
6731 * its NODE_DATA will be NULL - see page_zone.
687875fb 6732 */
656a0706
MH
6733 if (!node_online(page_to_nid(page)))
6734 return false;
6735
6736 zone = page_zone(page);
6737 pfn = page_to_pfn(page);
108bcc96 6738 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
6739 return false;
6740
b023f468 6741 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 6742}
0c0e6195 6743
080fe206 6744#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
041d3a8c
MN
6745
6746static unsigned long pfn_max_align_down(unsigned long pfn)
6747{
6748 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6749 pageblock_nr_pages) - 1);
6750}
6751
6752static unsigned long pfn_max_align_up(unsigned long pfn)
6753{
6754 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6755 pageblock_nr_pages));
6756}
6757
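/*
 * Worked example (editor's addition, not part of page_alloc.c): assuming
 * MAX_ORDER_NR_PAGES == 1024 and pageblock_nr_pages == 512, the larger of
 * the two (1024) is used, so for a hypothetical pfn of 0x12345:
 *
 *	pfn_max_align_down(0x12345) == 0x12000
 *	pfn_max_align_up(0x12345)   == 0x12400
 *
 * i.e. the [start, end) range handed to alloc_contig_range() is widened to
 * 1024-page boundaries before its pageblocks are isolated.
 */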
041d3a8c 6758/* [start, end) must belong to a single zone. */
bb13ffeb
MG
6759static int __alloc_contig_migrate_range(struct compact_control *cc,
6760 unsigned long start, unsigned long end)
041d3a8c
MN
6761{
6762 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 6763 unsigned long nr_reclaimed;
041d3a8c
MN
6764 unsigned long pfn = start;
6765 unsigned int tries = 0;
6766 int ret = 0;
6767
be49a6e1 6768 migrate_prep();
041d3a8c 6769
bb13ffeb 6770 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
6771 if (fatal_signal_pending(current)) {
6772 ret = -EINTR;
6773 break;
6774 }
6775
bb13ffeb
MG
6776 if (list_empty(&cc->migratepages)) {
6777 cc->nr_migratepages = 0;
edc2ca61 6778 pfn = isolate_migratepages_range(cc, pfn, end);
041d3a8c
MN
6779 if (!pfn) {
6780 ret = -EINTR;
6781 break;
6782 }
6783 tries = 0;
6784 } else if (++tries == 5) {
6785 ret = ret < 0 ? ret : -EBUSY;
6786 break;
6787 }
6788
beb51eaa
MK
6789 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6790 &cc->migratepages);
6791 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 6792
9c620e2b 6793 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
e0b9daeb 6794 NULL, 0, cc->mode, MR_CMA);
041d3a8c 6795 }
2a6f5124
SP
6796 if (ret < 0) {
6797 putback_movable_pages(&cc->migratepages);
6798 return ret;
6799 }
6800 return 0;
041d3a8c
MN
6801}
6802
6803/**
6804 * alloc_contig_range() -- tries to allocate given range of pages
6805 * @start: start PFN to allocate
6806 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
 6807 * @migratetype: migratetype of the underlying pageblocks (either
6808 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6809 * in range must have the same migratetype and it must
6810 * be either of the two.
041d3a8c
MN
6811 *
6812 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6813 * aligned, however it's the caller's responsibility to guarantee that
6814 * we are the only thread that changes migrate type of pageblocks the
6815 * pages fall in.
6816 *
6817 * The PFN range must belong to a single zone.
6818 *
 6819 * Returns zero on success or a negative error code.  On success all
 6820 * pages whose PFN is in [start, end) are allocated for the caller and
6821 * need to be freed with free_contig_range().
6822 */
0815f3d8
MN
6823int alloc_contig_range(unsigned long start, unsigned long end,
6824 unsigned migratetype)
041d3a8c 6825{
041d3a8c 6826 unsigned long outer_start, outer_end;
d00181b9
KS
6827 unsigned int order;
6828 int ret = 0;
041d3a8c 6829
bb13ffeb
MG
6830 struct compact_control cc = {
6831 .nr_migratepages = 0,
6832 .order = -1,
6833 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 6834 .mode = MIGRATE_SYNC,
bb13ffeb
MG
6835 .ignore_skip_hint = true,
6836 };
6837 INIT_LIST_HEAD(&cc.migratepages);
6838
041d3a8c
MN
6839 /*
6840 * What we do here is we mark all pageblocks in range as
6841 * MIGRATE_ISOLATE. Because pageblock and max order pages may
 6842 * have different sizes, and due to the way the page allocator
 6843 * works, we align the range to the bigger of the two so
 6844 * that the page allocator won't try to merge buddies from
6845 * different pageblocks and change MIGRATE_ISOLATE to some
6846 * other migration type.
6847 *
6848 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6849 * migrate the pages from an unaligned range (ie. pages that
6850 * we are interested in). This will put all the pages in
6851 * range back to page allocator as MIGRATE_ISOLATE.
6852 *
6853 * When this is done, we take the pages in range from page
6854 * allocator removing them from the buddy system. This way
6855 * page allocator will never consider using them.
6856 *
6857 * This lets us mark the pageblocks back as
6858 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6859 * aligned range but not in the unaligned, original range are
6860 * put back to page allocator so that buddy can use them.
6861 */
6862
6863 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
6864 pfn_max_align_up(end), migratetype,
6865 false);
041d3a8c 6866 if (ret)
86a595f9 6867 return ret;
041d3a8c 6868
8ef5849f
JK
6869 /*
6870 * In case of -EBUSY, we'd like to know which page causes problem.
6871 * So, just fall through. We will check it in test_pages_isolated().
6872 */
bb13ffeb 6873 ret = __alloc_contig_migrate_range(&cc, start, end);
8ef5849f 6874 if (ret && ret != -EBUSY)
041d3a8c
MN
6875 goto done;
6876
6877 /*
6878 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6879 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6880 * more, all pages in [start, end) are free in page allocator.
6881 * What we are going to do is to allocate all pages from
6882 * [start, end) (that is remove them from page allocator).
6883 *
6884 * The only problem is that pages at the beginning and at the
 6885 * end of the interesting range may not be aligned with pages that
 6886 * the page allocator holds, i.e. they can be part of higher order
6887 * pages. Because of this, we reserve the bigger range and
6888 * once this is done free the pages we are not interested in.
6889 *
6890 * We don't have to hold zone->lock here because the pages are
6891 * isolated thus they won't get removed from buddy.
6892 */
6893
6894 lru_add_drain_all();
510f5507 6895 drain_all_pages(cc.zone);
041d3a8c
MN
6896
6897 order = 0;
6898 outer_start = start;
6899 while (!PageBuddy(pfn_to_page(outer_start))) {
6900 if (++order >= MAX_ORDER) {
8ef5849f
JK
6901 outer_start = start;
6902 break;
041d3a8c
MN
6903 }
6904 outer_start &= ~0UL << order;
6905 }
6906
8ef5849f
JK
6907 if (outer_start != start) {
6908 order = page_order(pfn_to_page(outer_start));
6909
6910 /*
 6911 * The outer_start page could be a small-order buddy page that
 6912 * doesn't include the start page.  Adjust outer_start
 6913 * in this case so the failed page is reported properly
 6914 * by the tracepoint in test_pages_isolated().
6915 */
6916 if (outer_start + (1UL << order) <= start)
6917 outer_start = start;
6918 }
6919
041d3a8c 6920 /* Make sure the range is really isolated. */
b023f468 6921 if (test_pages_isolated(outer_start, end, false)) {
dae803e1
MN
6922 pr_info("%s: [%lx, %lx) PFNs busy\n",
6923 __func__, outer_start, end);
041d3a8c
MN
6924 ret = -EBUSY;
6925 goto done;
6926 }
6927
49f223a9 6928 /* Grab isolated pages from freelists. */
bb13ffeb 6929 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
6930 if (!outer_end) {
6931 ret = -EBUSY;
6932 goto done;
6933 }
6934
6935 /* Free head and tail (if any) */
6936 if (start != outer_start)
6937 free_contig_range(outer_start, start - outer_start);
6938 if (end != outer_end)
6939 free_contig_range(end, outer_end - end);
6940
6941done:
6942 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 6943 pfn_max_align_up(end), migratetype);
041d3a8c
MN
6944 return ret;
6945}
6946
6947void free_contig_range(unsigned long pfn, unsigned nr_pages)
6948{
bcc2b02f
MS
6949 unsigned int count = 0;
6950
6951 for (; nr_pages--; pfn++) {
6952 struct page *page = pfn_to_page(pfn);
6953
6954 count += page_count(page) != 1;
6955 __free_page(page);
6956 }
6957 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
6958}
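/*
 * Illustrative sketch (editor's addition, not part of page_alloc.c): how a
 * CMA-style user might claim and release a physically contiguous range.
 * The pfn bounds are hypothetical, and the pageblocks in the range are
 * assumed to already be MIGRATE_CMA; only alloc_contig_range() and
 * free_contig_range() come from the code above.
 */
static int example_claim_contig(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages, MIGRATE_CMA);
	if (ret)
		return ret;	/* e.g. -EBUSY if the range could not be emptied */

	/* ... use pfn_to_page(start_pfn) .. pfn_to_page(start_pfn + nr_pages - 1) ... */

	free_contig_range(start_pfn, nr_pages);	/* give every page back */
	return 0;
}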
6959#endif
6960
4ed7e022 6961#ifdef CONFIG_MEMORY_HOTPLUG
0a647f38
CS
6962/*
6963 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 6964 * page high values need to be recalculated.
6965 */
4ed7e022
JL
6966void __meminit zone_pcp_update(struct zone *zone)
6967{
0a647f38 6968 unsigned cpu;
c8e251fa 6969 mutex_lock(&pcp_batch_high_lock);
0a647f38 6970 for_each_possible_cpu(cpu)
169f6c19
CS
6971 pageset_set_high_and_batch(zone,
6972 per_cpu_ptr(zone->pageset, cpu));
c8e251fa 6973 mutex_unlock(&pcp_batch_high_lock);
4ed7e022
JL
6974}
6975#endif
6976
340175b7
JL
6977void zone_pcp_reset(struct zone *zone)
6978{
6979 unsigned long flags;
5a883813
MK
6980 int cpu;
6981 struct per_cpu_pageset *pset;
340175b7
JL
6982
6983 /* avoid races with drain_pages() */
6984 local_irq_save(flags);
6985 if (zone->pageset != &boot_pageset) {
5a883813
MK
6986 for_each_online_cpu(cpu) {
6987 pset = per_cpu_ptr(zone->pageset, cpu);
6988 drain_zonestat(zone, pset);
6989 }
340175b7
JL
6990 free_percpu(zone->pageset);
6991 zone->pageset = &boot_pageset;
6992 }
6993 local_irq_restore(flags);
6994}
6995
6dcd73d7 6996#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195
KH
6997/*
6998 * All pages in the range must be isolated before calling this.
6999 */
7000void
7001__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7002{
7003 struct page *page;
7004 struct zone *zone;
7aeb09f9 7005 unsigned int order, i;
0c0e6195
KH
7006 unsigned long pfn;
7007 unsigned long flags;
7008 /* find the first valid pfn */
7009 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7010 if (pfn_valid(pfn))
7011 break;
7012 if (pfn == end_pfn)
7013 return;
7014 zone = page_zone(pfn_to_page(pfn));
7015 spin_lock_irqsave(&zone->lock, flags);
7016 pfn = start_pfn;
7017 while (pfn < end_pfn) {
7018 if (!pfn_valid(pfn)) {
7019 pfn++;
7020 continue;
7021 }
7022 page = pfn_to_page(pfn);
b023f468
WC
7023 /*
 7024 * The HWPoisoned page may not be in the buddy system, and
7025 * page_count() is not 0.
7026 */
7027 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7028 pfn++;
7029 SetPageReserved(page);
7030 continue;
7031 }
7032
0c0e6195
KH
7033 BUG_ON(page_count(page));
7034 BUG_ON(!PageBuddy(page));
7035 order = page_order(page);
7036#ifdef CONFIG_DEBUG_VM
7037 printk(KERN_INFO "remove from free list %lx %d %lx\n",
7038 pfn, 1 << order, end_pfn);
7039#endif
7040 list_del(&page->lru);
7041 rmv_page_order(page);
7042 zone->free_area[order].nr_free--;
0c0e6195
KH
7043 for (i = 0; i < (1 << order); i++)
7044 SetPageReserved((page+i));
7045 pfn += (1 << order);
7046 }
7047 spin_unlock_irqrestore(&zone->lock, flags);
7048}
7049#endif
8d22ba1b
WF
7050
7051#ifdef CONFIG_MEMORY_FAILURE
7052bool is_free_buddy_page(struct page *page)
7053{
7054 struct zone *zone = page_zone(page);
7055 unsigned long pfn = page_to_pfn(page);
7056 unsigned long flags;
7aeb09f9 7057 unsigned int order;
8d22ba1b
WF
7058
7059 spin_lock_irqsave(&zone->lock, flags);
7060 for (order = 0; order < MAX_ORDER; order++) {
7061 struct page *page_head = page - (pfn & ((1 << order) - 1));
7062
7063 if (PageBuddy(page_head) && page_order(page_head) >= order)
7064 break;
7065 }
7066 spin_unlock_irqrestore(&zone->lock, flags);
7067
7068 return order < MAX_ORDER;
7069}
7070#endif