/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

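/*
 * A typical hibernation-path pairing, as a sketch only (the real call
 * sites live in kernel/power/, not in this file):
 *
 *         mutex_lock(&pm_mutex);
 *         pm_restrict_gfp_mask();      <- no __GFP_IO/__GFP_FS from here on
 *         ... suspend devices, write the memory image ...
 *         pm_restore_gfp_mask();
 *         mutex_unlock(&pm_mutex);
 */
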
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
         256,
#endif
#ifdef CONFIG_ZONE_DMA32
         256,
#endif
#ifdef CONFIG_HIGHMEM
         32,
#endif
         32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

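/*
 * Record a pageblock's migrate type in the pageblock bitmap.  When page
 * grouping by mobility is disabled, every block is treated as unmovable.
 */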
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                __ClearPageBuddy(page);
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page(page);

        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        __ClearPageBuddy(page);
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                __SetPageTail(p);
                p->first_page = page;
        }
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

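/*
 * Buddy bookkeeping: a page sitting in the buddy free lists has PG_buddy
 * set and its order stored in page_private(); both are cleared again when
 * the page leaves the allocator.
 */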
static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
        unsigned long buddy_idx = page_idx ^ (1 << order);

        return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
        return (page_idx & ~(1 << order));
}

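/*
 * Continuing the example above: order-1 buddies #8 and #10 share the
 * order-2 combined index #8, since 8 & ~(1 << 1) = 8 and
 * 10 & ~(1 << 1) = 8.
 */
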
/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        struct page *buddy;

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        VM_BUG_ON(migratetype == -1);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));

        while (order < MAX_ORDER-1) {
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;

                /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = __find_combined_index(page_idx, order);
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = __find_combined_index(page_idx, order);
                higher_page = page + combined_idx - page_idx;
                higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}

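/*
 * Worked example of the merge loop above: freeing page #9 at order 0 when
 * #8 is free merges the pair into an order-1 block at #8; if #10-#11
 * already form a free order-1 block, the two merge again into an order-2
 * block at #8, and so on up to MAX_ORDER-1.
 */
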
/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
                bad_page(page);
                return 1;
        }
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;

        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--to_free && --batch_free && !list_empty(list));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
}

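/* Free one page of the given order directly back to the buddy lists. */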
static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
        int i;
        int bad = 0;

        trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        for (i = 0; i < (1 << order); i++)
                bad += free_pages_check(page + i);
        if (bad)
                return false;

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, order))
                return;

        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
        local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
        if (order == 0) {
                __ClearPageReserved(page);
                set_page_count(page, 0);
                set_page_refcounted(page);
                __free_page(page);
        } else {
                int loop;

                prefetchw(page);
                for (loop = 0; loop < BITS_PER_LONG; loop++) {
                        struct page *p = &page[loop];

                        if (loop + 1 < BITS_PER_LONG)
                                prefetchw(p + 1);
                        __ClearPageReserved(p);
                        set_page_count(p, 0);
                }

                set_page_refcounted(page);
                __free_pages(page, order);
        }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}

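/*
 * For example, when expand() splits an order-3 block to satisfy an
 * order-0 request, it returns the upper halves to the free lists as one
 * order-2 block, then one order-1 block, then one order-0 page, and the
 * caller keeps the remaining order-0 page.
 */
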
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
                bad_page(page);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area * area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}

/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

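/*
 * So, for example, a MIGRATE_UNMOVABLE request whose own lists are empty
 * raids MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE, and only then
 * the MIGRATE_RESERVE pool.
 */
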
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_del(&page->lru);
                list_add(&page->lru,
                        &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

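/* Align the given page down to a pageblock boundary and move the whole block. */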
static int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}

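/*
 * Set the migrate type of every pageblock spanned by an allocation of
 * start_order >= pageblock_order.
 */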
static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area * area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                continue;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
                                        start_migratetype == MIGRATE_RECLAIMABLE ||
                                        page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)) ||
                                                page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);

                        /* Take ownership for orders >= pageblock_order */
                        if (current_order >= pageblock_order)
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area, migratetype);

                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);

                        return page;
                }
        }

        return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);

                /*
                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
                 * is used because __rmqueue_smallest is an inline function
                 * and we want just one call site
                 */
                if (!page) {
                        migratetype = MIGRATE_RESERVE;
                        goto retry_reserve;
                }
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the caller's
                 * perspective, the linked list is ordered by page number in
                 * some conditions. This is useful for IO devices that can
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
                if (likely(cold == 0))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;

                local_irq_save(flags);
                pset = per_cpu_ptr(zone->pageset, cpu);

                pcp = &pset->pcp;
                free_pcppages_bulk(zone, pcp->count, pcp);
                pcp->count = 0;
                local_irq_restore(flags);
        }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
        drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
        on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, 0))
                return;

        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);

        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
         * offlined but treat RESERVE as movable pages so we can get those
         * areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
        if (migratetype >= MIGRATE_PCPTYPES) {
                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
                        free_one_page(zone, page, 0, migratetype);
                        goto out;
                }
                migratetype = MIGRATE_MOVABLE;
        }

        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (cold)
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
                list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }

out:
        local_irq_restore(flags);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
        int i;

        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
        /*
         * Split shadow pages too, because free(page[0]) would
         * otherwise free the whole shadow.
         */
        if (kmemcheck_page_is_tracked(page))
                split_page(virt_to_page(page[0].shadow), order);
#endif

        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
        unsigned int order;
        unsigned long watermark;
        struct zone *zone;

        BUG_ON(!PageBuddy(page));

        zone = page_zone(page);
        order = page_order(page);

        /* Obey watermarks as if the page was being allocated */
        watermark = low_wmark_pages(zone) + (1 << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return 0;

        /* Remove page from free list */
        list_del(&page->lru);
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

        /* Split into individual pages */
        set_page_refcounted(page);
        split_page(page, order);

        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
                for (; page < endpage; page += pageblock_nr_pages)
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
        }

        return 1 << order;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, int order, gfp_t gfp_flags,
                        int migratetype)
{
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);

again:
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
                struct list_head *list;

                local_irq_save(flags);
                pcp = &this_cpu_ptr(zone->pageset)->pcp;
                list = &pcp->lists[migratetype];
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, list,
                                        migratetype, cold);
                        if (unlikely(list_empty(list)))
                                goto failed;
                }

                if (cold)
                        page = list_entry(list->prev, struct page, lru);
                else
                        page = list_entry(list->next, struct page, lru);

                list_del(&page->lru);
                pcp->count--;
        } else {
                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
                        /*
                         * __GFP_NOFAIL is not to be used in new code.
                         *
                         * All __GFP_NOFAIL callers should be fixed so that they
                         * properly detect and handle allocation failures.
                         *
                         * We most definitely don't want callers attempting to
                         * allocate greater than order-1 page units with
                         * __GFP_NOFAIL.
                         */
                        WARN_ON_ONCE(order > 1);
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }

        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);

        VM_BUG_ON(bad_range(zone, page));
        if (prep_new_page(page, order, gfp_flags))
                goto again;
        return page;

failed:
        local_irq_restore(flags);
        return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
        struct fault_attr attr;

        u32 ignore_gfp_highmem;
        u32 ignore_gfp_wait;
        u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

        struct dentry *ignore_gfp_highmem_file;
        struct dentry *ignore_gfp_wait_file;
        struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_gfp_wait = 1,
        .ignore_gfp_highmem = 1,
        .min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
        return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        if (order < fail_page_alloc.min_order)
                return 0;
        if (gfp_mask & __GFP_NOFAIL)
                return 0;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return 0;
        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
                return 0;

        return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;
        int err;

        err = init_fault_attr_dentries(&fail_page_alloc.attr,
                                       "fail_page_alloc");
        if (err)
                return err;
        dir = fail_page_alloc.attr.dentries.dir;

        fail_page_alloc.ignore_gfp_wait_file =
                debugfs_create_bool("ignore-gfp-wait", mode, dir,
                                      &fail_page_alloc.ignore_gfp_wait);

        fail_page_alloc.ignore_gfp_highmem_file =
                debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                      &fail_page_alloc.ignore_gfp_highmem);
        fail_page_alloc.min_order_file =
                debugfs_create_u32("min-order", mode, dir,
                                   &fail_page_alloc.min_order);

        if (!fail_page_alloc.ignore_gfp_wait_file ||
                        !fail_page_alloc.ignore_gfp_highmem_file ||
                        !fail_page_alloc.min_order_file) {
                err = -ENOMEM;
                debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
                debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
                debugfs_remove(fail_page_alloc.min_order_file);
                cleanup_fault_attr_dentries(&fail_page_alloc.attr);
        }

        return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

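/*
 * Usage sketch (see Documentation/fault-injection/ for the authoritative
 * syntax): booting with something like
 *
 *         fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * arms the injector, and the debugfs knobs above ("ignore-gfp-wait",
 * "ignore-gfp-highmem", "min-order") tune which allocations may fail.
 */
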
/*
 * Return true if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags, long free_pages)
{
        /* free_pages may go negative - that's OK */
        long min = mark;
        int o;

        free_pages -= (1 << order) - 1;
        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
                min -= min / 4;

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
                return false;
        for (o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
                free_pages -= z->free_area[o].nr_free << o;

                /* Require fewer higher order pages to be free */
                min >>= 1;

                if (free_pages <= min)
                        return false;
        }
        return true;
}

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                        zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        long free_pages = zone_page_state(z, NR_FREE_PAGES);

        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                                                free_pages);
}

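/*
 * Worked example: for an order-2 request against mark = 1024 with no
 * ALLOC_HIGH/ALLOC_HARDER, the request itself is discounted first, then
 * the o = 0 and o = 1 passes subtract those orders' free pages while
 * halving min (1024 -> 512 -> 256), so the request only passes if enough
 * of the remaining free memory sits in blocks of order >= 2.
 */
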
#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        nodemask_t *allowednodes;       /* zonelist_cache approximation */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return NULL;

        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }

        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                                        &cpuset_current_mems_allowed :
                                        &node_states[N_HIGH_MEMORY];
        return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                                nodemask_t *allowednodes)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */
        int n;                          /* node that zone *z is on */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return 1;

        i = z - zonelist->_zonerefs;
        n = zlc->z_to_n[i];

        /* This zone is worth trying if it is allowed but not full */
        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)
                return;

        i = z - zonelist->_zonerefs;

        set_bit(i, zlc->fullzones);
}

#else   /* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
        return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                nodemask_t *allowednodes)
{
        return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif  /* CONFIG_NUMA */

7fb1d9fc 1631/*
0798e519 1632 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1633 * a page.
1634 */
1635static struct page *
19770b32 1636get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
5117f45d 1637 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
3dd28266 1638 struct zone *preferred_zone, int migratetype)
753ee728 1639{
dd1a239f 1640 struct zoneref *z;
7fb1d9fc 1641 struct page *page = NULL;
54a6eb5c 1642 int classzone_idx;
5117f45d 1643 struct zone *zone;
9276b1bc
PJ
1644 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1645 int zlc_active = 0; /* set if using zonelist_cache */
1646 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1647
19770b32 1648 classzone_idx = zone_idx(preferred_zone);
9276b1bc 1649zonelist_scan:
7fb1d9fc 1650 /*
9276b1bc 1651 * Scan zonelist, looking for a zone with enough free pages.
7fb1d9fc
RS
1652 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1653 */
19770b32
MG
1654 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1655 high_zoneidx, nodemask) {
9276b1bc
PJ
1656 if (NUMA_BUILD && zlc_active &&
1657 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1658 continue;
7fb1d9fc 1659 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1660 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1661 goto try_next_zone;
7fb1d9fc 1662
41858966 1663 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
7fb1d9fc 1664 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b 1665 unsigned long mark;
fa5e084e
MG
1666 int ret;
1667
41858966 1668 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
fa5e084e
MG
1669 if (zone_watermark_ok(zone, order, mark,
1670 classzone_idx, alloc_flags))
1671 goto try_this_zone;
1672
1673 if (zone_reclaim_mode == 0)
1674 goto this_zone_full;
1675
1676 ret = zone_reclaim(zone, gfp_mask, order);
1677 switch (ret) {
1678 case ZONE_RECLAIM_NOSCAN:
1679 /* did not scan */
1680 goto try_next_zone;
1681 case ZONE_RECLAIM_FULL:
1682 /* scanned but unreclaimable */
1683 goto this_zone_full;
1684 default:
1685 /* did we reclaim enough */
1686 if (!zone_watermark_ok(zone, order, mark,
1687 classzone_idx, alloc_flags))
9276b1bc 1688 goto this_zone_full;
0798e519 1689 }
7fb1d9fc
RS
1690 }
1691
fa5e084e 1692try_this_zone:
3dd28266
MG
1693 page = buffered_rmqueue(preferred_zone, zone, order,
1694 gfp_mask, migratetype);
0798e519 1695 if (page)
7fb1d9fc 1696 break;
9276b1bc
PJ
1697this_zone_full:
1698 if (NUMA_BUILD)
1699 zlc_mark_zone_full(zonelist, z);
1700try_next_zone:
62bc62a8 1701 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
d395b734
MG
1702 /*
 1703 * We do zlc_setup() after the first zone is tried, but only
 1704 * if there are multiple nodes to make it worthwhile.
1705 */
9276b1bc
PJ
1706 allowednodes = zlc_setup(zonelist, alloc_flags);
1707 zlc_active = 1;
1708 did_zlc_setup = 1;
1709 }
54a6eb5c 1710 }
9276b1bc
PJ
1711
1712 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1713 /* Disable zlc cache for second zonelist scan */
1714 zlc_active = 0;
1715 goto zonelist_scan;
1716 }
7fb1d9fc 1717 return page;
753ee728
MH
1718}
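/*
 * Editorial aside: stripped of details, get_page_from_freelist() is a
 * two-pass scan -- pass one trusts the zonelist cache, and only when that
 * yields nothing is the cache disabled and the list rescanned with exact
 * watermark checks. A hedged sketch of just that control shape:
 */
static void *toy_two_pass_scan(void *(*try_zone)(int idx, int use_cache),
			       int nr_zones)
{
	void *page = NULL;
	int zlc_active = 1;		/* pass one: consult the cache */
	int idx;

scan:
	for (idx = 0; idx < nr_zones && !page; idx++)
		page = try_zone(idx, zlc_active);

	if (!page && zlc_active) {
		zlc_active = 0;		/* pass two: exact checks only */
		goto scan;
	}
	return page;
}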
1719
11e33f6a
MG
1720static inline int
1721should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1722 unsigned long pages_reclaimed)
1da177e4 1723{
11e33f6a
MG
1724 /* Do not loop if specifically requested */
1725 if (gfp_mask & __GFP_NORETRY)
1726 return 0;
1da177e4 1727
11e33f6a
MG
1728 /*
1729 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1730 * means __GFP_NOFAIL, but that may not be true in other
1731 * implementations.
1732 */
1733 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1734 return 1;
1735
1736 /*
1737 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1738 * specified, then we retry until we no longer reclaim any pages
1739 * (above), or we've reclaimed an order of pages at least as
1740 * large as the allocation's order. In both cases, if the
1741 * allocation still fails, we stop retrying.
1742 */
1743 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1744 return 1;
cf40bd16 1745
11e33f6a
MG
1746 /*
1747 * Don't let big-order allocations loop unless the caller
1748 * explicitly requests that.
1749 */
1750 if (gfp_mask & __GFP_NOFAIL)
1751 return 1;
1da177e4 1752
11e33f6a
MG
1753 return 0;
1754}
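/*
 * Editorial aside: the retry policy above, re-expressed as standalone C
 * with a tiny self-test. Flag bits and the costly-order cutoff are
 * stand-in values, not the kernel's.
 */
#include <assert.h>

#define T_NORETRY	0x1u
#define T_REPEAT	0x2u
#define T_NOFAIL	0x4u
#define T_COSTLY_ORDER	3u

static int toy_should_retry(unsigned int flags, unsigned int order,
			    unsigned long pages_reclaimed)
{
	if (flags & T_NORETRY)
		return 0;
	if (order <= T_COSTLY_ORDER)
		return 1;
	if ((flags & T_REPEAT) && pages_reclaimed < (1UL << order))
		return 1;
	return !!(flags & T_NOFAIL);
}

static void toy_retry_self_test(void)
{
	assert(toy_should_retry(T_NORETRY, 0, 0) == 0);	/* explicit opt-out */
	assert(toy_should_retry(0, 2, 0) == 1);		/* cheap orders loop */
	assert(toy_should_retry(T_REPEAT, 4, 8) == 1);	/* 8 < (1 << 4) */
	assert(toy_should_retry(0, 4, 0) == 0);		/* costly, no flags */
}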
933e312e 1755
11e33f6a
MG
1756static inline struct page *
1757__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1758 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1759 nodemask_t *nodemask, struct zone *preferred_zone,
1760 int migratetype)
11e33f6a
MG
1761{
1762 struct page *page;
1763
1764 /* Acquire the OOM killer lock for the zones in zonelist */
ff321fea 1765 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
11e33f6a 1766 schedule_timeout_uninterruptible(1);
1da177e4
LT
1767 return NULL;
1768 }
6b1de916 1769
11e33f6a
MG
1770 /*
1771 * Go through the zonelist yet one more time, keep very high watermark
1772 * here, this is only to catch a parallel oom killing, we must fail if
1773 * we're still under heavy pressure.
1774 */
1775 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1776 order, zonelist, high_zoneidx,
5117f45d 1777 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
3dd28266 1778 preferred_zone, migratetype);
7fb1d9fc 1779 if (page)
11e33f6a
MG
1780 goto out;
1781
4365a567
KH
1782 if (!(gfp_mask & __GFP_NOFAIL)) {
1783 /* The OOM killer will not help higher order allocs */
1784 if (order > PAGE_ALLOC_COSTLY_ORDER)
1785 goto out;
03668b3c
DR
1786 /* The OOM killer does not needlessly kill tasks for lowmem */
1787 if (high_zoneidx < ZONE_NORMAL)
1788 goto out;
4365a567
KH
1789 /*
1790 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1791 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1792 * The caller should handle page allocation failure by itself if
1793 * it specifies __GFP_THISNODE.
1794 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1795 */
1796 if (gfp_mask & __GFP_THISNODE)
1797 goto out;
1798 }
11e33f6a 1799 /* Exhausted what can be done so it's blamo time */
4365a567 1800 out_of_memory(zonelist, gfp_mask, order, nodemask);
11e33f6a
MG
1801
1802out:
1803 clear_zonelist_oom(zonelist, gfp_mask);
1804 return page;
1805}
1806
56de7263
MG
1807#ifdef CONFIG_COMPACTION
1808/* Try memory compaction for high-order allocations before reclaim */
1809static struct page *
1810__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1811 struct zonelist *zonelist, enum zone_type high_zoneidx,
1812 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
77f1fe6b
MG
1813 int migratetype, unsigned long *did_some_progress,
1814 bool sync_migration)
56de7263
MG
1815{
1816 struct page *page;
3e7d3449 1817 struct task_struct *tsk = current;
56de7263 1818
4f92e258 1819 if (!order || compaction_deferred(preferred_zone))
56de7263
MG
1820 return NULL;
1821
3e7d3449 1822 tsk->flags |= PF_MEMALLOC;
56de7263 1823 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
77f1fe6b 1824 nodemask, sync_migration);
3e7d3449 1825 tsk->flags &= ~PF_MEMALLOC;
56de7263
MG
1826 if (*did_some_progress != COMPACT_SKIPPED) {
1827
1828 /* Page migration frees to the PCP lists but we want merging */
1829 drain_pages(get_cpu());
1830 put_cpu();
1831
1832 page = get_page_from_freelist(gfp_mask, nodemask,
1833 order, zonelist, high_zoneidx,
1834 alloc_flags, preferred_zone,
1835 migratetype);
1836 if (page) {
4f92e258
MG
1837 preferred_zone->compact_considered = 0;
1838 preferred_zone->compact_defer_shift = 0;
56de7263
MG
1839 count_vm_event(COMPACTSUCCESS);
1840 return page;
1841 }
1842
1843 /*
 1844 * It's bad if a compaction run occurs and fails.
1845 * The most likely reason is that pages exist,
1846 * but not enough to satisfy watermarks.
1847 */
1848 count_vm_event(COMPACTFAIL);
4f92e258 1849 defer_compaction(preferred_zone);
56de7263
MG
1850
1851 cond_resched();
1852 }
1853
1854 return NULL;
1855}
1856#else
1857static inline struct page *
1858__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1859 struct zonelist *zonelist, enum zone_type high_zoneidx,
1860 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
77f1fe6b
MG
1861 int migratetype, unsigned long *did_some_progress,
1862 bool sync_migration)
56de7263
MG
1863{
1864 return NULL;
1865}
1866#endif /* CONFIG_COMPACTION */
1867
11e33f6a
MG
1868/* The really slow allocator path where we enter direct reclaim */
1869static inline struct page *
1870__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1871 struct zonelist *zonelist, enum zone_type high_zoneidx,
5117f45d 1872 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
3dd28266 1873 int migratetype, unsigned long *did_some_progress)
11e33f6a
MG
1874{
1875 struct page *page = NULL;
1876 struct reclaim_state reclaim_state;
1877 struct task_struct *p = current;
9ee493ce 1878 bool drained = false;
11e33f6a
MG
1879
1880 cond_resched();
1881
1882 /* We now go into synchronous reclaim */
1883 cpuset_memory_pressure_bump();
11e33f6a
MG
1884 p->flags |= PF_MEMALLOC;
1885 lockdep_set_current_reclaim_state(gfp_mask);
1886 reclaim_state.reclaimed_slab = 0;
1887 p->reclaim_state = &reclaim_state;
1888
1889 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1890
1891 p->reclaim_state = NULL;
1892 lockdep_clear_current_reclaim_state();
1893 p->flags &= ~PF_MEMALLOC;
1894
1895 cond_resched();
1896
9ee493ce
MG
1897 if (unlikely(!(*did_some_progress)))
1898 return NULL;
11e33f6a 1899
9ee493ce
MG
1900retry:
1901 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1902 zonelist, high_zoneidx,
3dd28266
MG
1903 alloc_flags, preferred_zone,
1904 migratetype);
9ee493ce
MG
1905
1906 /*
1907 * If an allocation failed after direct reclaim, it could be because
1908 * pages are pinned on the per-cpu lists. Drain them and try again
1909 */
1910 if (!page && !drained) {
1911 drain_all_pages();
1912 drained = true;
1913 goto retry;
1914 }
1915
11e33f6a
MG
1916 return page;
1917}
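/*
 * Editorial aside: the 'drained' flag above is a one-shot retry -- fail,
 * flush the per-cpu page lists once, try again. The shape, as a sketch:
 */
static void *toy_alloc_with_one_drain(void *(*try_alloc)(void),
				      void (*drain_all)(void))
{
	void *page = try_alloc();

	if (!page) {
		drain_all();	/* at most one drain per reclaim pass */
		page = try_alloc();
	}
	return page;
}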
1918
1da177e4 1919/*
11e33f6a
MG
1920 * This is called in the allocator slow-path if the allocation request is of
1921 * sufficient urgency to ignore watermarks and take other desperate measures
1da177e4 1922 */
11e33f6a
MG
1923static inline struct page *
1924__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1925 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1926 nodemask_t *nodemask, struct zone *preferred_zone,
1927 int migratetype)
11e33f6a
MG
1928{
1929 struct page *page;
1930
1931 do {
1932 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1933 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
3dd28266 1934 preferred_zone, migratetype);
11e33f6a
MG
1935
1936 if (!page && gfp_mask & __GFP_NOFAIL)
0e093d99 1937 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
11e33f6a
MG
1938 } while (!page && (gfp_mask & __GFP_NOFAIL));
1939
1940 return page;
1941}
1942
1943static inline
1944void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
99504748
MG
1945 enum zone_type high_zoneidx,
1946 enum zone_type classzone_idx)
1da177e4 1947{
dd1a239f
MG
1948 struct zoneref *z;
1949 struct zone *zone;
1da177e4 1950
11e33f6a 1951 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
99504748 1952 wakeup_kswapd(zone, order, classzone_idx);
11e33f6a 1953}
cf40bd16 1954
341ce06f
PZ
1955static inline int
1956gfp_to_alloc_flags(gfp_t gfp_mask)
1957{
1958 struct task_struct *p = current;
1959 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1960 const gfp_t wait = gfp_mask & __GFP_WAIT;
1da177e4 1961
a56f57ff 1962 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 1963 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 1964
341ce06f
PZ
1965 /*
1966 * The caller may dip into page reserves a bit more if the caller
1967 * cannot run direct reclaim, or if the caller has realtime scheduling
1968 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1969 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1970 */
e6223a3b 1971 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 1972
341ce06f
PZ
1973 if (!wait) {
1974 alloc_flags |= ALLOC_HARDER;
523b9458 1975 /*
341ce06f
PZ
1976 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1977 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
523b9458 1978 */
341ce06f 1979 alloc_flags &= ~ALLOC_CPUSET;
9d0ed60f 1980 } else if (unlikely(rt_task(p)) && !in_interrupt())
341ce06f
PZ
1981 alloc_flags |= ALLOC_HARDER;
1982
1983 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1984 if (!in_interrupt() &&
1985 ((p->flags & PF_MEMALLOC) ||
1986 unlikely(test_thread_flag(TIF_MEMDIE))))
1987 alloc_flags |= ALLOC_NO_WATERMARKS;
1da177e4 1988 }
6b1de916 1989
341ce06f
PZ
1990 return alloc_flags;
1991}
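/*
 * Editorial aside: the BUILD_BUG_ON above exists because the function
 * copies __GFP_HIGH into ALLOC_HIGH with a plain mask, which is only
 * correct when both names denote the same bit. Sketch of the idiom with
 * made-up values:
 */
#define TOY_GFP_HIGH	0x20u
#define TOY_ALLOC_HIGH	0x20u	/* must equal TOY_GFP_HIGH for the copy */

static unsigned int toy_gfp_to_alloc_flags(unsigned int gfp_mask)
{
	unsigned int alloc_flags = 0;

	/* branchless transfer; valid only because the bits coincide */
	alloc_flags |= gfp_mask & TOY_GFP_HIGH;
	return alloc_flags;
}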
1992
11e33f6a
MG
1993static inline struct page *
1994__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1995 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1996 nodemask_t *nodemask, struct zone *preferred_zone,
1997 int migratetype)
11e33f6a
MG
1998{
1999 const gfp_t wait = gfp_mask & __GFP_WAIT;
2000 struct page *page = NULL;
2001 int alloc_flags;
2002 unsigned long pages_reclaimed = 0;
2003 unsigned long did_some_progress;
2004 struct task_struct *p = current;
77f1fe6b 2005 bool sync_migration = false;
1da177e4 2006
72807a74
MG
2007 /*
2008 * In the slowpath, we sanity check order to avoid ever trying to
2009 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2010 * be using allocators in order of preference for an area that is
2011 * too large.
2012 */
1fc28b70
MG
2013 if (order >= MAX_ORDER) {
2014 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 2015 return NULL;
1fc28b70 2016 }
1da177e4 2017
952f3b51
CL
2018 /*
2019 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2020 * __GFP_NOWARN set) should not cause reclaim since the subsystem
 2021 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
 2022 * using a larger set of nodes after it has established that the
 2023 * allowed per-node queues are empty and that nodes are
 2024 * over-allocated.
2025 */
2026 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2027 goto nopage;
2028
cc4a6851 2029restart:
32dba98e
AA
2030 if (!(gfp_mask & __GFP_NO_KSWAPD))
2031 wake_all_kswapd(order, zonelist, high_zoneidx,
99504748 2032 zone_idx(preferred_zone));
1da177e4 2033
9bf2229f 2034 /*
7fb1d9fc
RS
2035 * OK, we're below the kswapd watermark and have kicked background
2036 * reclaim. Now things get more complex, so set up alloc_flags according
2037 * to how we want to proceed.
9bf2229f 2038 */
341ce06f 2039 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 2040
341ce06f 2041 /* This is the last chance, in general, before the goto nopage. */
19770b32 2042 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
341ce06f
PZ
2043 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2044 preferred_zone, migratetype);
7fb1d9fc
RS
2045 if (page)
2046 goto got_pg;
1da177e4 2047
b43a57bb 2048rebalance:
11e33f6a 2049 /* Allocate without watermarks if the context allows */
341ce06f
PZ
2050 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2051 page = __alloc_pages_high_priority(gfp_mask, order,
2052 zonelist, high_zoneidx, nodemask,
2053 preferred_zone, migratetype);
2054 if (page)
2055 goto got_pg;
1da177e4
LT
2056 }
2057
2058 /* Atomic allocations - we can't balance anything */
2059 if (!wait)
2060 goto nopage;
2061
341ce06f
PZ
2062 /* Avoid recursion of direct reclaim */
2063 if (p->flags & PF_MEMALLOC)
2064 goto nopage;
2065
6583bb64
DR
2066 /* Avoid allocations with no watermarks from looping endlessly */
2067 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2068 goto nopage;
2069
77f1fe6b
MG
2070 /*
2071 * Try direct compaction. The first pass is asynchronous. Subsequent
2072 * attempts after direct reclaim are synchronous
2073 */
56de7263
MG
2074 page = __alloc_pages_direct_compact(gfp_mask, order,
2075 zonelist, high_zoneidx,
2076 nodemask,
2077 alloc_flags, preferred_zone,
77f1fe6b
MG
2078 migratetype, &did_some_progress,
2079 sync_migration);
56de7263
MG
2080 if (page)
2081 goto got_pg;
77f1fe6b 2082 sync_migration = true;
56de7263 2083
11e33f6a
MG
2084 /* Try direct reclaim and then allocating */
2085 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2086 zonelist, high_zoneidx,
2087 nodemask,
5117f45d 2088 alloc_flags, preferred_zone,
3dd28266 2089 migratetype, &did_some_progress);
11e33f6a
MG
2090 if (page)
2091 goto got_pg;
1da177e4 2092
e33c3b5e 2093 /*
11e33f6a
MG
2094 * If we failed to make any progress reclaiming, then we are
2095 * running out of options and have to consider going OOM
e33c3b5e 2096 */
11e33f6a
MG
2097 if (!did_some_progress) {
2098 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
7f33d49a
RW
2099 if (oom_killer_disabled)
2100 goto nopage;
11e33f6a
MG
2101 page = __alloc_pages_may_oom(gfp_mask, order,
2102 zonelist, high_zoneidx,
3dd28266
MG
2103 nodemask, preferred_zone,
2104 migratetype);
11e33f6a
MG
2105 if (page)
2106 goto got_pg;
1da177e4 2107
03668b3c
DR
2108 if (!(gfp_mask & __GFP_NOFAIL)) {
2109 /*
2110 * The oom killer is not called for high-order
2111 * allocations that may fail, so if no progress
2112 * is being made, there are no other options and
2113 * retrying is unlikely to help.
2114 */
2115 if (order > PAGE_ALLOC_COSTLY_ORDER)
2116 goto nopage;
2117 /*
2118 * The oom killer is not called for lowmem
2119 * allocations to prevent needlessly killing
2120 * innocent tasks.
2121 */
2122 if (high_zoneidx < ZONE_NORMAL)
2123 goto nopage;
2124 }
e2c55dc8 2125
ff0ceb9d
DR
2126 goto restart;
2127 }
1da177e4
LT
2128 }
2129
11e33f6a 2130 /* Check if we should retry the allocation */
a41f24ea 2131 pages_reclaimed += did_some_progress;
11e33f6a
MG
2132 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2133 /* Wait for some write requests to complete then retry */
0e093d99 2134 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1da177e4 2135 goto rebalance;
3e7d3449
MG
2136 } else {
2137 /*
2138 * High-order allocations do not necessarily loop after
2139 * direct reclaim and reclaim/compaction depends on compaction
2140 * being called after reclaim so call directly if necessary
2141 */
2142 page = __alloc_pages_direct_compact(gfp_mask, order,
2143 zonelist, high_zoneidx,
2144 nodemask,
2145 alloc_flags, preferred_zone,
77f1fe6b
MG
2146 migratetype, &did_some_progress,
2147 sync_migration);
3e7d3449
MG
2148 if (page)
2149 goto got_pg;
1da177e4
LT
2150 }
2151
2152nopage:
2153 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2154 printk(KERN_WARNING "%s: page allocation failure."
2155 " order:%d, mode:0x%x\n",
2156 p->comm, order, gfp_mask);
2157 dump_stack();
578c2fd6 2158 show_mem();
1da177e4 2159 }
b1eeab67 2160 return page;
1da177e4 2161got_pg:
b1eeab67
VN
2162 if (kmemcheck_enabled)
2163 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1da177e4 2164 return page;
11e33f6a 2165
1da177e4 2166}
11e33f6a
MG
2167
2168/*
2169 * This is the 'heart' of the zoned buddy allocator.
2170 */
2171struct page *
2172__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2173 struct zonelist *zonelist, nodemask_t *nodemask)
2174{
2175 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5117f45d 2176 struct zone *preferred_zone;
11e33f6a 2177 struct page *page;
3dd28266 2178 int migratetype = allocflags_to_migratetype(gfp_mask);
11e33f6a 2179
dcce284a
BH
2180 gfp_mask &= gfp_allowed_mask;
2181
11e33f6a
MG
2182 lockdep_trace_alloc(gfp_mask);
2183
2184 might_sleep_if(gfp_mask & __GFP_WAIT);
2185
2186 if (should_fail_alloc_page(gfp_mask, order))
2187 return NULL;
2188
2189 /*
2190 * Check the zones suitable for the gfp_mask contain at least one
2191 * valid zone. It's possible to have an empty zonelist as a result
2192 * of GFP_THISNODE and a memoryless node
2193 */
2194 if (unlikely(!zonelist->_zonerefs->zone))
2195 return NULL;
2196
c0ff7453 2197 get_mems_allowed();
5117f45d
MG
2198 /* The preferred zone is used for statistics later */
2199 first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
c0ff7453
MX
2200 if (!preferred_zone) {
2201 put_mems_allowed();
5117f45d 2202 return NULL;
c0ff7453 2203 }
5117f45d
MG
2204
2205 /* First allocation attempt */
11e33f6a 2206 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
5117f45d 2207 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
3dd28266 2208 preferred_zone, migratetype);
11e33f6a
MG
2209 if (unlikely(!page))
2210 page = __alloc_pages_slowpath(gfp_mask, order,
5117f45d 2211 zonelist, high_zoneidx, nodemask,
3dd28266 2212 preferred_zone, migratetype);
c0ff7453 2213 put_mems_allowed();
11e33f6a 2214
4b4f278c 2215 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
11e33f6a 2216 return page;
1da177e4 2217}
d239171e 2218EXPORT_SYMBOL(__alloc_pages_nodemask);
1da177e4
LT
2219
2220/*
2221 * Common helper functions.
2222 */
920c7a5d 2223unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 2224{
945a1113
AM
2225 struct page *page;
2226
2227 /*
2228 * __get_free_pages() returns a 32-bit address, which cannot represent
2229 * a highmem page
2230 */
2231 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2232
1da177e4
LT
2233 page = alloc_pages(gfp_mask, order);
2234 if (!page)
2235 return 0;
2236 return (unsigned long) page_address(page);
2237}
1da177e4
LT
2238EXPORT_SYMBOL(__get_free_pages);
2239
920c7a5d 2240unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 2241{
945a1113 2242 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 2243}
1da177e4
LT
2244EXPORT_SYMBOL(get_zeroed_page);
2245
2246void __pagevec_free(struct pagevec *pvec)
2247{
2248 int i = pagevec_count(pvec);
2249
4b4f278c
MG
2250 while (--i >= 0) {
2251 trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1da177e4 2252 free_hot_cold_page(pvec->pages[i], pvec->cold);
4b4f278c 2253 }
1da177e4
LT
2254}
2255
920c7a5d 2256void __free_pages(struct page *page, unsigned int order)
1da177e4 2257{
b5810039 2258 if (put_page_testzero(page)) {
1da177e4 2259 if (order == 0)
fc91668e 2260 free_hot_cold_page(page, 0);
1da177e4
LT
2261 else
2262 __free_pages_ok(page, order);
2263 }
2264}
2265
2266EXPORT_SYMBOL(__free_pages);
2267
920c7a5d 2268void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
2269{
2270 if (addr != 0) {
725d704e 2271 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
2272 __free_pages(virt_to_page((void *)addr), order);
2273 }
2274}
2275
2276EXPORT_SYMBOL(free_pages);
2277
2be0ffe2
TT
2278/**
 2279 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2280 * @size: the number of bytes to allocate
2281 * @gfp_mask: GFP flags for the allocation
2282 *
2283 * This function is similar to alloc_pages(), except that it allocates the
2284 * minimum number of pages to satisfy the request. alloc_pages() can only
2285 * allocate memory in power-of-two pages.
2286 *
2287 * This function is also limited by MAX_ORDER.
2288 *
2289 * Memory allocated by this function must be released by free_pages_exact().
2290 */
2291void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2292{
2293 unsigned int order = get_order(size);
2294 unsigned long addr;
2295
2296 addr = __get_free_pages(gfp_mask, order);
2297 if (addr) {
2298 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2299 unsigned long used = addr + PAGE_ALIGN(size);
2300
5bfd7560 2301 split_page(virt_to_page((void *)addr), order);
2be0ffe2
TT
2302 while (used < alloc_end) {
2303 free_page(used);
2304 used += PAGE_SIZE;
2305 }
2306 }
2307
2308 return (void *)addr;
2309}
2310EXPORT_SYMBOL(alloc_pages_exact);
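/*
 * Editorial aside: a userspace model of the trimming arithmetic above --
 * round the request up to a power-of-two order, then hand back the tail
 * pages. 4096-byte pages are assumed for the example.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

static unsigned int toy_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((TOY_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * TOY_PAGE_SIZE;		/* 20KB request */
	unsigned int order = toy_get_order(size);	/* order 3 = 8 pages */
	unsigned long used = (size + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;
	unsigned long freed = (1UL << order) - used;

	printf("order %u: %lu pages used, %lu tail pages freed\n",
	       order, used, freed);			/* 3: 5 used, 3 freed */
	return 0;
}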
2311
2312/**
2313 * free_pages_exact - release memory allocated via alloc_pages_exact()
2314 * @virt: the value returned by alloc_pages_exact.
2315 * @size: size of allocation, same value as passed to alloc_pages_exact().
2316 *
2317 * Release the memory allocated by a previous call to alloc_pages_exact.
2318 */
2319void free_pages_exact(void *virt, size_t size)
2320{
2321 unsigned long addr = (unsigned long)virt;
2322 unsigned long end = addr + PAGE_ALIGN(size);
2323
2324 while (addr < end) {
2325 free_page(addr);
2326 addr += PAGE_SIZE;
2327 }
2328}
2329EXPORT_SYMBOL(free_pages_exact);
2330
1da177e4
LT
2331static unsigned int nr_free_zone_pages(int offset)
2332{
dd1a239f 2333 struct zoneref *z;
54a6eb5c
MG
2334 struct zone *zone;
2335
e310fd43 2336 /* Just pick one node, since fallback list is circular */
1da177e4
LT
2337 unsigned int sum = 0;
2338
0e88460d 2339 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 2340
54a6eb5c 2341 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43 2342 unsigned long size = zone->present_pages;
41858966 2343 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
2344 if (size > high)
2345 sum += size - high;
1da177e4
LT
2346 }
2347
2348 return sum;
2349}
2350
2351/*
2352 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2353 */
2354unsigned int nr_free_buffer_pages(void)
2355{
af4ca457 2356 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 2357}
c2f1a551 2358EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
2359
2360/*
2361 * Amount of free RAM allocatable within all zones
2362 */
2363unsigned int nr_free_pagecache_pages(void)
2364{
2a1e274a 2365 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 2366}
08e0f6a9
CL
2367
2368static inline void show_node(struct zone *zone)
1da177e4 2369{
08e0f6a9 2370 if (NUMA_BUILD)
25ba77c1 2371 printk("Node %d ", zone_to_nid(zone));
1da177e4 2372}
1da177e4 2373
1da177e4
LT
2374void si_meminfo(struct sysinfo *val)
2375{
2376 val->totalram = totalram_pages;
2377 val->sharedram = 0;
d23ad423 2378 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 2379 val->bufferram = nr_blockdev_pages();
1da177e4
LT
2380 val->totalhigh = totalhigh_pages;
2381 val->freehigh = nr_free_highpages();
1da177e4
LT
2382 val->mem_unit = PAGE_SIZE;
2383}
2384
2385EXPORT_SYMBOL(si_meminfo);
2386
2387#ifdef CONFIG_NUMA
2388void si_meminfo_node(struct sysinfo *val, int nid)
2389{
2390 pg_data_t *pgdat = NODE_DATA(nid);
2391
2392 val->totalram = pgdat->node_present_pages;
d23ad423 2393 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 2394#ifdef CONFIG_HIGHMEM
1da177e4 2395 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
2396 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2397 NR_FREE_PAGES);
98d2b0eb
CL
2398#else
2399 val->totalhigh = 0;
2400 val->freehigh = 0;
2401#endif
1da177e4
LT
2402 val->mem_unit = PAGE_SIZE;
2403}
2404#endif
2405
2406#define K(x) ((x) << (PAGE_SHIFT-10))
2407
2408/*
2409 * Show free area list (used inside shift_scroll-lock stuff)
2410 * We also calculate the percentage fragmentation. We do this by counting the
2411 * memory on each free list with the exception of the first item on the list.
2412 */
2413void show_free_areas(void)
2414{
c7241913 2415 int cpu;
1da177e4
LT
2416 struct zone *zone;
2417
ee99c71c 2418 for_each_populated_zone(zone) {
c7241913
JS
2419 show_node(zone);
2420 printk("%s per-cpu:\n", zone->name);
1da177e4 2421
6b482c67 2422 for_each_online_cpu(cpu) {
1da177e4
LT
2423 struct per_cpu_pageset *pageset;
2424
99dcc3e5 2425 pageset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2426
3dfa5721
CL
2427 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2428 cpu, pageset->pcp.high,
2429 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
2430 }
2431 }
2432
a731286d
KM
2433 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2434 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
7b854121 2435 " unevictable:%lu"
b76146ed 2436 " dirty:%lu writeback:%lu unstable:%lu\n"
3701b033 2437 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4b02108a 2438 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
4f98a2fe 2439 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 2440 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
2441 global_page_state(NR_ISOLATED_ANON),
2442 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 2443 global_page_state(NR_INACTIVE_FILE),
a731286d 2444 global_page_state(NR_ISOLATED_FILE),
7b854121 2445 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 2446 global_page_state(NR_FILE_DIRTY),
ce866b34 2447 global_page_state(NR_WRITEBACK),
fd39fc85 2448 global_page_state(NR_UNSTABLE_NFS),
d23ad423 2449 global_page_state(NR_FREE_PAGES),
3701b033
KM
2450 global_page_state(NR_SLAB_RECLAIMABLE),
2451 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 2452 global_page_state(NR_FILE_MAPPED),
4b02108a 2453 global_page_state(NR_SHMEM),
a25700a5
AM
2454 global_page_state(NR_PAGETABLE),
2455 global_page_state(NR_BOUNCE));
1da177e4 2456
ee99c71c 2457 for_each_populated_zone(zone) {
1da177e4
LT
2458 int i;
2459
2460 show_node(zone);
2461 printk("%s"
2462 " free:%lukB"
2463 " min:%lukB"
2464 " low:%lukB"
2465 " high:%lukB"
4f98a2fe
RR
2466 " active_anon:%lukB"
2467 " inactive_anon:%lukB"
2468 " active_file:%lukB"
2469 " inactive_file:%lukB"
7b854121 2470 " unevictable:%lukB"
a731286d
KM
2471 " isolated(anon):%lukB"
2472 " isolated(file):%lukB"
1da177e4 2473 " present:%lukB"
4a0aa73f
KM
2474 " mlocked:%lukB"
2475 " dirty:%lukB"
2476 " writeback:%lukB"
2477 " mapped:%lukB"
4b02108a 2478 " shmem:%lukB"
4a0aa73f
KM
2479 " slab_reclaimable:%lukB"
2480 " slab_unreclaimable:%lukB"
c6a7f572 2481 " kernel_stack:%lukB"
4a0aa73f
KM
2482 " pagetables:%lukB"
2483 " unstable:%lukB"
2484 " bounce:%lukB"
2485 " writeback_tmp:%lukB"
1da177e4
LT
2486 " pages_scanned:%lu"
2487 " all_unreclaimable? %s"
2488 "\n",
2489 zone->name,
88f5acf8 2490 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
2491 K(min_wmark_pages(zone)),
2492 K(low_wmark_pages(zone)),
2493 K(high_wmark_pages(zone)),
4f98a2fe
RR
2494 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2495 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2496 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2497 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 2498 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
2499 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2500 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 2501 K(zone->present_pages),
4a0aa73f
KM
2502 K(zone_page_state(zone, NR_MLOCK)),
2503 K(zone_page_state(zone, NR_FILE_DIRTY)),
2504 K(zone_page_state(zone, NR_WRITEBACK)),
2505 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 2506 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
2507 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2508 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
2509 zone_page_state(zone, NR_KERNEL_STACK) *
2510 THREAD_SIZE / 1024,
4a0aa73f
KM
2511 K(zone_page_state(zone, NR_PAGETABLE)),
2512 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2513 K(zone_page_state(zone, NR_BOUNCE)),
2514 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
1da177e4 2515 zone->pages_scanned,
93e4a89a 2516 (zone->all_unreclaimable ? "yes" : "no")
1da177e4
LT
2517 );
2518 printk("lowmem_reserve[]:");
2519 for (i = 0; i < MAX_NR_ZONES; i++)
2520 printk(" %lu", zone->lowmem_reserve[i]);
2521 printk("\n");
2522 }
2523
ee99c71c 2524 for_each_populated_zone(zone) {
8f9de51a 2525 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4
LT
2526
2527 show_node(zone);
2528 printk("%s: ", zone->name);
1da177e4
LT
2529
2530 spin_lock_irqsave(&zone->lock, flags);
2531 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
2532 nr[order] = zone->free_area[order].nr_free;
2533 total += nr[order] << order;
1da177e4
LT
2534 }
2535 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
2536 for (order = 0; order < MAX_ORDER; order++)
2537 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
2538 printk("= %lukB\n", K(total));
2539 }
2540
e6f3602d
LW
2541 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2542
1da177e4
LT
2543 show_swap_cache_info();
2544}
2545
19770b32
MG
2546static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2547{
2548 zoneref->zone = zone;
2549 zoneref->zone_idx = zone_idx(zone);
2550}
2551
1da177e4
LT
2552/*
2553 * Builds allocation fallback zone lists.
1a93205b
CL
2554 *
2555 * Add all populated zones of a node to the zonelist.
1da177e4 2556 */
f0c0b2b8
KH
2557static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2558 int nr_zones, enum zone_type zone_type)
1da177e4 2559{
1a93205b
CL
2560 struct zone *zone;
2561
98d2b0eb 2562 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 2563 zone_type++;
02a68a5e
CL
2564
2565 do {
2f6726e5 2566 zone_type--;
070f8032 2567 zone = pgdat->node_zones + zone_type;
1a93205b 2568 if (populated_zone(zone)) {
dd1a239f
MG
2569 zoneref_set_zone(zone,
2570 &zonelist->_zonerefs[nr_zones++]);
070f8032 2571 check_highest_zone(zone_type);
1da177e4 2572 }
02a68a5e 2573
2f6726e5 2574 } while (zone_type);
070f8032 2575 return nr_zones;
1da177e4
LT
2576}
2577
f0c0b2b8
KH
2578
2579/*
2580 * zonelist_order:
2581 * 0 = automatic detection of better ordering.
2582 * 1 = order by ([node] distance, -zonetype)
2583 * 2 = order by (-zonetype, [node] distance)
2584 *
2585 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2586 * the same zonelist. So only NUMA can configure this param.
2587 */
2588#define ZONELIST_ORDER_DEFAULT 0
2589#define ZONELIST_ORDER_NODE 1
2590#define ZONELIST_ORDER_ZONE 2
2591
2592/* zonelist order in the kernel.
2593 * set_zonelist_order() will set this to NODE or ZONE.
2594 */
2595static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2596static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2597
2598
1da177e4 2599#ifdef CONFIG_NUMA
f0c0b2b8
KH
2600/* The value the user specified; may be changed later via sysctl */
2601static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2602/* string for sysctl */
2603#define NUMA_ZONELIST_ORDER_LEN 16
2604char numa_zonelist_order[16] = "default";
2605
2606/*
2607 * Interface to configure zonelist ordering.
2608 * Command line option "numa_zonelist_order"
2609 *	= "[dD]efault"	- default, automatic configuration.
2610 *	= "[nN]ode"	- order by node locality, then by zone within node
2611 *	= "[zZ]one"	- order by zone, then by locality within zone
2612 */
2613
2614static int __parse_numa_zonelist_order(char *s)
2615{
2616 if (*s == 'd' || *s == 'D') {
2617 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2618 } else if (*s == 'n' || *s == 'N') {
2619 user_zonelist_order = ZONELIST_ORDER_NODE;
2620 } else if (*s == 'z' || *s == 'Z') {
2621 user_zonelist_order = ZONELIST_ORDER_ZONE;
2622 } else {
2623 printk(KERN_WARNING
2624 "Ignoring invalid numa_zonelist_order value: "
2625 "%s\n", s);
2626 return -EINVAL;
2627 }
2628 return 0;
2629}
2630
2631static __init int setup_numa_zonelist_order(char *s)
2632{
ecb256f8
VL
2633 int ret;
2634
2635 if (!s)
2636 return 0;
2637
2638 ret = __parse_numa_zonelist_order(s);
2639 if (ret == 0)
2640 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2641
2642 return ret;
f0c0b2b8
KH
2643}
2644early_param("numa_zonelist_order", setup_numa_zonelist_order);
2645
2646/*
2647 * sysctl handler for numa_zonelist_order
2648 */
2649int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 2650 void __user *buffer, size_t *length,
f0c0b2b8
KH
2651 loff_t *ppos)
2652{
2653 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2654 int ret;
443c6f14 2655 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 2656
443c6f14 2657 mutex_lock(&zl_order_mutex);
f0c0b2b8 2658 if (write)
443c6f14 2659 strcpy(saved_string, (char*)table->data);
8d65af78 2660 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 2661 if (ret)
443c6f14 2662 goto out;
f0c0b2b8
KH
2663 if (write) {
2664 int oldval = user_zonelist_order;
2665 if (__parse_numa_zonelist_order((char*)table->data)) {
2666 /*
2667 * bogus value. restore saved string
2668 */
2669 strncpy((char*)table->data, saved_string,
2670 NUMA_ZONELIST_ORDER_LEN);
2671 user_zonelist_order = oldval;
4eaf3f64
HL
2672 } else if (oldval != user_zonelist_order) {
2673 mutex_lock(&zonelists_mutex);
1f522509 2674 build_all_zonelists(NULL);
4eaf3f64
HL
2675 mutex_unlock(&zonelists_mutex);
2676 }
f0c0b2b8 2677 }
443c6f14
AK
2678out:
2679 mutex_unlock(&zl_order_mutex);
2680 return ret;
f0c0b2b8
KH
2681}
2682
2683
62bc62a8 2684#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
2685static int node_load[MAX_NUMNODES];
2686
1da177e4 2687/**
4dc3b16b 2688 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2689 * @node: node whose fallback list we're appending
2690 * @used_node_mask: nodemask_t of already used nodes
2691 *
2692 * We use a number of factors to determine which is the next node that should
2693 * appear on a given node's fallback list. The node should not have appeared
2694 * already in @node's fallback list, and it should be the next closest node
2695 * according to the distance array (which contains arbitrary distance values
2696 * from each node to each node in the system), and should also prefer nodes
2697 * with no CPUs, since presumably they'll have very little allocation pressure
2698 * on them otherwise.
2699 * It returns -1 if no node is found.
2700 */
f0c0b2b8 2701static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2702{
4cf808eb 2703 int n, val;
1da177e4
LT
2704 int min_val = INT_MAX;
2705 int best_node = -1;
a70f7302 2706 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 2707
4cf808eb
LT
2708 /* Use the local node if we haven't already */
2709 if (!node_isset(node, *used_node_mask)) {
2710 node_set(node, *used_node_mask);
2711 return node;
2712 }
1da177e4 2713
37b07e41 2714 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2715
2716 /* Don't want a node to appear more than once */
2717 if (node_isset(n, *used_node_mask))
2718 continue;
2719
1da177e4
LT
2720 /* Use the distance array to find the distance */
2721 val = node_distance(node, n);
2722
4cf808eb
LT
2723 /* Penalize nodes under us ("prefer the next node") */
2724 val += (n < node);
2725
1da177e4 2726 /* Give preference to headless and unused nodes */
a70f7302
RR
2727 tmp = cpumask_of_node(n);
2728 if (!cpumask_empty(tmp))
1da177e4
LT
2729 val += PENALTY_FOR_NODE_WITH_CPUS;
2730
2731 /* Slight preference for less loaded node */
2732 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2733 val += node_load[n];
2734
2735 if (val < min_val) {
2736 min_val = val;
2737 best_node = n;
2738 }
2739 }
2740
2741 if (best_node >= 0)
2742 node_set(best_node, *used_node_mask);
2743
2744 return best_node;
2745}
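/*
 * Editorial aside: condensed userspace version of the scoring above --
 * distance, a +1 penalty for lower-numbered nodes, and a load bias, with
 * the lowest score winning. The 4-node matrix bound and the *64 weight
 * are illustrative only (the kernel scales by MAX_NODE_LOAD*MAX_NUMNODES
 * and also penalizes nodes that have CPUs).
 */
static int toy_next_best_node(int node, int nnodes, const int dist[][4],
			      const int load[], unsigned long *used_mask)
{
	int n, val, best = -1, min_val = 1 << 30;

	for (n = 0; n < nnodes; n++) {
		if (*used_mask & (1UL << n))
			continue;			/* each node appears once */
		val = dist[node][n] + (n < node);	/* prefer the next node */
		val = val * 64 + load[n];		/* slight load preference */
		if (val < min_val) {
			min_val = val;
			best = n;
		}
	}
	if (best >= 0)
		*used_mask |= 1UL << best;
	return best;		/* -1 if every node has been used */
}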
2746
f0c0b2b8
KH
2747
2748/*
2749 * Build zonelists ordered by node and zones within node.
2750 * This results in maximum locality--normal zone overflows into local
2751 * DMA zone, if any--but risks exhausting DMA zone.
2752 */
2753static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2754{
f0c0b2b8 2755 int j;
1da177e4 2756 struct zonelist *zonelist;
f0c0b2b8 2757
54a6eb5c 2758 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2759 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2760 ;
2761 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2762 MAX_NR_ZONES - 1);
dd1a239f
MG
2763 zonelist->_zonerefs[j].zone = NULL;
2764 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2765}
2766
523b9458
CL
2767/*
2768 * Build gfp_thisnode zonelists
2769 */
2770static void build_thisnode_zonelists(pg_data_t *pgdat)
2771{
523b9458
CL
2772 int j;
2773 struct zonelist *zonelist;
2774
54a6eb5c
MG
2775 zonelist = &pgdat->node_zonelists[1];
2776 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2777 zonelist->_zonerefs[j].zone = NULL;
2778 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2779}
2780
f0c0b2b8
KH
2781/*
2782 * Build zonelists ordered by zone and nodes within zones.
2783 * This results in conserving DMA zone[s] until all Normal memory is
2784 * exhausted, but results in overflowing to remote node while memory
2785 * may still exist in local DMA zone.
2786 */
2787static int node_order[MAX_NUMNODES];
2788
2789static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2790{
f0c0b2b8
KH
2791 int pos, j, node;
2792 int zone_type; /* needs to be signed */
2793 struct zone *z;
2794 struct zonelist *zonelist;
2795
54a6eb5c
MG
2796 zonelist = &pgdat->node_zonelists[0];
2797 pos = 0;
2798 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2799 for (j = 0; j < nr_nodes; j++) {
2800 node = node_order[j];
2801 z = &NODE_DATA(node)->node_zones[zone_type];
2802 if (populated_zone(z)) {
dd1a239f
MG
2803 zoneref_set_zone(z,
2804 &zonelist->_zonerefs[pos++]);
54a6eb5c 2805 check_highest_zone(zone_type);
f0c0b2b8
KH
2806 }
2807 }
f0c0b2b8 2808 }
dd1a239f
MG
2809 zonelist->_zonerefs[pos].zone = NULL;
2810 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2811}
2812
2813static int default_zonelist_order(void)
2814{
2815 int nid, zone_type;
 2816 unsigned long low_kmem_size, total_size;
2817 struct zone *z;
2818 int average_size;
2819 /*
88393161 2820 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
f0c0b2b8
KH
2821 * If they are really small and used heavily, the system can fall
2822 * into OOM very easily.
e325c90f 2823 * This function detects ZONE_DMA/DMA32 size and configures the zone order.
f0c0b2b8
KH
2824 */
 2825 /* Is there ZONE_NORMAL? (e.g. ppc has only the DMA zone.) */
2826 low_kmem_size = 0;
2827 total_size = 0;
2828 for_each_online_node(nid) {
2829 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2830 z = &NODE_DATA(nid)->node_zones[zone_type];
2831 if (populated_zone(z)) {
2832 if (zone_type < ZONE_NORMAL)
2833 low_kmem_size += z->present_pages;
2834 total_size += z->present_pages;
e325c90f
DR
2835 } else if (zone_type == ZONE_NORMAL) {
2836 /*
2837 * If any node has only lowmem, then node order
2838 * is preferred to allow kernel allocations
2839 * locally; otherwise, they can easily infringe
2840 * on other nodes when there is an abundance of
2841 * lowmem available to allocate from.
2842 */
2843 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
2844 }
2845 }
2846 }
 2847 if (!low_kmem_size || /* there is no DMA area. */
2848 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2849 return ZONELIST_ORDER_NODE;
2850 /*
 2851 * Look into each node's config.
 2852 * If there is a node whose DMA/DMA32 memory makes up a very large
 2853 * share of its local memory, NODE order may be suitable.
2854 */
37b07e41
LS
2855 average_size = total_size /
2856 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2857 for_each_online_node(nid) {
2858 low_kmem_size = 0;
2859 total_size = 0;
2860 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2861 z = &NODE_DATA(nid)->node_zones[zone_type];
2862 if (populated_zone(z)) {
2863 if (zone_type < ZONE_NORMAL)
2864 low_kmem_size += z->present_pages;
2865 total_size += z->present_pages;
2866 }
2867 }
2868 if (low_kmem_size &&
2869 total_size > average_size && /* ignore small node */
2870 low_kmem_size > total_size * 70/100)
2871 return ZONELIST_ORDER_NODE;
2872 }
2873 return ZONELIST_ORDER_ZONE;
2874}
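/*
 * Editorial aside: the per-node test above reduces to a single predicate
 * -- the node is not tiny, and its low zones hold more than 70% of it.
 * The worked numbers in the trailing comment are illustrative.
 */
static int toy_node_prefers_node_order(unsigned long low_kmem_size,
				       unsigned long total_size,
				       unsigned long average_size)
{
	return low_kmem_size && total_size > average_size &&
	       low_kmem_size > total_size * 70 / 100;
}
/* e.g. toy_node_prefers_node_order(800, 1000, 500) == 1, since 800 > 700 */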
2875
2876static void set_zonelist_order(void)
2877{
2878 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2879 current_zonelist_order = default_zonelist_order();
2880 else
2881 current_zonelist_order = user_zonelist_order;
2882}
2883
2884static void build_zonelists(pg_data_t *pgdat)
2885{
2886 int j, node, load;
2887 enum zone_type i;
1da177e4 2888 nodemask_t used_mask;
f0c0b2b8
KH
2889 int local_node, prev_node;
2890 struct zonelist *zonelist;
2891 int order = current_zonelist_order;
1da177e4
LT
2892
2893 /* initialize zonelists */
523b9458 2894 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2895 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2896 zonelist->_zonerefs[0].zone = NULL;
2897 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2898 }
2899
2900 /* NUMA-aware ordering of nodes */
2901 local_node = pgdat->node_id;
62bc62a8 2902 load = nr_online_nodes;
1da177e4
LT
2903 prev_node = local_node;
2904 nodes_clear(used_mask);
f0c0b2b8 2905
f0c0b2b8
KH
2906 memset(node_order, 0, sizeof(node_order));
2907 j = 0;
2908
1da177e4 2909 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2910 int distance = node_distance(local_node, node);
2911
2912 /*
2913 * If another node is sufficiently far away then it is better
2914 * to reclaim pages in a zone before going off node.
2915 */
2916 if (distance > RECLAIM_DISTANCE)
2917 zone_reclaim_mode = 1;
2918
1da177e4
LT
2919 /*
2920 * We don't want to pressure a particular node.
2921 * So adding penalty to the first node in same
2922 * distance group to make it round-robin.
2923 */
9eeff239 2924 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2925 node_load[node] = load;
2926
1da177e4
LT
2927 prev_node = node;
2928 load--;
f0c0b2b8
KH
2929 if (order == ZONELIST_ORDER_NODE)
2930 build_zonelists_in_node_order(pgdat, node);
2931 else
2932 node_order[j++] = node; /* remember order */
2933 }
1da177e4 2934
f0c0b2b8
KH
2935 if (order == ZONELIST_ORDER_ZONE) {
2936 /* calculate node order -- i.e., DMA last! */
2937 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2938 }
523b9458
CL
2939
2940 build_thisnode_zonelists(pgdat);
1da177e4
LT
2941}
2942
9276b1bc 2943/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2944static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2945{
54a6eb5c
MG
2946 struct zonelist *zonelist;
2947 struct zonelist_cache *zlc;
dd1a239f 2948 struct zoneref *z;
9276b1bc 2949
54a6eb5c
MG
2950 zonelist = &pgdat->node_zonelists[0];
2951 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2952 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
2953 for (z = zonelist->_zonerefs; z->zone; z++)
2954 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
2955}
2956
7aac7898
LS
2957#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2958/*
2959 * Return node id of node used for "local" allocations.
2960 * I.e., first node id of first zone in arg node's generic zonelist.
2961 * Used for initializing percpu 'numa_mem', which is used primarily
2962 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
2963 */
2964int local_memory_node(int node)
2965{
2966 struct zone *zone;
2967
2968 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2969 gfp_zone(GFP_KERNEL),
2970 NULL,
2971 &zone);
2972 return zone->node;
2973}
2974#endif
f0c0b2b8 2975
1da177e4
LT
2976#else /* CONFIG_NUMA */
2977
f0c0b2b8
KH
2978static void set_zonelist_order(void)
2979{
2980 current_zonelist_order = ZONELIST_ORDER_ZONE;
2981}
2982
2983static void build_zonelists(pg_data_t *pgdat)
1da177e4 2984{
19655d34 2985 int node, local_node;
54a6eb5c
MG
2986 enum zone_type j;
2987 struct zonelist *zonelist;
1da177e4
LT
2988
2989 local_node = pgdat->node_id;
1da177e4 2990
54a6eb5c
MG
2991 zonelist = &pgdat->node_zonelists[0];
2992 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 2993
54a6eb5c
MG
2994 /*
2995 * Now we build the zonelist so that it contains the zones
2996 * of all the other nodes.
2997 * We don't want to pressure a particular node, so when
2998 * building the zones for node N, we make sure that the
2999 * zones coming right after the local ones are those from
3000 * node N+1 (modulo N)
3001 */
3002 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3003 if (!node_online(node))
3004 continue;
3005 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3006 MAX_NR_ZONES - 1);
1da177e4 3007 }
54a6eb5c
MG
3008 for (node = 0; node < local_node; node++) {
3009 if (!node_online(node))
3010 continue;
3011 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3012 MAX_NR_ZONES - 1);
3013 }
3014
dd1a239f
MG
3015 zonelist->_zonerefs[j].zone = NULL;
3016 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
3017}
3018
9276b1bc 3019/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 3020static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 3021{
54a6eb5c 3022 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
3023}
3024
1da177e4
LT
3025#endif /* CONFIG_NUMA */
3026
99dcc3e5
CL
3027/*
3028 * Boot pageset table. One per cpu which is going to be used for all
3029 * zones and all nodes. The parameters will be set in such a way
3030 * that an item put on a list will immediately be handed over to
3031 * the buddy list. This is safe since pageset manipulation is done
3032 * with interrupts disabled.
3033 *
3034 * The boot_pagesets must be kept even after bootup is complete for
3035 * unused processors and/or zones. They do play a role for bootstrapping
3036 * hotplugged processors.
3037 *
3038 * zoneinfo_show() and maybe other functions do
3039 * not check if the processor is online before following the pageset pointer.
3040 * Other parts of the kernel may not check if the zone is available.
3041 */
3042static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3043static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 3044static void setup_zone_pageset(struct zone *zone);
99dcc3e5 3045
4eaf3f64
HL
3046/*
3047 * Global mutex to protect against size modification of zonelists
3048 * as well as to serialize pageset setup for the new populated zone.
3049 */
3050DEFINE_MUTEX(zonelists_mutex);
3051
9b1a4d38 3052/* Return value is int just to satisfy stop_machine()'s prototype */
1f522509 3053static __init_refok int __build_all_zonelists(void *data)
1da177e4 3054{
6811378e 3055 int nid;
99dcc3e5 3056 int cpu;
9276b1bc 3057
7f9cfb31
BL
3058#ifdef CONFIG_NUMA
3059 memset(node_load, 0, sizeof(node_load));
3060#endif
9276b1bc 3061 for_each_online_node(nid) {
7ea1530a
CL
3062 pg_data_t *pgdat = NODE_DATA(nid);
3063
3064 build_zonelists(pgdat);
3065 build_zonelist_cache(pgdat);
9276b1bc 3066 }
99dcc3e5
CL
3067
3068 /*
3069 * Initialize the boot_pagesets that are going to be used
3070 * for bootstrapping processors. The real pagesets for
3071 * each zone will be allocated later when the per cpu
3072 * allocator is available.
3073 *
 3074 * The boot_pagesets are also used for bootstrapping offline
 3075 * cpus if the system is already booted, because the pagesets
 3076 * are needed to initialize allocators on a specific cpu too.
 3077 * E.g. the percpu allocator needs the page allocator, which
3078 * needs the percpu allocator in order to allocate its pagesets
3079 * (a chicken-egg dilemma).
3080 */
7aac7898 3081 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3082 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3083
7aac7898
LS
3084#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3085 /*
3086 * We now know the "local memory node" for each node--
3087 * i.e., the node of the first zone in the generic zonelist.
3088 * Set up numa_mem percpu variable for on-line cpus. During
3089 * boot, only the boot cpu should be on-line; we'll init the
3090 * secondary cpus' numa_mem as they come on-line. During
3091 * node/memory hotplug, we'll fixup all on-line cpus.
3092 */
3093 if (cpu_online(cpu))
3094 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3095#endif
3096 }
3097
6811378e
YG
3098 return 0;
3099}
3100
4eaf3f64
HL
3101/*
 3102 * Called with zonelists_mutex always held,
3103 * unless system_state == SYSTEM_BOOTING.
3104 */
1f522509 3105void build_all_zonelists(void *data)
6811378e 3106{
f0c0b2b8
KH
3107 set_zonelist_order();
3108
6811378e 3109 if (system_state == SYSTEM_BOOTING) {
423b41d7 3110 __build_all_zonelists(NULL);
68ad8df4 3111 mminit_verify_zonelist();
6811378e
YG
3112 cpuset_init_current_mems_allowed();
3113 } else {
183ff22b 3114 /* We have to stop all cpus to guarantee there is no user
6811378e 3115 of the zonelist */
e9959f0f
KH
3116#ifdef CONFIG_MEMORY_HOTPLUG
3117 if (data)
3118 setup_zone_pageset((struct zone *)data);
3119#endif
3120 stop_machine(__build_all_zonelists, NULL, NULL);
6811378e
YG
3121 /* cpuset refresh routine should be here */
3122 }
bd1e22b8 3123 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3124 /*
3125 * Disable grouping by mobility if the number of pages in the
3126 * system is too low to allow the mechanism to work. It would be
3127 * more accurate, but expensive to check per-zone. This check is
3128 * made on memory-hotadd so a system can start with mobility
3129 * disabled and enable it later
3130 */
d9c23400 3131 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3132 page_group_by_mobility_disabled = 1;
3133 else
3134 page_group_by_mobility_disabled = 0;
3135
3136 printk("Built %i zonelists in %s order, mobility grouping %s. "
3137 "Total pages: %ld\n",
62bc62a8 3138 nr_online_nodes,
f0c0b2b8 3139 zonelist_order_name[current_zonelist_order],
9ef9acb0 3140 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3141 vm_total_pages);
3142#ifdef CONFIG_NUMA
3143 printk("Policy zone: %s\n", zone_names[policy_zone]);
3144#endif
1da177e4
LT
3145}
3146
3147/*
3148 * Helper functions to size the waitqueue hash table.
3149 * Essentially these want to choose hash table sizes sufficiently
3150 * large so that collisions trying to wait on pages are rare.
3151 * But in fact, the number of active page waitqueues on typical
3152 * systems is ridiculously low, less than 200. So this is even
3153 * conservative, even though it seems large.
3154 *
3155 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3156 * waitqueues, i.e. the size of the waitq table given the number of pages.
3157 */
3158#define PAGES_PER_WAITQUEUE 256
3159
cca448fe 3160#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3161static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3162{
3163 unsigned long size = 1;
3164
3165 pages /= PAGES_PER_WAITQUEUE;
3166
3167 while (size < pages)
3168 size <<= 1;
3169
3170 /*
3171 * Once we have dozens or even hundreds of threads sleeping
3172 * on IO we've got bigger problems than wait queue collision.
3173 * Limit the size of the wait table to a reasonable size.
3174 */
3175 size = min(size, 4096UL);
3176
3177 return max(size, 4UL);
3178}
cca448fe
YG
3179#else
3180/*
3181 * A zone's size might be changed by hot-add, so it is not possible to determine
3182 * a suitable size for its wait_table. So we use the maximum size now.
3183 *
3184 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3185 *
3186 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3187 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3188 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3189 *
3190 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3191 * or more by the traditional way. (See above). It equals:
3192 *
3193 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3194 * ia64(16K page size) : = ( 8G + 4M)byte.
3195 * powerpc (64K page size) : = (32G +16M)byte.
3196 */
3197static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3198{
3199 return 4096UL;
3200}
3201#endif
1da177e4
LT
3202
3203/*
3204 * This is an integer logarithm so that shifts can be used later
3205 * to extract the more random high bits from the multiplicative
3206 * hash function before the remainder is taken.
3207 */
3208static inline unsigned long wait_table_bits(unsigned long size)
3209{
3210 return ffz(~size);
3211}
3212
3213#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3214
56fd56b8 3215/*
d9c23400 3216 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3217 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3218 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3219 * higher will lead to a bigger reserve which will get freed as contiguous
3220 * blocks as reclaim kicks in
3221 */
3222static void setup_zone_migrate_reserve(struct zone *zone)
3223{
3224 unsigned long start_pfn, pfn, end_pfn;
3225 struct page *page;
78986a67
MG
3226 unsigned long block_migratetype;
3227 int reserve;
56fd56b8
MG
3228
3229 /* Get the start pfn, end pfn and the number of blocks to reserve */
3230 start_pfn = zone->zone_start_pfn;
3231 end_pfn = start_pfn + zone->spanned_pages;
41858966 3232 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3233 pageblock_order;
56fd56b8 3234
78986a67
MG
3235 /*
3236 * Reserve blocks are generally in place to help high-order atomic
3237 * allocations that are short-lived. A min_free_kbytes value that
3238 * would result in more than 2 reserve blocks for atomic allocations
3239 * is assumed to be in place to help anti-fragmentation for the
3240 * future allocation of hugepages at runtime.
3241 */
3242 reserve = min(2, reserve);
3243
d9c23400 3244 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3245 if (!pfn_valid(pfn))
3246 continue;
3247 page = pfn_to_page(pfn);
3248
344c790e
AL
3249 /* Watch out for overlapping nodes */
3250 if (page_to_nid(page) != zone_to_nid(zone))
3251 continue;
3252
56fd56b8
MG
3253 /* Blocks with reserved pages will never free, skip them. */
3254 if (PageReserved(page))
3255 continue;
3256
3257 block_migratetype = get_pageblock_migratetype(page);
3258
3259 /* If this block is reserved, account for it */
3260 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3261 reserve--;
3262 continue;
3263 }
3264
3265 /* Suitable for reserving if this block is movable */
3266 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3267 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3268 move_freepages_block(zone, page, MIGRATE_RESERVE);
3269 reserve--;
3270 continue;
3271 }
3272
3273 /*
3274 * If the reserve is met and this is a previous reserved block,
3275 * take it back
3276 */
3277 if (block_migratetype == MIGRATE_RESERVE) {
3278 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3279 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3280 }
3281 }
3282}
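/*
 * Editor's illustration (assumed values, not from the source): with
 * 4K pages and pageblock_order == 10 (1024 pages, i.e. 4MB blocks),
 * a zone whose min watermark is 1536 pages gives
 *
 *	reserve = roundup(1536, 1024) >> 10 = 2048 >> 10 = 2
 *
 * which survives the min(2, reserve) clamp, so two pageblocks are
 * marked MIGRATE_RESERVE.
 */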
ac0e5b7a 3283
1da177e4
LT
3284/*
3285 * Initially all pages are reserved - free ones are freed
3286 * up by free_all_bootmem() once the early boot process is
3287 * done. Non-atomic initialization, single-pass.
3288 */
c09b4240 3289void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3290 unsigned long start_pfn, enum memmap_context context)
1da177e4 3291{
1da177e4 3292 struct page *page;
29751f69
AW
3293 unsigned long end_pfn = start_pfn + size;
3294 unsigned long pfn;
86051ca5 3295 struct zone *z;
1da177e4 3296
22b31eec
HD
3297 if (highest_memmap_pfn < end_pfn - 1)
3298 highest_memmap_pfn = end_pfn - 1;
3299
86051ca5 3300 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3301 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3302 /*
3303 * There can be holes in boot-time mem_map[]s
3304 * handed to this function. They do not
3305 * exist on hotplugged memory.
3306 */
3307 if (context == MEMMAP_EARLY) {
3308 if (!early_pfn_valid(pfn))
3309 continue;
3310 if (!early_pfn_in_nid(pfn, nid))
3311 continue;
3312 }
d41dee36
AW
3313 page = pfn_to_page(pfn);
3314 set_page_links(page, zone, nid, pfn);
708614e6 3315 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3316 init_page_count(page);
1da177e4
LT
3317 reset_page_mapcount(page);
3318 SetPageReserved(page);
b2a0ac88
MG
3319 /*
3320 * Mark the block movable so that blocks are reserved for
3321 * movable at startup. This will force kernel allocations
3322 * to reserve their blocks rather than leaking throughout
3323 * the address space during boot when many long-lived
56fd56b8
MG
3324 * kernel allocations are made. Later some blocks near
3325 * the start are marked MIGRATE_RESERVE by
3326 * setup_zone_migrate_reserve()
86051ca5
KH
3327 *
 3328 * The bitmap is created for the zone's valid pfn range, but the
 3329 * memmap can be created for invalid pages (for alignment).
 3330 * Check here so that set_pageblock_migratetype() is not called
 3331 * on a pfn outside the zone.
b2a0ac88 3332 */
86051ca5
KH
3333 if ((z->zone_start_pfn <= pfn)
3334 && (pfn < z->zone_start_pfn + z->spanned_pages)
3335 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 3336 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 3337
1da177e4
LT
3338 INIT_LIST_HEAD(&page->lru);
3339#ifdef WANT_PAGE_VIRTUAL
3340 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3341 if (!is_highmem_idx(zone))
3212c6be 3342 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 3343#endif
1da177e4
LT
3344 }
3345}
3346
1e548deb 3347static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 3348{
b2a0ac88
MG
3349 int order, t;
3350 for_each_migratetype_order(order, t) {
3351 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
3352 zone->free_area[order].nr_free = 0;
3353 }
3354}
3355
3356#ifndef __HAVE_ARCH_MEMMAP_INIT
3357#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 3358 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
3359#endif
3360
1d6f4e60 3361static int zone_batchsize(struct zone *zone)
e7c8d5c9 3362{
3a6be87f 3363#ifdef CONFIG_MMU
e7c8d5c9
CL
3364 int batch;
3365
3366 /*
 3367 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 3368 * size of the zone, but no more than half a megabyte.
e7c8d5c9
CL
3369 *
3370 * OK, so we don't know how big the cache is. So guess.
3371 */
3372 batch = zone->present_pages / 1024;
ba56e91c
SR
3373 if (batch * PAGE_SIZE > 512 * 1024)
3374 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
3375 batch /= 4; /* We effectively *= 4 below */
3376 if (batch < 1)
3377 batch = 1;
3378
3379 /*
0ceaacc9
NP
3380 * Clamp the batch to a 2^n - 1 value. Having a power
3381 * of 2 value was found to be more likely to have
3382 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 3383 *
0ceaacc9
NP
3384 * For example if 2 tasks are alternately allocating
3385 * batches of pages, one task can end up with a lot
3386 * of pages of one half of the possible page colors
3387 * and the other with pages of the other colors.
e7c8d5c9 3388 */
9155203a 3389 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 3390
e7c8d5c9 3391 return batch;
3a6be87f
DH
3392
3393#else
3394 /* The deferral and batching of frees should be suppressed under NOMMU
3395 * conditions.
3396 *
3397 * The problem is that NOMMU needs to be able to allocate large chunks
3398 * of contiguous memory as there's no hardware page translation to
3399 * assemble apparent contiguous memory from discontiguous pages.
3400 *
3401 * Queueing large contiguous runs of pages for batching, however,
3402 * causes the pages to actually be freed in smaller chunks. As there
3403 * can be a significant delay between the individual batches being
3404 * recycled, this leads to the once large chunks of space being
3405 * fragmented and becoming unavailable for high-order allocations.
3406 */
3407 return 0;
3408#endif
e7c8d5c9
CL
3409}
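/*
 * Editor's illustration (assumed values): for a 1GB zone of 4K pages
 * (262144 pages), batch starts at 262144 / 1024 = 256, is capped to
 * (512 * 1024) / 4096 = 128 by the half-megabyte limit, becomes
 * 128 / 4 = 32, and rounddown_pow_of_two(32 + 16) - 1 finally yields
 * a batch of 31.
 */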
3410
b69a7288 3411static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
3412{
3413 struct per_cpu_pages *pcp;
5f8dcc21 3414 int migratetype;
2caaad41 3415
1c6fe946
MD
3416 memset(p, 0, sizeof(*p));
3417
3dfa5721 3418 pcp = &p->pcp;
2caaad41 3419 pcp->count = 0;
2caaad41
CL
3420 pcp->high = 6 * batch;
3421 pcp->batch = max(1UL, 1 * batch);
5f8dcc21
MG
3422 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3423 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
3424}
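/*
 * Editor's illustration: continuing the example above, a batch of 31
 * gives pcp->high = 6 * 31 = 186, so up to 186 pages may accumulate
 * on a CPU's per-cpu list before it is drained in chunks of 31.
 */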
3425
8ad4b1fb
RS
3426/*
3427 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3428 * to the value high for the pageset p.
3429 */
3430
3431static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3432 unsigned long high)
3433{
3434 struct per_cpu_pages *pcp;
3435
3dfa5721 3436 pcp = &p->pcp;
8ad4b1fb
RS
3437 pcp->high = high;
3438 pcp->batch = max(1UL, high/4);
3439 if ((high/4) > (PAGE_SHIFT * 8))
3440 pcp->batch = PAGE_SHIFT * 8;
3441}
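/*
 * Editor's illustration (assuming PAGE_SHIFT == 12): a high water
 * mark of 1000 pages would give high / 4 = 250, which exceeds
 * PAGE_SHIFT * 8 == 96, so the batch is clamped to 96.
 */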
3442
319774e2
WF
3443static __meminit void setup_zone_pageset(struct zone *zone)
3444{
3445 int cpu;
3446
3447 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3448
3449 for_each_possible_cpu(cpu) {
3450 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3451
3452 setup_pageset(pcp, zone_batchsize(zone));
3453
3454 if (percpu_pagelist_fraction)
3455 setup_pagelist_highmark(pcp,
3456 (zone->present_pages /
3457 percpu_pagelist_fraction));
3458 }
3459}
3460
2caaad41 3461/*
99dcc3e5
CL
3462 * Allocate per cpu pagesets and initialize them.
3463 * Before this call only boot pagesets were available.
e7c8d5c9 3464 */
99dcc3e5 3465void __init setup_per_cpu_pageset(void)
e7c8d5c9 3466{
99dcc3e5 3467 struct zone *zone;
e7c8d5c9 3468
319774e2
WF
3469 for_each_populated_zone(zone)
3470 setup_zone_pageset(zone);
e7c8d5c9
CL
3471}
3472
577a32f6 3473static noinline __init_refok
cca448fe 3474int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
3475{
3476 int i;
3477 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 3478 size_t alloc_size;
ed8ece2e
DH
3479
3480 /*
3481 * The per-page waitqueue mechanism uses hashed waitqueues
3482 * per zone.
3483 */
02b694de
YG
3484 zone->wait_table_hash_nr_entries =
3485 wait_table_hash_nr_entries(zone_size_pages);
3486 zone->wait_table_bits =
3487 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
3488 alloc_size = zone->wait_table_hash_nr_entries
3489 * sizeof(wait_queue_head_t);
3490
cd94b9db 3491 if (!slab_is_available()) {
cca448fe
YG
3492 zone->wait_table = (wait_queue_head_t *)
3493 alloc_bootmem_node(pgdat, alloc_size);
3494 } else {
3495 /*
3496 * This case means that a zone whose size was 0 gets new memory
3497 * via memory hot-add.
3498 * But it may be the case that a new node was hot-added. In
3499 * this case vmalloc() will not be able to use this new node's
3500 * memory - this wait_table must be initialized to use this new
3501 * node itself as well.
3502 * To use this new node's memory, further consideration will be
3503 * necessary.
3504 */
8691f3a7 3505 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
3506 }
3507 if (!zone->wait_table)
3508 return -ENOMEM;
ed8ece2e 3509
02b694de 3510 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 3511 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
3512
3513 return 0;
ed8ece2e
DH
3514}
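/*
 * Editor's illustration: when the hotplug-style fixed table of 4096
 * entries is used, with a 24-byte wait_queue_head_t (x86-64 with
 * preemption, per the table above), alloc_size works out to
 * 4096 * 24 = 96KB, the maximum quoted earlier.
 */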
3515
112067f0
SL
3516static int __zone_pcp_update(void *data)
3517{
3518 struct zone *zone = data;
3519 int cpu;
3520 unsigned long batch = zone_batchsize(zone), flags;
3521
2d30a1f6 3522 for_each_possible_cpu(cpu) {
112067f0
SL
3523 struct per_cpu_pageset *pset;
3524 struct per_cpu_pages *pcp;
3525
99dcc3e5 3526 pset = per_cpu_ptr(zone->pageset, cpu);
112067f0
SL
3527 pcp = &pset->pcp;
3528
3529 local_irq_save(flags);
5f8dcc21 3530 free_pcppages_bulk(zone, pcp->count, pcp);
112067f0
SL
3531 setup_pageset(pset, batch);
3532 local_irq_restore(flags);
3533 }
3534 return 0;
3535}
3536
3537void zone_pcp_update(struct zone *zone)
3538{
3539 stop_machine(__zone_pcp_update, zone, NULL);
3540}
3541
c09b4240 3542static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 3543{
99dcc3e5
CL
3544 /*
3545 * per cpu subsystem is not up at this point. The following code
3546 * relies on the ability of the linker to provide the
3547 * offset of a (static) per cpu variable into the per cpu area.
3548 */
3549 zone->pageset = &boot_pageset;
ed8ece2e 3550
f5335c0f 3551 if (zone->present_pages)
99dcc3e5
CL
3552 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3553 zone->name, zone->present_pages,
3554 zone_batchsize(zone));
ed8ece2e
DH
3555}
3556
718127cc
YG
3557__meminit int init_currently_empty_zone(struct zone *zone,
3558 unsigned long zone_start_pfn,
a2f3aa02
DH
3559 unsigned long size,
3560 enum memmap_context context)
ed8ece2e
DH
3561{
3562 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
3563 int ret;
3564 ret = zone_wait_table_init(zone, size);
3565 if (ret)
3566 return ret;
ed8ece2e
DH
3567 pgdat->nr_zones = zone_idx(zone) + 1;
3568
ed8ece2e
DH
3569 zone->zone_start_pfn = zone_start_pfn;
3570
708614e6
MG
3571 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3572 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3573 pgdat->node_id,
3574 (unsigned long)zone_idx(zone),
3575 zone_start_pfn, (zone_start_pfn + size));
3576
1e548deb 3577 zone_init_free_lists(zone);
718127cc
YG
3578
3579 return 0;
ed8ece2e
DH
3580}
3581
c713216d
MG
3582#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3583/*
3584 * Basic iterator support. Return the first range of PFNs for a node
3585 * Note: nid == MAX_NUMNODES returns first region regardless of node
3586 */
a3142c8e 3587static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
3588{
3589 int i;
3590
3591 for (i = 0; i < nr_nodemap_entries; i++)
3592 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3593 return i;
3594
3595 return -1;
3596}
3597
3598/*
3599 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 3600 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 3601 */
a3142c8e 3602static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
3603{
3604 for (index = index + 1; index < nr_nodemap_entries; index++)
3605 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3606 return index;
3607
3608 return -1;
3609}
3610
3611#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3612/*
3613 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3614 * Architectures may implement their own version but if add_active_range()
3615 * was used and there are no special requirements, this is a convenient
3616 * alternative
3617 */
f2dbcfa7 3618int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d
MG
3619{
3620 int i;
3621
3622 for (i = 0; i < nr_nodemap_entries; i++) {
3623 unsigned long start_pfn = early_node_map[i].start_pfn;
3624 unsigned long end_pfn = early_node_map[i].end_pfn;
3625
3626 if (start_pfn <= pfn && pfn < end_pfn)
3627 return early_node_map[i].nid;
3628 }
cc2559bc
KH
3629 /* This is a memory hole */
3630 return -1;
c713216d
MG
3631}
3632#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3633
f2dbcfa7
KH
3634int __meminit early_pfn_to_nid(unsigned long pfn)
3635{
cc2559bc
KH
3636 int nid;
3637
3638 nid = __early_pfn_to_nid(pfn);
3639 if (nid >= 0)
3640 return nid;
3641 /* just returns 0 */
3642 return 0;
f2dbcfa7
KH
3643}
3644
cc2559bc
KH
3645#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3646bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3647{
3648 int nid;
3649
3650 nid = __early_pfn_to_nid(pfn);
3651 if (nid >= 0 && nid != node)
3652 return false;
3653 return true;
3654}
3655#endif
f2dbcfa7 3656
c713216d
MG
3657/* Basic iterator support to walk early_node_map[] */
3658#define for_each_active_range_index_in_nid(i, nid) \
3659 for (i = first_active_region_index_in_nid(nid); i != -1; \
3660 i = next_active_region_index_in_nid(i, nid))
3661
3662/**
3663 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
3664 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3665 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
3666 *
3667 * If an architecture guarantees that all ranges registered with
 3668 * add_active_ranges() contain no holes and may be freed, this
 3669 * function may be used instead of calling free_bootmem() manually.
3670 */
3671void __init free_bootmem_with_active_regions(int nid,
3672 unsigned long max_low_pfn)
3673{
3674 int i;
3675
3676 for_each_active_range_index_in_nid(i, nid) {
3677 unsigned long size_pages = 0;
3678 unsigned long end_pfn = early_node_map[i].end_pfn;
3679
3680 if (early_node_map[i].start_pfn >= max_low_pfn)
3681 continue;
3682
3683 if (end_pfn > max_low_pfn)
3684 end_pfn = max_low_pfn;
3685
3686 size_pages = end_pfn - early_node_map[i].start_pfn;
3687 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3688 PFN_PHYS(early_node_map[i].start_pfn),
3689 size_pages << PAGE_SHIFT);
3690 }
3691}
3692
edbe7d23
YL
3693#ifdef CONFIG_HAVE_MEMBLOCK
3694u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3695 u64 goal, u64 limit)
3696{
3697 int i;
3698
3699 /* Need to go over early_node_map to find out good range for node */
3700 for_each_active_range_index_in_nid(i, nid) {
3701 u64 addr;
3702 u64 ei_start, ei_last;
3703 u64 final_start, final_end;
3704
3705 ei_last = early_node_map[i].end_pfn;
3706 ei_last <<= PAGE_SHIFT;
3707 ei_start = early_node_map[i].start_pfn;
3708 ei_start <<= PAGE_SHIFT;
3709
3710 final_start = max(ei_start, goal);
3711 final_end = min(ei_last, limit);
3712
3713 if (final_start >= final_end)
3714 continue;
3715
3716 addr = memblock_find_in_range(final_start, final_end, size, align);
3717
3718 if (addr == MEMBLOCK_ERROR)
3719 continue;
3720
3721 return addr;
3722 }
3723
3724 return MEMBLOCK_ERROR;
3725}
3726#endif
3727
08677214
YL
3728int __init add_from_early_node_map(struct range *range, int az,
3729 int nr_range, int nid)
3730{
3731 int i;
3732 u64 start, end;
3733
3734 /* need to go over early_node_map to find out good range for node */
3735 for_each_active_range_index_in_nid(i, nid) {
3736 start = early_node_map[i].start_pfn;
3737 end = early_node_map[i].end_pfn;
3738 nr_range = add_range(range, az, nr_range, start, end);
3739 }
3740 return nr_range;
3741}
3742
2ee78f7b 3743#ifdef CONFIG_NO_BOOTMEM
08677214
YL
3744void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3745 u64 goal, u64 limit)
3746{
08677214 3747 void *ptr;
72d7c3b3 3748 u64 addr;
08677214 3749
72d7c3b3
YL
3750 if (limit > memblock.current_limit)
3751 limit = memblock.current_limit;
b8ab9f82 3752
72d7c3b3 3753 addr = find_memory_core_early(nid, size, align, goal, limit);
08677214 3754
72d7c3b3
YL
3755 if (addr == MEMBLOCK_ERROR)
3756 return NULL;
08677214 3757
72d7c3b3
YL
3758 ptr = phys_to_virt(addr);
3759 memset(ptr, 0, size);
3760 memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
3761 /*
3762 * The min_count is set to 0 so that bootmem allocated blocks
3763 * are never reported as leaks.
3764 */
3765 kmemleak_alloc(ptr, size, 0, 0);
3766 return ptr;
08677214 3767}
2ee78f7b 3768#endif
08677214
YL
3769
3770
b5bc6c0e
YL
3771void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3772{
3773 int i;
d52d53b8 3774 int ret;
b5bc6c0e 3775
d52d53b8
YL
3776 for_each_active_range_index_in_nid(i, nid) {
3777 ret = work_fn(early_node_map[i].start_pfn,
3778 early_node_map[i].end_pfn, data);
3779 if (ret)
3780 break;
3781 }
b5bc6c0e 3782}
c713216d
MG
3783/**
3784 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 3785 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
3786 *
3787 * If an architecture guarantees that all ranges registered with
3788 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 3789 * function may be used instead of calling memory_present() manually.
c713216d
MG
3790 */
3791void __init sparse_memory_present_with_active_regions(int nid)
3792{
3793 int i;
3794
3795 for_each_active_range_index_in_nid(i, nid)
3796 memory_present(early_node_map[i].nid,
3797 early_node_map[i].start_pfn,
3798 early_node_map[i].end_pfn);
3799}
3800
3801/**
3802 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
3803 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3804 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3805 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
3806 *
3807 * It returns the start and end page frame of a node based on information
3808 * provided by an arch calling add_active_range(). If called for a node
3809 * with no available memory, a warning is printed and the start and end
88ca3b94 3810 * PFNs will be 0.
c713216d 3811 */
a3142c8e 3812void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
3813 unsigned long *start_pfn, unsigned long *end_pfn)
3814{
3815 int i;
3816 *start_pfn = -1UL;
3817 *end_pfn = 0;
3818
3819 for_each_active_range_index_in_nid(i, nid) {
3820 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3821 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3822 }
3823
633c0666 3824 if (*start_pfn == -1UL)
c713216d 3825 *start_pfn = 0;
c713216d
MG
3826}
3827
2a1e274a
MG
3828/*
3829 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 3830 * assumption is made that zones within a node are ordered by monotonically
 3831 * increasing memory addresses, so that the "highest" populated zone is used.
3832 */
b69a7288 3833static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
3834{
3835 int zone_index;
3836 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3837 if (zone_index == ZONE_MOVABLE)
3838 continue;
3839
3840 if (arch_zone_highest_possible_pfn[zone_index] >
3841 arch_zone_lowest_possible_pfn[zone_index])
3842 break;
3843 }
3844
3845 VM_BUG_ON(zone_index == -1);
3846 movable_zone = zone_index;
3847}
3848
3849/*
3850 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3851 * because it is sized independent of architecture. Unlike the other zones,
3852 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3853 * in each node depending on the size of each node and how evenly kernelcore
3854 * is distributed. This helper function adjusts the zone ranges
3855 * provided by the architecture for a given node by using the end of the
3856 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3857 * zones within a node are in order of monotonically increasing memory addresses.
3858 */
b69a7288 3859static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
3860 unsigned long zone_type,
3861 unsigned long node_start_pfn,
3862 unsigned long node_end_pfn,
3863 unsigned long *zone_start_pfn,
3864 unsigned long *zone_end_pfn)
3865{
3866 /* Only adjust if ZONE_MOVABLE is on this node */
3867 if (zone_movable_pfn[nid]) {
3868 /* Size ZONE_MOVABLE */
3869 if (zone_type == ZONE_MOVABLE) {
3870 *zone_start_pfn = zone_movable_pfn[nid];
3871 *zone_end_pfn = min(node_end_pfn,
3872 arch_zone_highest_possible_pfn[movable_zone]);
3873
3874 /* Adjust for ZONE_MOVABLE starting within this range */
3875 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3876 *zone_end_pfn > zone_movable_pfn[nid]) {
3877 *zone_end_pfn = zone_movable_pfn[nid];
3878
3879 /* Check if this whole range is within ZONE_MOVABLE */
3880 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3881 *zone_start_pfn = *zone_end_pfn;
3882 }
3883}
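/*
 * Editor's illustration (assumed PFNs): suppose a node spans PFNs
 * 0 -> 1048576 and zone_movable_pfn[nid] == 786432. A zone spanning
 * 524288 -> 1048576 straddles the boundary, so its end is clipped to
 * 786432; a zone lying entirely at or above 786432 is emptied; and
 * ZONE_MOVABLE itself is sized 786432 -> 1048576.
 */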
3884
c713216d
MG
3885/*
3886 * Return the number of pages a zone spans in a node, including holes
3887 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3888 */
6ea6e688 3889static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3890 unsigned long zone_type,
3891 unsigned long *ignored)
3892{
3893 unsigned long node_start_pfn, node_end_pfn;
3894 unsigned long zone_start_pfn, zone_end_pfn;
3895
3896 /* Get the start and end of the node and zone */
3897 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3898 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3899 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
3900 adjust_zone_range_for_zone_movable(nid, zone_type,
3901 node_start_pfn, node_end_pfn,
3902 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
3903
3904 /* Check that this node has pages within the zone's required range */
3905 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3906 return 0;
3907
3908 /* Move the zone boundaries inside the node if necessary */
3909 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3910 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3911
3912 /* Return the spanned pages */
3913 return zone_end_pfn - zone_start_pfn;
3914}
3915
3916/*
3917 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3918 * then all holes in the requested range will be accounted for.
c713216d 3919 */
32996250 3920unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
3921 unsigned long range_start_pfn,
3922 unsigned long range_end_pfn)
3923{
3924 int i = 0;
3925 unsigned long prev_end_pfn = 0, hole_pages = 0;
3926 unsigned long start_pfn;
3927
3928 /* Find the end_pfn of the first active range of pfns in the node */
3929 i = first_active_region_index_in_nid(nid);
3930 if (i == -1)
3931 return 0;
3932
b5445f95
MG
3933 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3934
9c7cd687
MG
3935 /* Account for ranges before physical memory on this node */
3936 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 3937 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
3938
3939 /* Find all holes for the zone within the node */
3940 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3941
3942 /* No need to continue if prev_end_pfn is outside the zone */
3943 if (prev_end_pfn >= range_end_pfn)
3944 break;
3945
3946 /* Make sure the end of the zone is not within the hole */
3947 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3948 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3949
 3950 /* Update the hole size count and move on */
3951 if (start_pfn > range_start_pfn) {
3952 BUG_ON(prev_end_pfn > start_pfn);
3953 hole_pages += start_pfn - prev_end_pfn;
3954 }
3955 prev_end_pfn = early_node_map[i].end_pfn;
3956 }
3957
9c7cd687
MG
3958 /* Account for ranges past physical memory on this node */
3959 if (range_end_pfn > prev_end_pfn)
0c6cb974 3960 hole_pages += range_end_pfn -
9c7cd687
MG
3961 max(range_start_pfn, prev_end_pfn);
3962
c713216d
MG
3963 return hole_pages;
3964}
3965
3966/**
3967 * absent_pages_in_range - Return number of page frames in holes within a range
3968 * @start_pfn: The start PFN to start searching for holes
3969 * @end_pfn: The end PFN to stop searching for holes
3970 *
88ca3b94 3971 * It returns the number of page frames in memory holes within a range.
c713216d
MG
3972 */
3973unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3974 unsigned long end_pfn)
3975{
3976 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3977}
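/*
 * Editor's illustration (assumed ranges): with active ranges
 * [100, 200) and [300, 400) and a query over [0, 500), the holes are
 * [0, 100), [200, 300) and [400, 500), so 100 + 100 + 100 = 300
 * pages are reported absent.
 */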
3978
3979/* Return the number of page frames in holes in a zone on a node */
6ea6e688 3980static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3981 unsigned long zone_type,
3982 unsigned long *ignored)
3983{
9c7cd687
MG
3984 unsigned long node_start_pfn, node_end_pfn;
3985 unsigned long zone_start_pfn, zone_end_pfn;
3986
3987 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3988 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3989 node_start_pfn);
3990 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3991 node_end_pfn);
3992
2a1e274a
MG
3993 adjust_zone_range_for_zone_movable(nid, zone_type,
3994 node_start_pfn, node_end_pfn,
3995 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3996 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3997}
0e0b864e 3998
c713216d 3999#else
6ea6e688 4000static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
4001 unsigned long zone_type,
4002 unsigned long *zones_size)
4003{
4004 return zones_size[zone_type];
4005}
4006
6ea6e688 4007static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
4008 unsigned long zone_type,
4009 unsigned long *zholes_size)
4010{
4011 if (!zholes_size)
4012 return 0;
4013
4014 return zholes_size[zone_type];
4015}
0e0b864e 4016
c713216d
MG
4017#endif
4018
a3142c8e 4019static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
4020 unsigned long *zones_size, unsigned long *zholes_size)
4021{
4022 unsigned long realtotalpages, totalpages = 0;
4023 enum zone_type i;
4024
4025 for (i = 0; i < MAX_NR_ZONES; i++)
4026 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4027 zones_size);
4028 pgdat->node_spanned_pages = totalpages;
4029
4030 realtotalpages = totalpages;
4031 for (i = 0; i < MAX_NR_ZONES; i++)
4032 realtotalpages -=
4033 zone_absent_pages_in_node(pgdat->node_id, i,
4034 zholes_size);
4035 pgdat->node_present_pages = realtotalpages;
4036 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4037 realtotalpages);
4038}
4039
835c134e
MG
4040#ifndef CONFIG_SPARSEMEM
4041/*
4042 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
 4043 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
 4044 * rounding up, then use NR_PAGEBLOCK_BITS bits per pageblock, finally
835c134e
MG
4045 * round what is now in bits to nearest long in bits, then return it in
4046 * bytes.
4047 */
4048static unsigned long __init usemap_size(unsigned long zonesize)
4049{
4050 unsigned long usemapsize;
4051
d9c23400
MG
4052 usemapsize = roundup(zonesize, pageblock_nr_pages);
4053 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
4054 usemapsize *= NR_PAGEBLOCK_BITS;
4055 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4056
4057 return usemapsize / 8;
4058}
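/*
 * Editor's illustration (assuming pageblock_order == 10 and 3 bits
 * per pageblock): a zone of 1048576 pages has 1024 pageblocks, so
 * 1024 * 3 = 3072 bits, already a multiple of 64, giving
 * 3072 / 8 = 384 bytes of usemap.
 */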
4059
4060static void __init setup_usemap(struct pglist_data *pgdat,
4061 struct zone *zone, unsigned long zonesize)
4062{
4063 unsigned long usemapsize = usemap_size(zonesize);
4064 zone->pageblock_flags = NULL;
58a01a45 4065 if (usemapsize)
835c134e 4066 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
835c134e
MG
4067}
4068#else
fa9f90be 4069static inline void setup_usemap(struct pglist_data *pgdat,
835c134e
MG
4070 struct zone *zone, unsigned long zonesize) {}
4071#endif /* CONFIG_SPARSEMEM */
4072
d9c23400 4073#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c
MG
4074
4075/* Return a sensible default order for the pageblock size. */
4076static inline int pageblock_default_order(void)
4077{
4078 if (HPAGE_SHIFT > PAGE_SHIFT)
4079 return HUGETLB_PAGE_ORDER;
4080
4081 return MAX_ORDER-1;
4082}
4083
d9c23400
MG
4084/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4085static inline void __init set_pageblock_order(unsigned int order)
4086{
4087 /* Check that pageblock_nr_pages has not already been setup */
4088 if (pageblock_order)
4089 return;
4090
4091 /*
4092 * Assume the largest contiguous order of interest is a huge page.
4093 * This value may be variable depending on boot parameters on IA64
4094 */
4095 pageblock_order = order;
4096}
4097#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4098
ba72cb8c
MG
4099/*
4100 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4101 * and pageblock_default_order() are unused as pageblock_order is set
4102 * at compile-time. See include/linux/pageblock-flags.h for the values of
4103 * pageblock_order based on the kernel config
4104 */
4105static inline int pageblock_default_order(unsigned int order)
4106{
4107 return MAX_ORDER-1;
4108}
d9c23400
MG
4109#define set_pageblock_order(x) do {} while (0)
4110
4111#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4112
1da177e4
LT
4113/*
4114 * Set up the zone data structures:
4115 * - mark all pages reserved
4116 * - mark all memory queues empty
4117 * - clear the memory bitmaps
4118 */
b5a0e011 4119static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
4120 unsigned long *zones_size, unsigned long *zholes_size)
4121{
2f1b6248 4122 enum zone_type j;
ed8ece2e 4123 int nid = pgdat->node_id;
1da177e4 4124 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 4125 int ret;
1da177e4 4126
208d54e5 4127 pgdat_resize_init(pgdat);
1da177e4
LT
4128 pgdat->nr_zones = 0;
4129 init_waitqueue_head(&pgdat->kswapd_wait);
4130 pgdat->kswapd_max_order = 0;
52d4b9ac 4131 pgdat_page_cgroup_init(pgdat);
1da177e4
LT
4132
4133 for (j = 0; j < MAX_NR_ZONES; j++) {
4134 struct zone *zone = pgdat->node_zones + j;
0e0b864e 4135 unsigned long size, realsize, memmap_pages;
b69408e8 4136 enum lru_list l;
1da177e4 4137
c713216d
MG
4138 size = zone_spanned_pages_in_node(nid, j, zones_size);
4139 realsize = size - zone_absent_pages_in_node(nid, j,
4140 zholes_size);
1da177e4 4141
0e0b864e
MG
4142 /*
4143 * Adjust realsize so that it accounts for how much memory
4144 * is used by this zone for memmap. This affects the watermark
4145 * and per-cpu initialisations
4146 */
f7232154
JW
4147 memmap_pages =
4148 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
0e0b864e
MG
4149 if (realsize >= memmap_pages) {
4150 realsize -= memmap_pages;
5594c8c8
YL
4151 if (memmap_pages)
4152 printk(KERN_DEBUG
4153 " %s zone: %lu pages used for memmap\n",
4154 zone_names[j], memmap_pages);
0e0b864e
MG
4155 } else
4156 printk(KERN_WARNING
4157 " %s zone: %lu pages exceeds realsize %lu\n",
4158 zone_names[j], memmap_pages, realsize);
4159
6267276f
CL
4160 /* Account for reserved pages */
4161 if (j == 0 && realsize > dma_reserve) {
0e0b864e 4162 realsize -= dma_reserve;
d903ef9f 4163 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4164 zone_names[0], dma_reserve);
0e0b864e
MG
4165 }
4166
98d2b0eb 4167 if (!is_highmem_idx(j))
1da177e4
LT
4168 nr_kernel_pages += realsize;
4169 nr_all_pages += realsize;
4170
4171 zone->spanned_pages = size;
4172 zone->present_pages = realsize;
9614634f 4173#ifdef CONFIG_NUMA
d5f541ed 4174 zone->node = nid;
8417bba4 4175 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 4176 / 100;
0ff38490 4177 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 4178#endif
1da177e4
LT
4179 zone->name = zone_names[j];
4180 spin_lock_init(&zone->lock);
4181 spin_lock_init(&zone->lru_lock);
bdc8cb98 4182 zone_seqlock_init(zone);
1da177e4 4183 zone->zone_pgdat = pgdat;
1da177e4 4184
ed8ece2e 4185 zone_pcp_init(zone);
b69408e8
CL
4186 for_each_lru(l) {
4187 INIT_LIST_HEAD(&zone->lru[l].list);
f8629631 4188 zone->reclaim_stat.nr_saved_scan[l] = 0;
b69408e8 4189 }
6e901571
KM
4190 zone->reclaim_stat.recent_rotated[0] = 0;
4191 zone->reclaim_stat.recent_rotated[1] = 0;
4192 zone->reclaim_stat.recent_scanned[0] = 0;
4193 zone->reclaim_stat.recent_scanned[1] = 0;
2244b95a 4194 zap_zone_vm_stats(zone);
e815af95 4195 zone->flags = 0;
1da177e4
LT
4196 if (!size)
4197 continue;
4198
ba72cb8c 4199 set_pageblock_order(pageblock_default_order());
835c134e 4200 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
4201 ret = init_currently_empty_zone(zone, zone_start_pfn,
4202 size, MEMMAP_EARLY);
718127cc 4203 BUG_ON(ret);
76cdd58e 4204 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 4205 zone_start_pfn += size;
1da177e4
LT
4206 }
4207}
4208
577a32f6 4209static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 4210{
1da177e4
LT
4211 /* Skip empty nodes */
4212 if (!pgdat->node_spanned_pages)
4213 return;
4214
d41dee36 4215#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
4216 /* ia64 gets its own node_mem_map, before this, without bootmem */
4217 if (!pgdat->node_mem_map) {
e984bb43 4218 unsigned long size, start, end;
d41dee36
AW
4219 struct page *map;
4220
e984bb43
BP
4221 /*
 4222 * The zone's endpoints aren't required to be MAX_ORDER
 4223 * aligned, but the node_mem_map endpoints must be MAX_ORDER
 4224 * aligned for the buddy allocator to function correctly.
4225 */
4226 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4227 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4228 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4229 size = (end - start) * sizeof(struct page);
6f167ec7
DH
4230 map = alloc_remap(pgdat->node_id, size);
4231 if (!map)
4232 map = alloc_bootmem_node(pgdat, size);
e984bb43 4233 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 4234 }
12d810c1 4235#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
4236 /*
4237 * With no DISCONTIG, the global mem_map is just set as node 0's
4238 */
c713216d 4239 if (pgdat == NODE_DATA(0)) {
1da177e4 4240 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
4241#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4242 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 4243 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
4244#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4245 }
1da177e4 4246#endif
d41dee36 4247#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
4248}
4249
9109fb7b
JW
4250void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4251 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 4252{
9109fb7b
JW
4253 pg_data_t *pgdat = NODE_DATA(nid);
4254
1da177e4
LT
4255 pgdat->node_id = nid;
4256 pgdat->node_start_pfn = node_start_pfn;
c713216d 4257 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
4258
4259 alloc_node_mem_map(pgdat);
e8c27ac9
YL
4260#ifdef CONFIG_FLAT_NODE_MEM_MAP
4261 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4262 nid, (unsigned long)pgdat,
4263 (unsigned long)pgdat->node_mem_map);
4264#endif
1da177e4
LT
4265
4266 free_area_init_core(pgdat, zones_size, zholes_size);
4267}
4268
c713216d 4269#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
4270
4271#if MAX_NUMNODES > 1
4272/*
4273 * Figure out the number of possible node ids.
4274 */
4275static void __init setup_nr_node_ids(void)
4276{
4277 unsigned int node;
4278 unsigned int highest = 0;
4279
4280 for_each_node_mask(node, node_possible_map)
4281 highest = node;
4282 nr_node_ids = highest + 1;
4283}
4284#else
4285static inline void setup_nr_node_ids(void)
4286{
4287}
4288#endif
4289
c713216d
MG
4290/**
4291 * add_active_range - Register a range of PFNs backed by physical memory
4292 * @nid: The node ID the range resides on
4293 * @start_pfn: The start PFN of the available physical memory
4294 * @end_pfn: The end PFN of the available physical memory
4295 *
4296 * These ranges are stored in an early_node_map[] and later used by
4297 * free_area_init_nodes() to calculate zone sizes and holes. If the
4298 * range spans a memory hole, it is up to the architecture to ensure
4299 * the memory is not freed by the bootmem allocator. If possible
4300 * the range being registered will be merged with existing ranges.
4301 */
4302void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4303 unsigned long end_pfn)
4304{
4305 int i;
4306
6b74ab97
MG
4307 mminit_dprintk(MMINIT_TRACE, "memory_register",
4308 "Entering add_active_range(%d, %#lx, %#lx) "
4309 "%d entries of %d used\n",
4310 nid, start_pfn, end_pfn,
4311 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
c713216d 4312
2dbb51c4
MG
4313 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4314
c713216d
MG
4315 /* Merge with existing active regions if possible */
4316 for (i = 0; i < nr_nodemap_entries; i++) {
4317 if (early_node_map[i].nid != nid)
4318 continue;
4319
4320 /* Skip if an existing region covers this new one */
4321 if (start_pfn >= early_node_map[i].start_pfn &&
4322 end_pfn <= early_node_map[i].end_pfn)
4323 return;
4324
4325 /* Merge forward if suitable */
4326 if (start_pfn <= early_node_map[i].end_pfn &&
4327 end_pfn > early_node_map[i].end_pfn) {
4328 early_node_map[i].end_pfn = end_pfn;
4329 return;
4330 }
4331
4332 /* Merge backward if suitable */
d2dbe08d 4333 if (start_pfn < early_node_map[i].start_pfn &&
c713216d
MG
4334 end_pfn >= early_node_map[i].start_pfn) {
4335 early_node_map[i].start_pfn = start_pfn;
4336 return;
4337 }
4338 }
4339
4340 /* Check that early_node_map is large enough */
4341 if (i >= MAX_ACTIVE_REGIONS) {
4342 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4343 MAX_ACTIVE_REGIONS);
4344 return;
4345 }
4346
4347 early_node_map[i].nid = nid;
4348 early_node_map[i].start_pfn = start_pfn;
4349 early_node_map[i].end_pfn = end_pfn;
4350 nr_nodemap_entries = i + 1;
4351}
4352
4353/**
cc1050ba 4354 * remove_active_range - Shrink an existing registered range of PFNs
c713216d 4355 * @nid: The node id the range is on that should be shrunk
cc1050ba
YL
 4356 * @start_pfn: The new start PFN of the range
 4357 * @end_pfn: The new end PFN of the range
c713216d
MG
4358 *
4359 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
cc1a9d86
YL
 4360 * The map is kept near the end of the physical page range that has already been
4361 * registered. This function allows an arch to shrink an existing registered
4362 * range.
c713216d 4363 */
cc1050ba
YL
4364void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4365 unsigned long end_pfn)
c713216d 4366{
cc1a9d86
YL
4367 int i, j;
4368 int removed = 0;
c713216d 4369
cc1050ba
YL
4370 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4371 nid, start_pfn, end_pfn);
4372
c713216d 4373 /* Find the old active region end and shrink */
cc1a9d86 4374 for_each_active_range_index_in_nid(i, nid) {
cc1050ba
YL
4375 if (early_node_map[i].start_pfn >= start_pfn &&
4376 early_node_map[i].end_pfn <= end_pfn) {
cc1a9d86 4377 /* clear it */
cc1050ba 4378 early_node_map[i].start_pfn = 0;
cc1a9d86
YL
4379 early_node_map[i].end_pfn = 0;
4380 removed = 1;
4381 continue;
4382 }
cc1050ba
YL
4383 if (early_node_map[i].start_pfn < start_pfn &&
4384 early_node_map[i].end_pfn > start_pfn) {
4385 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4386 early_node_map[i].end_pfn = start_pfn;
4387 if (temp_end_pfn > end_pfn)
4388 add_active_range(nid, end_pfn, temp_end_pfn);
4389 continue;
4390 }
4391 if (early_node_map[i].start_pfn >= start_pfn &&
4392 early_node_map[i].end_pfn > end_pfn &&
4393 early_node_map[i].start_pfn < end_pfn) {
4394 early_node_map[i].start_pfn = end_pfn;
cc1a9d86 4395 continue;
c713216d 4396 }
cc1a9d86
YL
4397 }
4398
4399 if (!removed)
4400 return;
4401
4402 /* remove the blank ones */
4403 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4404 if (early_node_map[i].nid != nid)
4405 continue;
4406 if (early_node_map[i].end_pfn)
4407 continue;
4408 /* we found it, get rid of it */
4409 for (j = i; j < nr_nodemap_entries - 1; j++)
4410 memcpy(&early_node_map[j], &early_node_map[j+1],
4411 sizeof(early_node_map[j]));
4412 j = nr_nodemap_entries - 1;
4413 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4414 nr_nodemap_entries--;
4415 }
c713216d
MG
4416}
4417
4418/**
4419 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 4420 *
c713216d
MG
4421 * During discovery, it may be found that a table like SRAT is invalid
4422 * and an alternative discovery method must be used. This function removes
4423 * all currently registered regions.
4424 */
88ca3b94 4425void __init remove_all_active_ranges(void)
c713216d
MG
4426{
4427 memset(early_node_map, 0, sizeof(early_node_map));
4428 nr_nodemap_entries = 0;
4429}
4430
4431/* Compare two active node_active_regions */
4432static int __init cmp_node_active_region(const void *a, const void *b)
4433{
4434 struct node_active_region *arange = (struct node_active_region *)a;
4435 struct node_active_region *brange = (struct node_active_region *)b;
4436
4437 /* Done this way to avoid overflows */
4438 if (arange->start_pfn > brange->start_pfn)
4439 return 1;
4440 if (arange->start_pfn < brange->start_pfn)
4441 return -1;
4442
4443 return 0;
4444}
4445
4446/* sort the node_map by start_pfn */
32996250 4447void __init sort_node_map(void)
c713216d
MG
4448{
4449 sort(early_node_map, (size_t)nr_nodemap_entries,
4450 sizeof(struct node_active_region),
4451 cmp_node_active_region, NULL);
4452}
4453
a6af2bc3 4454/* Find the lowest pfn for a node */
b69a7288 4455static unsigned long __init find_min_pfn_for_node(int nid)
c713216d
MG
4456{
4457 int i;
a6af2bc3 4458 unsigned long min_pfn = ULONG_MAX;
1abbfb41 4459
c713216d
MG
4460 /* Assuming a sorted map, the first range found has the starting pfn */
4461 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 4462 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 4463
a6af2bc3
MG
4464 if (min_pfn == ULONG_MAX) {
4465 printk(KERN_WARNING
2bc0d261 4466 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
4467 return 0;
4468 }
4469
4470 return min_pfn;
c713216d
MG
4471}
4472
4473/**
4474 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4475 *
4476 * It returns the minimum PFN based on information provided via
88ca3b94 4477 * add_active_range().
c713216d
MG
4478 */
4479unsigned long __init find_min_pfn_with_active_regions(void)
4480{
4481 return find_min_pfn_for_node(MAX_NUMNODES);
4482}
4483
37b07e41
LS
4484/*
4485 * early_calculate_totalpages()
4486 * Sum pages in active regions for movable zone.
4487 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4488 */
484f51f8 4489static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
4490{
4491 int i;
4492 unsigned long totalpages = 0;
4493
37b07e41
LS
4494 for (i = 0; i < nr_nodemap_entries; i++) {
4495 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 4496 early_node_map[i].start_pfn;
37b07e41
LS
4497 totalpages += pages;
4498 if (pages)
4499 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4500 }
4501 return totalpages;
7e63efef
MG
4502}
4503
2a1e274a
MG
4504/*
4505 * Find the PFN the Movable zone begins in each node. Kernel memory
4506 * is spread evenly between nodes as long as the nodes have enough
4507 * memory. When they don't, some nodes will have more kernelcore than
4508 * others
4509 */
b69a7288 4510static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
2a1e274a
MG
4511{
4512 int i, nid;
4513 unsigned long usable_startpfn;
4514 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd
YL
 4515 /* save the state before borrowing the nodemask */
4516 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
37b07e41
LS
4517 unsigned long totalpages = early_calculate_totalpages();
4518 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 4519
7e63efef
MG
4520 /*
4521 * If movablecore was specified, calculate what size of
4522 * kernelcore that corresponds so that memory usable for
4523 * any allocation type is evenly spread. If both kernelcore
4524 * and movablecore are specified, then the value of kernelcore
4525 * will be used for required_kernelcore if it's greater than
4526 * what movablecore would have allowed.
4527 */
4528 if (required_movablecore) {
7e63efef
MG
4529 unsigned long corepages;
4530
4531 /*
4532 * Round-up so that ZONE_MOVABLE is at least as large as what
4533 * was requested by the user
4534 */
4535 required_movablecore =
4536 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4537 corepages = totalpages - required_movablecore;
4538
4539 required_kernelcore = max(required_kernelcore, corepages);
4540 }
4541
2a1e274a
MG
4542 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4543 if (!required_kernelcore)
66918dcd 4544 goto out;
2a1e274a
MG
4545
4546 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4547 find_usable_zone_for_movable();
4548 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4549
4550restart:
4551 /* Spread kernelcore memory as evenly as possible throughout nodes */
4552 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 4553 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
4554 /*
4555 * Recalculate kernelcore_node if the division per node
4556 * now exceeds what is necessary to satisfy the requested
4557 * amount of memory for the kernel
4558 */
4559 if (required_kernelcore < kernelcore_node)
4560 kernelcore_node = required_kernelcore / usable_nodes;
4561
4562 /*
4563 * As the map is walked, we track how much memory is usable
4564 * by the kernel using kernelcore_remaining. When it is
4565 * 0, the rest of the node is usable by ZONE_MOVABLE
4566 */
4567 kernelcore_remaining = kernelcore_node;
4568
4569 /* Go through each range of PFNs within this node */
4570 for_each_active_range_index_in_nid(i, nid) {
4571 unsigned long start_pfn, end_pfn;
4572 unsigned long size_pages;
4573
4574 start_pfn = max(early_node_map[i].start_pfn,
4575 zone_movable_pfn[nid]);
4576 end_pfn = early_node_map[i].end_pfn;
4577 if (start_pfn >= end_pfn)
4578 continue;
4579
4580 /* Account for what is only usable for kernelcore */
4581 if (start_pfn < usable_startpfn) {
4582 unsigned long kernel_pages;
4583 kernel_pages = min(end_pfn, usable_startpfn)
4584 - start_pfn;
4585
4586 kernelcore_remaining -= min(kernel_pages,
4587 kernelcore_remaining);
4588 required_kernelcore -= min(kernel_pages,
4589 required_kernelcore);
4590
4591 /* Continue if range is now fully accounted */
4592 if (end_pfn <= usable_startpfn) {
4593
4594 /*
4595 * Push zone_movable_pfn to the end so
4596 * that if we have to rebalance
4597 * kernelcore across nodes, we will
4598 * not double account here
4599 */
4600 zone_movable_pfn[nid] = end_pfn;
4601 continue;
4602 }
4603 start_pfn = usable_startpfn;
4604 }
4605
4606 /*
4607 * The usable PFN range for ZONE_MOVABLE is from
4608 * start_pfn->end_pfn. Calculate size_pages as the
4609 * number of pages used as kernelcore
4610 */
4611 size_pages = end_pfn - start_pfn;
4612 if (size_pages > kernelcore_remaining)
4613 size_pages = kernelcore_remaining;
4614 zone_movable_pfn[nid] = start_pfn + size_pages;
4615
4616 /*
4617 * Some kernelcore has been met, update counts and
4618 * break if the kernelcore for this node has been
 4619 * satisfied
4620 */
4621 required_kernelcore -= min(required_kernelcore,
4622 size_pages);
4623 kernelcore_remaining -= size_pages;
4624 if (!kernelcore_remaining)
4625 break;
4626 }
4627 }
4628
4629 /*
4630 * If there is still required_kernelcore, we do another pass with one
4631 * less node in the count. This will push zone_movable_pfn[nid] further
4632 * along on the nodes that still have memory until kernelcore is
 4633 * satisfied
4634 */
4635 usable_nodes--;
4636 if (usable_nodes && required_kernelcore > usable_nodes)
4637 goto restart;
4638
4639 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4640 for (nid = 0; nid < MAX_NUMNODES; nid++)
4641 zone_movable_pfn[nid] =
4642 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd
YL
4643
4644out:
4645 /* restore the node_state */
4646 node_states[N_HIGH_MEMORY] = saved_node_state;
2a1e274a
MG
4647}
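/*
 * Editor's illustration (assumed values, each node a single range
 * inside the usable zone): two nodes of 262144 pages each (1GB of 4K
 * pages) with kernelcore= equivalent to 262144 pages gives
 * kernelcore_node = 131072, so zone_movable_pfn[] lands 131072 pages
 * into each node and the upper half of each node becomes
 * ZONE_MOVABLE.
 */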
4648
37b07e41
LS
 4649/* Any regular memory on that node? */
4650static void check_for_regular_memory(pg_data_t *pgdat)
4651{
4652#ifdef CONFIG_HIGHMEM
4653 enum zone_type zone_type;
4654
4655 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4656 struct zone *zone = &pgdat->node_zones[zone_type];
4657 if (zone->present_pages)
4658 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4659 }
4660#endif
4661}
4662
c713216d
MG
4663/**
4664 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 4665 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
4666 *
4667 * This will call free_area_init_node() for each active node in the system.
4668 * Using the page ranges provided by add_active_range(), the size of each
 4669 * zone in each node and their holes is calculated. If the maximum PFNs
 4670 * of two adjacent zones match, it is assumed that the zone is empty.
4671 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4672 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4673 * starts where the previous one ended. For example, ZONE_DMA32 starts
4674 * at arch_max_dma_pfn.
4675 */
4676void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4677{
4678 unsigned long nid;
db99100d 4679 int i;
c713216d 4680
a6af2bc3
MG
4681 /* Sort early_node_map as initialisation assumes it is sorted */
4682 sort_node_map();
4683
c713216d
MG
4684 /* Record where the zone boundaries are */
4685 memset(arch_zone_lowest_possible_pfn, 0,
4686 sizeof(arch_zone_lowest_possible_pfn));
4687 memset(arch_zone_highest_possible_pfn, 0,
4688 sizeof(arch_zone_highest_possible_pfn));
4689 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4690 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4691 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
4692 if (i == ZONE_MOVABLE)
4693 continue;
c713216d
MG
4694 arch_zone_lowest_possible_pfn[i] =
4695 arch_zone_highest_possible_pfn[i-1];
4696 arch_zone_highest_possible_pfn[i] =
4697 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4698 }
2a1e274a
MG
4699 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4700 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4701
4702 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4703 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4704 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 4705
c713216d
MG
4706 /* Print out the zone ranges */
4707 printk("Zone PFN ranges:\n");
2a1e274a
MG
4708 for (i = 0; i < MAX_NR_ZONES; i++) {
4709 if (i == ZONE_MOVABLE)
4710 continue;
72f0ba02
DR
4711 printk(" %-8s ", zone_names[i]);
4712 if (arch_zone_lowest_possible_pfn[i] ==
4713 arch_zone_highest_possible_pfn[i])
4714 printk("empty\n");
4715 else
4716 printk("%0#10lx -> %0#10lx\n",
c713216d
MG
4717 arch_zone_lowest_possible_pfn[i],
4718 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
4719 }
4720
4721 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4722 printk("Movable zone start PFN for each node\n");
4723 for (i = 0; i < MAX_NUMNODES; i++) {
4724 if (zone_movable_pfn[i])
4725 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4726 }
c713216d
MG
4727
4728 /* Print out the early_node_map[] */
4729 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4730 for (i = 0; i < nr_nodemap_entries; i++)
5dab8ec1 4731 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
c713216d
MG
4732 early_node_map[i].start_pfn,
4733 early_node_map[i].end_pfn);
4734
4735 /* Initialise every node */
708614e6 4736 mminit_verify_pageflags_layout();
8ef82866 4737 setup_nr_node_ids();
c713216d
MG
4738 for_each_online_node(nid) {
4739 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 4740 free_area_init_node(nid, NULL,
c713216d 4741 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
4742
4743 /* Any memory on that node */
4744 if (pgdat->node_present_pages)
4745 node_set_state(nid, N_HIGH_MEMORY);
4746 check_for_regular_memory(pgdat);
c713216d
MG
4747 }
4748}
2a1e274a 4749
7e63efef 4750static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
4751{
4752 unsigned long long coremem;
4753 if (!p)
4754 return -EINVAL;
4755
4756 coremem = memparse(p, &p);
7e63efef 4757 *core = coremem >> PAGE_SHIFT;
2a1e274a 4758
7e63efef 4759 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
4760 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4761
4762 return 0;
4763}
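/*
 * Editor's illustration (assuming 4K pages): booting with
 * "kernelcore=512M" makes memparse() return 536870912, so
 * *core = 536870912 >> 12 = 131072 pages.
 */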
ed7ed365 4764
7e63efef
MG
4765/*
4766 * kernelcore=size sets the amount of memory for use for allocations that
4767 * cannot be reclaimed or migrated.
4768 */
4769static int __init cmdline_parse_kernelcore(char *p)
4770{
4771 return cmdline_parse_core(p, &required_kernelcore);
4772}
4773
4774/*
4775 * movablecore=size sets the amount of memory for use for allocations that
4776 * can be reclaimed or migrated.
4777 */
4778static int __init cmdline_parse_movablecore(char *p)
4779{
4780 return cmdline_parse_core(p, &required_movablecore);
4781}
4782
ed7ed365 4783early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 4784early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 4785
c713216d
MG
4786#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4787
0e0b864e 4788/**
88ca3b94
RD
4789 * set_dma_reserve - set the specified number of pages reserved in the first zone
4790 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
4791 *
4792 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 4793 * In the DMA zone, a significant percentage may be consumed by the kernel image
4794 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
4795 * function may optionally be used to account for unfreeable pages in the
4796 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4797 * smaller per-cpu batchsize.
0e0b864e
MG
4798 */
4799void __init set_dma_reserve(unsigned long new_dma_reserve)
4800{
4801 dma_reserve = new_dma_reserve;
4802}
4803
93b7504e 4804#ifndef CONFIG_NEED_MULTIPLE_NODES
08677214
YL
4805struct pglist_data __refdata contig_page_data = {
4806#ifndef CONFIG_NO_BOOTMEM
4807 .bdata = &bootmem_node_data[0]
4808#endif
4809 };
1da177e4 4810EXPORT_SYMBOL(contig_page_data);
93b7504e 4811#endif
1da177e4
LT
4812
4813void __init free_area_init(unsigned long *zones_size)
4814{
9109fb7b 4815 free_area_init_node(0, zones_size,
1da177e4
LT
4816 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4817}
1da177e4 4818
1da177e4
LT
4819static int page_alloc_cpu_notify(struct notifier_block *self,
4820 unsigned long action, void *hcpu)
4821{
4822 int cpu = (unsigned long)hcpu;
1da177e4 4823
8bb78442 4824 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
4825 drain_pages(cpu);
4826
4827 /*
4828 * Spill the event counters of the dead processor
4829 * into the current processors event counters.
4830 * This artificially elevates the count of the current
4831 * processor.
4832 */
f8891e5e 4833 vm_events_fold_cpu(cpu);
9f8f2172
CL
4834
4835 /*
4836 * Zero the differential counters of the dead processor
4837 * so that the vm statistics are consistent.
4838 *
4839 * This is only okay since the processor is dead and cannot
4840 * race with what we are doing.
4841 */
2244b95a 4842 refresh_cpu_vm_stats(cpu);
1da177e4
LT
4843 }
4844 return NOTIFY_OK;
4845}
1da177e4
LT
4846
4847void __init page_alloc_init(void)
4848{
4849 hotcpu_notifier(page_alloc_cpu_notify, 0);
4850}
4851
cb45b0e9
HA
4852/*
 4853 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4854 * or min_free_kbytes changes.
4855 */
4856static void calculate_totalreserve_pages(void)
4857{
4858 struct pglist_data *pgdat;
4859 unsigned long reserve_pages = 0;
2f6726e5 4860 enum zone_type i, j;
cb45b0e9
HA
4861
4862 for_each_online_pgdat(pgdat) {
4863 for (i = 0; i < MAX_NR_ZONES; i++) {
4864 struct zone *zone = pgdat->node_zones + i;
4865 unsigned long max = 0;
4866
4867 /* Find valid and maximum lowmem_reserve in the zone */
4868 for (j = i; j < MAX_NR_ZONES; j++) {
4869 if (zone->lowmem_reserve[j] > max)
4870 max = zone->lowmem_reserve[j];
4871 }
4872
41858966
MG
4873 /* we treat the high watermark as reserved pages. */
4874 max += high_wmark_pages(zone);
cb45b0e9
HA
4875
4876 if (max > zone->present_pages)
4877 max = zone->present_pages;
4878 reserve_pages += max;
4879 }
4880 }
4881 totalreserve_pages = reserve_pages;
4882}
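/*
 * Editor's illustration (assumed values): a zone whose largest
 * lowmem_reserve[] entry is 8192 pages and whose high watermark is
 * 1536 pages contributes min(8192 + 1536, present_pages) = 9728
 * pages (when present_pages is large enough) to totalreserve_pages.
 */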
4883
1da177e4
LT
4884/*
4885 * setup_per_zone_lowmem_reserve - called whenever
 4886 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4887 * has a correct pages reserved value, so an adequate number of
4888 * pages are left in the zone after a successful __alloc_pages().
4889 */
4890static void setup_per_zone_lowmem_reserve(void)
4891{
4892 struct pglist_data *pgdat;
2f6726e5 4893 enum zone_type j, idx;
1da177e4 4894
ec936fc5 4895 for_each_online_pgdat(pgdat) {
1da177e4
LT
4896 for (j = 0; j < MAX_NR_ZONES; j++) {
4897 struct zone *zone = pgdat->node_zones + j;
4898 unsigned long present_pages = zone->present_pages;
4899
4900 zone->lowmem_reserve[j] = 0;
4901
2f6726e5
CL
4902 idx = j;
4903 while (idx) {
1da177e4
LT
4904 struct zone *lower_zone;
4905
2f6726e5
CL
4906 idx--;
4907
1da177e4
LT
4908 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4909 sysctl_lowmem_reserve_ratio[idx] = 1;
4910
4911 lower_zone = pgdat->node_zones + idx;
4912 lower_zone->lowmem_reserve[j] = present_pages /
4913 sysctl_lowmem_reserve_ratio[idx];
4914 present_pages += lower_zone->present_pages;
4915 }
4916 }
4917 }
cb45b0e9
HA
4918
4919 /* update totalreserve_pages */
4920 calculate_totalreserve_pages();
1da177e4
LT
4921}
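
/*
 * Worked example with made-up numbers: on a node with ZONE_DMA (4096
 * pages) and ZONE_NORMAL (220000 pages), with
 * sysctl_lowmem_reserve_ratio[DMA] = 256, the loop above sets
 *
 *	DMA->lowmem_reserve[NORMAL] = 220000 / 256 = 859 pages
 *
 * so an allocation that could have been satisfied from ZONE_NORMAL may
 * only fall back to ZONE_DMA while roughly 859 DMA pages remain free.
 */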

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
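
/*
 * Worked example with made-up numbers: with min_free_kbytes = 4096 and
 * 4K pages, pages_min = 4096 >> 2 = 1024.  A lowmem zone holding half of
 * all lowmem gets tmp = 512, hence WMARK_MIN = 512, WMARK_LOW = 512 +
 * (512 >> 2) = 640 and WMARK_HIGH = 512 + (512 >> 1) = 768 pages.
 */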

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code.  A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
void calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->present_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}

static void __init setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}
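
/*
 * Worked example: a 4GB zone has gb = 4, so inactive_ratio =
 * int_sqrt(10 * 4) = 6, i.e. a 6:1 active:inactive target; about 1/7th
 * of the anonymous pages are kept on the inactive list.
 */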

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_wmarks();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
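
/*
 * Worked example: with 1GB of lowmem, lowmem_kbytes = 1048576 and
 * min_free_kbytes = int_sqrt(1048576 * 16) = 4096k, matching the 1024MB
 * row in the table above.
 */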

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (write)
		setup_per_zone_wmarks();
	return 0;
}
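
/*
 * Illustrative usage from userspace (not part of this file): writing the
 * sysctl goes through the handler above and recomputes every zone's
 * watermarks, e.g.
 *
 *	echo 8192 > /proc/sys/vm/min_free_kbytes
 */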

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks.  The lowmem reserve ratio only makes sense as a
 * function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */

int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_populated_zone(zone) {
		for_each_possible_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(
				per_cpu_ptr(zone->pageset, cpu), high);
		}
	}
	return 0;
}
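
/*
 * Worked example with made-up numbers: writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction makes a zone with 262144 present
 * pages set each CPU's pcp->high to 262144 / 8 = 32768 pages.
 */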

int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem_nopanic(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
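
/*
 * Illustrative sketch, not part of the original file: how a subsystem
 * might size a lookup table with alloc_large_system_hash().  The table
 * name, variables and scale value are assumptions for the example; the
 * call shape mirrors the early-boot hash tables elsewhere in the kernel.
 * Compiled out.
 */
#if 0
static struct hlist_head *example_hashtable __read_mostly;
static unsigned int example_hash_shift __read_mostly;

static void __init example_hash_init(void)
{
	example_hashtable =
		alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					0,		/* size from memory/cmdline */
					14,		/* 1 bucket per 16KB lowmem */
					HASH_EARLY,	/* allocate from bootmem */
					&example_hash_shift,
					NULL,		/* mask not needed */
					0);		/* default 1/16-of-memory cap */
}
#endif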

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}
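
/*
 * Illustrative sketch, not part of the original file: the migratetype
 * accessors used throughout mm are thin wrappers over the two bit-group
 * helpers above.  A minimal version could look like this, assuming the
 * PB_migrate/PB_migrate_end bit-range definitions from
 * <linux/pageblock-flags.h>.  Compiled out.
 */
#if 0
static inline int example_get_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

static inline void example_set_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
				  PB_migrate, PB_migrate_end);
}
#endif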

/*
 * This is designed as a helper function; please see page_isolation.c as
 * well.  It sets/clears a page block's type to ISOLATE; the page
 * allocator never allocates memory from an ISOLATE block.
 */

static bool
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;
	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * If the zone is ZONE_MOVABLE, it never contains immobile pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offline itself doesn't call
		 * shrink_slab() and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need any further checks; this is a _used_
		 * non-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both a memory hole page and a _used_ kernel
		 * page at boot.
		 */
		if (found > count)
			return false;
	}
	return true;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}

int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;
	int zone_idx;

	zone = page_zone(page);
	zone_idx = zone_idx(zone);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Immobile means "not-on-LRU" pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we'll fail.
	 */

out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif
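
/*
 * Worked example: for pfn 0x1236 at order 2, page - (pfn & ((1 << 2) - 1))
 * steps back 0x1236 & 3 = 2 pages to the order-2-aligned head at pfn
 * 0x1234; if that head is PageBuddy at order >= 2, the original page lies
 * inside a free buddy block.
 */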

static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_buddy,		"buddy"		},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
	{-1UL,				NULL		},
};

static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {
		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
	       page, atomic_read(&page->_count), page_mapcount(page),
	       page->mapping, page->index);
	dump_page_flags(page->flags);
}
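
/*
 * Illustrative output with made-up values: dumping a locked, dirty page
 * on the LRU might print
 *
 *	page:ffffea00000f0000 count:2 mapcount:0 mapping:ffff88003d0a92e8 index:0x7f
 *	page flags: 0x31(locked|dirty|lru)
 */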