/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		__ClearPageBuddy(page);
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}
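
/*
 * Illustration of the layout prep_compound_page() builds for order = 2:
 * page[0] is the head page (PG_head set); page[1]..page[3] are tail pages,
 * each with PG_tail set and ->first_page pointing back at page[0]. The
 * destructor and order live on the first tail page, as described above.
 */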

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
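
/*
 * Worked example of the index arithmetic above: the order-1 buddy of the
 * block at index 8 is 8 ^ (1 << 1) = 10, and merging the pair gives
 * combined index 8 & ~(1 << 1) = 8, i.e. an order-2 block spanning
 * pages 8-11.
 */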

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free runs of (1 << order) pages and marked with PG_buddy. Page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	struct page *buddy;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. If that is
	 * happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = __find_combined_index(page_idx, order);
		higher_page = page + combined_idx - page_idx;
		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		do {
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, page_private(page));
			trace_mm_page_pcpu_drain(page, 0, page_private(page));
		} while (--to_free && --batch_free && !list_empty(list));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free_direct(page, order);
	kmemcheck_free_shadow(page, order);

	for (i = 0; i < (1 << order); i++) {
		struct page *pg = page + i;

		if (PageAnon(pg))
			pg->mapping = NULL;
		bad += free_pages_check(pg);
	}
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
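
/*
 * Worked example: serving an order-0 request from an order-3 block with
 * expand(zone, page, 0, 3, area, mt). Each pass returns the upper half of
 * the remaining block to a free list: pages [4-7] go to the order-2 list,
 * [2-3] to order-1 and [1] to order-0, leaving page [0] for the caller.
 */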

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

/*
 * This array describes the order in which free lists are fallen back on
 * when the lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
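
/*
 * Reading the table: a MIGRATE_UNMOVABLE request whose own free lists are
 * empty falls back to MIGRATE_RECLAIMABLE blocks first, then
 * MIGRATE_MOVABLE. MIGRATE_RESERVE is never taken from here; it is
 * reached via the retry path in __rmqueue() instead.
 */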

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
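
/*
 * Example of the alignment above, assuming pageblock_nr_pages == 1024:
 * a page at pfn 5000 is rounded down to start_pfn 4096, so the whole
 * pageblock spanning pfns 4096-5119 is moved as one unit.
 */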

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE ||
					page_group_by_mobility_disabled) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */
			if (current_order >= pageblock_order)
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number in some conditions. This is useful for IO devices
		 * that can merge IO requests if the physical pages are
		 * ordered properly.
		 */
		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pcppages_bulk(zone, to_drain, pcp);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		local_irq_save(flags);
		pset = per_cpu_ptr(zone->pageset, cpu);

		pcp = &pset->pcp;
		free_pcppages_bulk(zone, pcp->count, pcp);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
}
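
/*
 * Illustrative pcp behaviour (the values are configuration-dependent):
 * with pcp->high == 96 and pcp->batch == 32, the free that takes the
 * list to 96 pages triggers free_pcppages_bulk(), returning 32 pages to
 * the buddy lists and leaving 64 cached on this CPU.
 */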

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
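
/*
 * Sketch of a typical (hypothetical) caller: allocate an order-2 block,
 * split it, then release each sub-page on its own:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	for (i = 0; i < 4; i++)
 *		__free_page(page + i);
 */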

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	unsigned long watermark;
	struct zone *zone;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	order = page_order(page);

	/* Obey watermarks as if the page was being allocated */
	watermark = low_wmark_pages(zone) + (1 << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return 0;

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);

	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	}

	return 1 << order;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
	    !fail_page_alloc.ignore_gfp_highmem_file ||
	    !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
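
/*
 * Worked example of the check above (illustrative numbers): for an
 * order-2 request with mark == 64 and no ALLOC_HIGH/ALLOC_HARDER, the
 * zone needs more than 64 free pages overall; excluding order-0 pages it
 * then needs more than 32, and excluding order-1 pages as well, more
 * than 16. This ensures some of the remaining free memory sits in blocks
 * big enough to satisfy the request.
 */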

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif	/* CONFIG_NUMA */

7fb1d9fc 1614/*
0798e519 1615 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1616 * a page.
1617 */
1618static struct page *
19770b32 1619get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
5117f45d 1620 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
3dd28266 1621 struct zone *preferred_zone, int migratetype)
753ee728 1622{
dd1a239f 1623 struct zoneref *z;
7fb1d9fc 1624 struct page *page = NULL;
54a6eb5c 1625 int classzone_idx;
5117f45d 1626 struct zone *zone;
9276b1bc
PJ
1627 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1628 int zlc_active = 0; /* set if using zonelist_cache */
1629 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1630
19770b32 1631 classzone_idx = zone_idx(preferred_zone);
9276b1bc 1632zonelist_scan:
7fb1d9fc 1633 /*
9276b1bc 1634 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1635 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1636 */
19770b32
MG
1637 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1638 high_zoneidx, nodemask) {
9276b1bc
PJ
1639 if (NUMA_BUILD && zlc_active &&
1640 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1641 continue;
7fb1d9fc 1642 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1643 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1644 goto try_next_zone;
7fb1d9fc 1645
41858966 1646 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
7fb1d9fc 1647 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b 1648 unsigned long mark;
fa5e084e
MG
1649 int ret;
1650
41858966 1651 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
fa5e084e
MG
1652 if (zone_watermark_ok(zone, order, mark,
1653 classzone_idx, alloc_flags))
1654 goto try_this_zone;
1655
1656 if (zone_reclaim_mode == 0)
1657 goto this_zone_full;
1658
1659 ret = zone_reclaim(zone, gfp_mask, order);
1660 switch (ret) {
1661 case ZONE_RECLAIM_NOSCAN:
1662 /* did not scan */
1663 goto try_next_zone;
1664 case ZONE_RECLAIM_FULL:
1665 /* scanned but unreclaimable */
1666 goto this_zone_full;
1667 default:
 1668 /* did we reclaim enough? */
1669 if (!zone_watermark_ok(zone, order, mark,
1670 classzone_idx, alloc_flags))
9276b1bc 1671 goto this_zone_full;
0798e519 1672 }
7fb1d9fc
RS
1673 }
1674
fa5e084e 1675try_this_zone:
3dd28266
MG
1676 page = buffered_rmqueue(preferred_zone, zone, order,
1677 gfp_mask, migratetype);
0798e519 1678 if (page)
7fb1d9fc 1679 break;
9276b1bc
PJ
1680this_zone_full:
1681 if (NUMA_BUILD)
1682 zlc_mark_zone_full(zonelist, z);
1683try_next_zone:
62bc62a8 1684 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
d395b734
MG
1685 /*
 1686 * We do zlc_setup() after the first zone is tried, but only
 1687 * if there are multiple nodes to make it worthwhile.
1688 */
9276b1bc
PJ
1689 allowednodes = zlc_setup(zonelist, alloc_flags);
1690 zlc_active = 1;
1691 did_zlc_setup = 1;
1692 }
54a6eb5c 1693 }
9276b1bc
PJ
1694
1695 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1696 /* Disable zlc cache for second zonelist scan */
1697 zlc_active = 0;
1698 goto zonelist_scan;
1699 }
7fb1d9fc 1700 return page;
753ee728
MH
1701}
1702
11e33f6a
MG
1703static inline int
1704should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1705 unsigned long pages_reclaimed)
1da177e4 1706{
11e33f6a
MG
1707 /* Do not loop if specifically requested */
1708 if (gfp_mask & __GFP_NORETRY)
1709 return 0;
1da177e4 1710
11e33f6a
MG
1711 /*
1712 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1713 * means __GFP_NOFAIL, but that may not be true in other
1714 * implementations.
1715 */
1716 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1717 return 1;
1718
1719 /*
1720 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1721 * specified, then we retry until we no longer reclaim any pages
1722 * (above), or we've reclaimed an order of pages at least as
1723 * large as the allocation's order. In both cases, if the
1724 * allocation still fails, we stop retrying.
1725 */
1726 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1727 return 1;
cf40bd16 1728
11e33f6a
MG
1729 /*
1730 * Don't let big-order allocations loop unless the caller
1731 * explicitly requests that.
1732 */
1733 if (gfp_mask & __GFP_NOFAIL)
1734 return 1;
1da177e4 1735
11e33f6a
MG
1736 return 0;
1737}
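/*
 * Illustrative worked examples, not part of this file: what the policy
 * above yields for a few masks, with PAGE_ALLOC_COSTLY_ORDER == 3:
 *
 *	should_alloc_retry(GFP_KERNEL, 0, 0)			-> 1
 *	should_alloc_retry(GFP_KERNEL | __GFP_NORETRY, 0, 0)	-> 0
 *	should_alloc_retry(GFP_KERNEL | __GFP_REPEAT, 4, 8)	-> 1 (8 < 1 << 4)
 *	should_alloc_retry(GFP_KERNEL | __GFP_REPEAT, 4, 32)	-> 0
 */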
933e312e 1738
11e33f6a
MG
1739static inline struct page *
1740__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1741 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1742 nodemask_t *nodemask, struct zone *preferred_zone,
1743 int migratetype)
11e33f6a
MG
1744{
1745 struct page *page;
1746
1747 /* Acquire the OOM killer lock for the zones in zonelist */
ff321fea 1748 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
11e33f6a 1749 schedule_timeout_uninterruptible(1);
1da177e4
LT
1750 return NULL;
1751 }
6b1de916 1752
11e33f6a
MG
1753 /*
 1754 * Go through the zonelist yet one more time, keeping a very high
 1755 * watermark here; this is only to catch a parallel OOM kill, and we
 1756 * must fail if we're still under heavy pressure.
1757 */
1758 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1759 order, zonelist, high_zoneidx,
5117f45d 1760 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
3dd28266 1761 preferred_zone, migratetype);
7fb1d9fc 1762 if (page)
11e33f6a
MG
1763 goto out;
1764
4365a567
KH
1765 if (!(gfp_mask & __GFP_NOFAIL)) {
1766 /* The OOM killer will not help higher order allocs */
1767 if (order > PAGE_ALLOC_COSTLY_ORDER)
1768 goto out;
03668b3c
DR
1769 /* The OOM killer does not needlessly kill tasks for lowmem */
1770 if (high_zoneidx < ZONE_NORMAL)
1771 goto out;
4365a567
KH
1772 /*
1773 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1774 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1775 * The caller should handle page allocation failure by itself if
1776 * it specifies __GFP_THISNODE.
1777 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1778 */
1779 if (gfp_mask & __GFP_THISNODE)
1780 goto out;
1781 }
11e33f6a 1782 /* Exhausted what can be done so it's blamo time */
4365a567 1783 out_of_memory(zonelist, gfp_mask, order, nodemask);
11e33f6a
MG
1784
1785out:
1786 clear_zonelist_oom(zonelist, gfp_mask);
1787 return page;
1788}
1789
56de7263
MG
1790#ifdef CONFIG_COMPACTION
1791/* Try memory compaction for high-order allocations before reclaim */
1792static struct page *
1793__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1794 struct zonelist *zonelist, enum zone_type high_zoneidx,
1795 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1796 int migratetype, unsigned long *did_some_progress)
1797{
1798 struct page *page;
1799
4f92e258 1800 if (!order || compaction_deferred(preferred_zone))
56de7263
MG
1801 return NULL;
1802
1803 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1804 nodemask);
1805 if (*did_some_progress != COMPACT_SKIPPED) {
1806
1807 /* Page migration frees to the PCP lists but we want merging */
1808 drain_pages(get_cpu());
1809 put_cpu();
1810
1811 page = get_page_from_freelist(gfp_mask, nodemask,
1812 order, zonelist, high_zoneidx,
1813 alloc_flags, preferred_zone,
1814 migratetype);
1815 if (page) {
4f92e258
MG
1816 preferred_zone->compact_considered = 0;
1817 preferred_zone->compact_defer_shift = 0;
56de7263
MG
1818 count_vm_event(COMPACTSUCCESS);
1819 return page;
1820 }
1821
1822 /*
 1823 * It's bad if a compaction run occurs and fails. The most
 1824 * likely reason is that pages exist, but not enough of them
 1825 * to satisfy the watermarks.
1826 */
1827 count_vm_event(COMPACTFAIL);
4f92e258 1828 defer_compaction(preferred_zone);
56de7263
MG
1829
1830 cond_resched();
1831 }
1832
1833 return NULL;
1834}
1835#else
1836static inline struct page *
1837__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1838 struct zonelist *zonelist, enum zone_type high_zoneidx,
1839 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1840 int migratetype, unsigned long *did_some_progress)
1841{
1842 return NULL;
1843}
1844#endif /* CONFIG_COMPACTION */
1845
11e33f6a
MG
1846/* The really slow allocator path where we enter direct reclaim */
1847static inline struct page *
1848__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1849 struct zonelist *zonelist, enum zone_type high_zoneidx,
5117f45d 1850 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
3dd28266 1851 int migratetype, unsigned long *did_some_progress)
11e33f6a
MG
1852{
1853 struct page *page = NULL;
1854 struct reclaim_state reclaim_state;
1855 struct task_struct *p = current;
9ee493ce 1856 bool drained = false;
11e33f6a
MG
1857
1858 cond_resched();
1859
1860 /* We now go into synchronous reclaim */
1861 cpuset_memory_pressure_bump();
11e33f6a
MG
1862 p->flags |= PF_MEMALLOC;
1863 lockdep_set_current_reclaim_state(gfp_mask);
1864 reclaim_state.reclaimed_slab = 0;
1865 p->reclaim_state = &reclaim_state;
1866
1867 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1868
1869 p->reclaim_state = NULL;
1870 lockdep_clear_current_reclaim_state();
1871 p->flags &= ~PF_MEMALLOC;
1872
1873 cond_resched();
1874
9ee493ce
MG
1875 if (unlikely(!(*did_some_progress)))
1876 return NULL;
11e33f6a 1877
9ee493ce
MG
1878retry:
1879 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1880 zonelist, high_zoneidx,
3dd28266
MG
1881 alloc_flags, preferred_zone,
1882 migratetype);
9ee493ce
MG
1883
1884 /*
1885 * If an allocation failed after direct reclaim, it could be because
 1886 * pages are pinned on the per-cpu lists. Drain them and try again.
1887 */
1888 if (!page && !drained) {
1889 drain_all_pages();
1890 drained = true;
1891 goto retry;
1892 }
1893
11e33f6a
MG
1894 return page;
1895}
1896
1da177e4 1897/*
11e33f6a
MG
 1898 * This is called in the allocator slow-path if the allocation request is of
 1899 * sufficient urgency to ignore watermarks and take other desperate measures.
1da177e4 1900 */
11e33f6a
MG
1901static inline struct page *
1902__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1903 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1904 nodemask_t *nodemask, struct zone *preferred_zone,
1905 int migratetype)
11e33f6a
MG
1906{
1907 struct page *page;
1908
1909 do {
1910 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1911 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
3dd28266 1912 preferred_zone, migratetype);
11e33f6a
MG
1913
1914 if (!page && gfp_mask & __GFP_NOFAIL)
0e093d99 1915 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
11e33f6a
MG
1916 } while (!page && (gfp_mask & __GFP_NOFAIL));
1917
1918 return page;
1919}
1920
1921static inline
1922void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1923 enum zone_type high_zoneidx)
1da177e4 1924{
dd1a239f
MG
1925 struct zoneref *z;
1926 struct zone *zone;
1da177e4 1927
11e33f6a
MG
1928 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1929 wakeup_kswapd(zone, order);
1930}
cf40bd16 1931
341ce06f
PZ
1932static inline int
1933gfp_to_alloc_flags(gfp_t gfp_mask)
1934{
1935 struct task_struct *p = current;
1936 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1937 const gfp_t wait = gfp_mask & __GFP_WAIT;
1da177e4 1938
a56f57ff 1939 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 1940 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 1941
341ce06f
PZ
1942 /*
1943 * The caller may dip into page reserves a bit more if the caller
1944 * cannot run direct reclaim, or if the caller has realtime scheduling
1945 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1946 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1947 */
e6223a3b 1948 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 1949
341ce06f
PZ
1950 if (!wait) {
1951 alloc_flags |= ALLOC_HARDER;
523b9458 1952 /*
341ce06f
PZ
1953 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1954 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
523b9458 1955 */
341ce06f 1956 alloc_flags &= ~ALLOC_CPUSET;
9d0ed60f 1957 } else if (unlikely(rt_task(p)) && !in_interrupt())
341ce06f
PZ
1958 alloc_flags |= ALLOC_HARDER;
1959
1960 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1961 if (!in_interrupt() &&
1962 ((p->flags & PF_MEMALLOC) ||
1963 unlikely(test_thread_flag(TIF_MEMDIE))))
1964 alloc_flags |= ALLOC_NO_WATERMARKS;
1da177e4 1965 }
6b1de916 1966
341ce06f
PZ
1967 return alloc_flags;
1968}
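/*
 * Illustrative worked examples, not part of this file: the flags computed
 * above for two common masks, assuming a non-realtime task outside
 * interrupt context without PF_MEMALLOC or TIF_MEMDIE set:
 *
 *	GFP_KERNEL -> ALLOC_WMARK_MIN | ALLOC_CPUSET
 *	GFP_ATOMIC -> ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *		      (ALLOC_CPUSET is dropped rather than failing the alloc)
 */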
1969
11e33f6a
MG
1970static inline struct page *
1971__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1972 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1973 nodemask_t *nodemask, struct zone *preferred_zone,
1974 int migratetype)
11e33f6a
MG
1975{
1976 const gfp_t wait = gfp_mask & __GFP_WAIT;
1977 struct page *page = NULL;
1978 int alloc_flags;
1979 unsigned long pages_reclaimed = 0;
1980 unsigned long did_some_progress;
1981 struct task_struct *p = current;
1da177e4 1982
72807a74
MG
1983 /*
1984 * In the slowpath, we sanity check order to avoid ever trying to
1985 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1986 * be using allocators in order of preference for an area that is
1987 * too large.
1988 */
1fc28b70
MG
1989 if (order >= MAX_ORDER) {
1990 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 1991 return NULL;
1fc28b70 1992 }
1da177e4 1993
952f3b51
CL
1994 /*
1995 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1996 * __GFP_NOWARN set) should not cause reclaim since the subsystem
 1997 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
 1998 * using a larger set of nodes after it has established that the
 1999 * allowed per-node queues are empty and that nodes are
 2000 * overallocated.
2001 */
2002 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2003 goto nopage;
2004
cc4a6851 2005restart:
11e33f6a 2006 wake_all_kswapd(order, zonelist, high_zoneidx);
1da177e4 2007
9bf2229f 2008 /*
7fb1d9fc
RS
2009 * OK, we're below the kswapd watermark and have kicked background
2010 * reclaim. Now things get more complex, so set up alloc_flags according
2011 * to how we want to proceed.
9bf2229f 2012 */
341ce06f 2013 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 2014
341ce06f 2015 /* This is the last chance, in general, before the goto nopage. */
19770b32 2016 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
341ce06f
PZ
2017 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2018 preferred_zone, migratetype);
7fb1d9fc
RS
2019 if (page)
2020 goto got_pg;
1da177e4 2021
b43a57bb 2022rebalance:
11e33f6a 2023 /* Allocate without watermarks if the context allows */
341ce06f
PZ
2024 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2025 page = __alloc_pages_high_priority(gfp_mask, order,
2026 zonelist, high_zoneidx, nodemask,
2027 preferred_zone, migratetype);
2028 if (page)
2029 goto got_pg;
1da177e4
LT
2030 }
2031
2032 /* Atomic allocations - we can't balance anything */
2033 if (!wait)
2034 goto nopage;
2035
341ce06f
PZ
2036 /* Avoid recursion of direct reclaim */
2037 if (p->flags & PF_MEMALLOC)
2038 goto nopage;
2039
6583bb64
DR
2040 /* Avoid allocations with no watermarks from looping endlessly */
2041 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2042 goto nopage;
2043
56de7263
MG
2044 /* Try direct compaction */
2045 page = __alloc_pages_direct_compact(gfp_mask, order,
2046 zonelist, high_zoneidx,
2047 nodemask,
2048 alloc_flags, preferred_zone,
2049 migratetype, &did_some_progress);
2050 if (page)
2051 goto got_pg;
2052
11e33f6a
MG
2053 /* Try direct reclaim and then allocating */
2054 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2055 zonelist, high_zoneidx,
2056 nodemask,
5117f45d 2057 alloc_flags, preferred_zone,
3dd28266 2058 migratetype, &did_some_progress);
11e33f6a
MG
2059 if (page)
2060 goto got_pg;
1da177e4 2061
e33c3b5e 2062 /*
11e33f6a
MG
2063 * If we failed to make any progress reclaiming, then we are
2064 * running out of options and have to consider going OOM
e33c3b5e 2065 */
11e33f6a
MG
2066 if (!did_some_progress) {
2067 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
7f33d49a
RW
2068 if (oom_killer_disabled)
2069 goto nopage;
11e33f6a
MG
2070 page = __alloc_pages_may_oom(gfp_mask, order,
2071 zonelist, high_zoneidx,
3dd28266
MG
2072 nodemask, preferred_zone,
2073 migratetype);
11e33f6a
MG
2074 if (page)
2075 goto got_pg;
1da177e4 2076
03668b3c
DR
2077 if (!(gfp_mask & __GFP_NOFAIL)) {
2078 /*
2079 * The oom killer is not called for high-order
2080 * allocations that may fail, so if no progress
2081 * is being made, there are no other options and
2082 * retrying is unlikely to help.
2083 */
2084 if (order > PAGE_ALLOC_COSTLY_ORDER)
2085 goto nopage;
2086 /*
2087 * The oom killer is not called for lowmem
2088 * allocations to prevent needlessly killing
2089 * innocent tasks.
2090 */
2091 if (high_zoneidx < ZONE_NORMAL)
2092 goto nopage;
2093 }
e2c55dc8 2094
ff0ceb9d
DR
2095 goto restart;
2096 }
1da177e4
LT
2097 }
2098
11e33f6a 2099 /* Check if we should retry the allocation */
a41f24ea 2100 pages_reclaimed += did_some_progress;
11e33f6a
MG
2101 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2102 /* Wait for some write requests to complete then retry */
0e093d99 2103 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
1da177e4
LT
2104 goto rebalance;
2105 }
2106
2107nopage:
2108 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2109 printk(KERN_WARNING "%s: page allocation failure."
2110 " order:%d, mode:0x%x\n",
2111 p->comm, order, gfp_mask);
2112 dump_stack();
578c2fd6 2113 show_mem();
1da177e4 2114 }
b1eeab67 2115 return page;
1da177e4 2116got_pg:
b1eeab67
VN
2117 if (kmemcheck_enabled)
2118 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1da177e4 2119 return page;
11e33f6a 2120
1da177e4 2121}
11e33f6a
MG
2122
2123/*
2124 * This is the 'heart' of the zoned buddy allocator.
2125 */
2126struct page *
2127__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2128 struct zonelist *zonelist, nodemask_t *nodemask)
2129{
2130 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5117f45d 2131 struct zone *preferred_zone;
11e33f6a 2132 struct page *page;
3dd28266 2133 int migratetype = allocflags_to_migratetype(gfp_mask);
11e33f6a 2134
dcce284a
BH
2135 gfp_mask &= gfp_allowed_mask;
2136
11e33f6a
MG
2137 lockdep_trace_alloc(gfp_mask);
2138
2139 might_sleep_if(gfp_mask & __GFP_WAIT);
2140
2141 if (should_fail_alloc_page(gfp_mask, order))
2142 return NULL;
2143
2144 /*
2145 * Check the zones suitable for the gfp_mask contain at least one
2146 * valid zone. It's possible to have an empty zonelist as a result
2147 * of GFP_THISNODE and a memoryless node
2148 */
2149 if (unlikely(!zonelist->_zonerefs->zone))
2150 return NULL;
2151
c0ff7453 2152 get_mems_allowed();
5117f45d
MG
2153 /* The preferred zone is used for statistics later */
2154 first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
c0ff7453
MX
2155 if (!preferred_zone) {
2156 put_mems_allowed();
5117f45d 2157 return NULL;
c0ff7453 2158 }
5117f45d
MG
2159
2160 /* First allocation attempt */
11e33f6a 2161 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
5117f45d 2162 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
3dd28266 2163 preferred_zone, migratetype);
11e33f6a
MG
2164 if (unlikely(!page))
2165 page = __alloc_pages_slowpath(gfp_mask, order,
5117f45d 2166 zonelist, high_zoneidx, nodemask,
3dd28266 2167 preferred_zone, migratetype);
c0ff7453 2168 put_mems_allowed();
11e33f6a 2169
4b4f278c 2170 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
11e33f6a 2171 return page;
1da177e4 2172}
d239171e 2173EXPORT_SYMBOL(__alloc_pages_nodemask);
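/*
 * Illustrative sketch, not part of this file: __alloc_pages_nodemask() is
 * normally reached through wrappers such as alloc_pages(). A hypothetical
 * order-0 caller could look like this; example_use_one_page() is made up
 * for illustration.
 */
static int __maybe_unused example_use_one_page(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);	/* may sleep */

	if (!page)
		return -ENOMEM;
	memset(page_address(page), 0, PAGE_SIZE);	/* stand-in for real work */
	__free_pages(page, 0);				/* same order as allocated */
	return 0;
}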
1da177e4
LT
2174
2175/*
2176 * Common helper functions.
2177 */
920c7a5d 2178unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 2179{
945a1113
AM
2180 struct page *page;
2181
2182 /*
2183 * __get_free_pages() returns a 32-bit address, which cannot represent
2184 * a highmem page
2185 */
2186 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2187
1da177e4
LT
2188 page = alloc_pages(gfp_mask, order);
2189 if (!page)
2190 return 0;
2191 return (unsigned long) page_address(page);
2192}
1da177e4
LT
2193EXPORT_SYMBOL(__get_free_pages);
2194
920c7a5d 2195unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 2196{
945a1113 2197 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 2198}
1da177e4
LT
2199EXPORT_SYMBOL(get_zeroed_page);
2200
2201void __pagevec_free(struct pagevec *pvec)
2202{
2203 int i = pagevec_count(pvec);
2204
4b4f278c
MG
2205 while (--i >= 0) {
2206 trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1da177e4 2207 free_hot_cold_page(pvec->pages[i], pvec->cold);
4b4f278c 2208 }
1da177e4
LT
2209}
2210
920c7a5d 2211void __free_pages(struct page *page, unsigned int order)
1da177e4 2212{
b5810039 2213 if (put_page_testzero(page)) {
1da177e4 2214 if (order == 0)
fc91668e 2215 free_hot_cold_page(page, 0);
1da177e4
LT
2216 else
2217 __free_pages_ok(page, order);
2218 }
2219}
2220
2221EXPORT_SYMBOL(__free_pages);
2222
920c7a5d 2223void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
2224{
2225 if (addr != 0) {
725d704e 2226 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
2227 __free_pages(virt_to_page((void *)addr), order);
2228 }
2229}
2230
2231EXPORT_SYMBOL(free_pages);
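/*
 * Illustrative sketch, not part of this file: the address-based helpers
 * above pair up as below, with get_zeroed_page() as the pre-zeroed
 * order-0 variant. example_addr_helpers() is made up for illustration.
 */
static int __maybe_unused example_addr_helpers(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	/* two pages */

	if (!addr)
		return -ENOMEM;
	free_pages(addr, 1);	/* must pass the order used at allocation */
	return 0;
}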
2232
2be0ffe2
TT
2233/**
 2234 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2235 * @size: the number of bytes to allocate
2236 * @gfp_mask: GFP flags for the allocation
2237 *
2238 * This function is similar to alloc_pages(), except that it allocates the
2239 * minimum number of pages to satisfy the request. alloc_pages() can only
2240 * allocate memory in power-of-two pages.
2241 *
2242 * This function is also limited by MAX_ORDER.
2243 *
2244 * Memory allocated by this function must be released by free_pages_exact().
2245 */
2246void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2247{
2248 unsigned int order = get_order(size);
2249 unsigned long addr;
2250
2251 addr = __get_free_pages(gfp_mask, order);
2252 if (addr) {
2253 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2254 unsigned long used = addr + PAGE_ALIGN(size);
2255
5bfd7560 2256 split_page(virt_to_page((void *)addr), order);
2be0ffe2
TT
2257 while (used < alloc_end) {
2258 free_page(used);
2259 used += PAGE_SIZE;
2260 }
2261 }
2262
2263 return (void *)addr;
2264}
2265EXPORT_SYMBOL(alloc_pages_exact);
2266
2267/**
2268 * free_pages_exact - release memory allocated via alloc_pages_exact()
2269 * @virt: the value returned by alloc_pages_exact.
2270 * @size: size of allocation, same value as passed to alloc_pages_exact().
2271 *
2272 * Release the memory allocated by a previous call to alloc_pages_exact.
2273 */
2274void free_pages_exact(void *virt, size_t size)
2275{
2276 unsigned long addr = (unsigned long)virt;
2277 unsigned long end = addr + PAGE_ALIGN(size);
2278
2279 while (addr < end) {
2280 free_page(addr);
2281 addr += PAGE_SIZE;
2282 }
2283}
2284EXPORT_SYMBOL(free_pages_exact);
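/*
 * Illustrative sketch, not part of this file: pairing the two functions
 * above. get_order(5 * PAGE_SIZE) is 3, so an order-based allocation
 * would tie up eight pages, while alloc_pages_exact() hands the three
 * unused tail pages back to the allocator. example_exact() is made up.
 */
static int __maybe_unused example_exact(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use exactly five pages ... */
	free_pages_exact(buf, 5 * PAGE_SIZE);	/* must pass the same size */
	return 0;
}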
2285
1da177e4
LT
2286static unsigned int nr_free_zone_pages(int offset)
2287{
dd1a239f 2288 struct zoneref *z;
54a6eb5c
MG
2289 struct zone *zone;
2290
e310fd43 2291 /* Just pick one node, since fallback list is circular */
1da177e4
LT
2292 unsigned int sum = 0;
2293
0e88460d 2294 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 2295
54a6eb5c 2296 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43 2297 unsigned long size = zone->present_pages;
41858966 2298 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
2299 if (size > high)
2300 sum += size - high;
1da177e4
LT
2301 }
2302
2303 return sum;
2304}
2305
2306/*
2307 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2308 */
2309unsigned int nr_free_buffer_pages(void)
2310{
af4ca457 2311 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 2312}
c2f1a551 2313EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
2314
2315/*
2316 * Amount of free RAM allocatable within all zones
2317 */
2318unsigned int nr_free_pagecache_pages(void)
2319{
2a1e274a 2320 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 2321}
08e0f6a9
CL
2322
2323static inline void show_node(struct zone *zone)
1da177e4 2324{
08e0f6a9 2325 if (NUMA_BUILD)
25ba77c1 2326 printk("Node %d ", zone_to_nid(zone));
1da177e4 2327}
1da177e4 2328
1da177e4
LT
2329void si_meminfo(struct sysinfo *val)
2330{
2331 val->totalram = totalram_pages;
2332 val->sharedram = 0;
d23ad423 2333 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 2334 val->bufferram = nr_blockdev_pages();
1da177e4
LT
2335 val->totalhigh = totalhigh_pages;
2336 val->freehigh = nr_free_highpages();
1da177e4
LT
2337 val->mem_unit = PAGE_SIZE;
2338}
2339
2340EXPORT_SYMBOL(si_meminfo);
2341
2342#ifdef CONFIG_NUMA
2343void si_meminfo_node(struct sysinfo *val, int nid)
2344{
2345 pg_data_t *pgdat = NODE_DATA(nid);
2346
2347 val->totalram = pgdat->node_present_pages;
d23ad423 2348 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 2349#ifdef CONFIG_HIGHMEM
1da177e4 2350 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
2351 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2352 NR_FREE_PAGES);
98d2b0eb
CL
2353#else
2354 val->totalhigh = 0;
2355 val->freehigh = 0;
2356#endif
1da177e4
LT
2357 val->mem_unit = PAGE_SIZE;
2358}
2359#endif
2360
2361#define K(x) ((x) << (PAGE_SHIFT-10))
2362
2363/*
2364 * Show free area list (used inside shift_scroll-lock stuff)
2365 * We also calculate the percentage fragmentation. We do this by counting the
2366 * memory on each free list with the exception of the first item on the list.
2367 */
2368void show_free_areas(void)
2369{
c7241913 2370 int cpu;
1da177e4
LT
2371 struct zone *zone;
2372
ee99c71c 2373 for_each_populated_zone(zone) {
c7241913
JS
2374 show_node(zone);
2375 printk("%s per-cpu:\n", zone->name);
1da177e4 2376
6b482c67 2377 for_each_online_cpu(cpu) {
1da177e4
LT
2378 struct per_cpu_pageset *pageset;
2379
99dcc3e5 2380 pageset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2381
3dfa5721
CL
2382 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2383 cpu, pageset->pcp.high,
2384 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
2385 }
2386 }
2387
a731286d
KM
2388 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2389 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
7b854121 2390 " unevictable:%lu"
b76146ed 2391 " dirty:%lu writeback:%lu unstable:%lu\n"
3701b033 2392 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4b02108a 2393 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
4f98a2fe 2394 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 2395 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
2396 global_page_state(NR_ISOLATED_ANON),
2397 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 2398 global_page_state(NR_INACTIVE_FILE),
a731286d 2399 global_page_state(NR_ISOLATED_FILE),
7b854121 2400 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 2401 global_page_state(NR_FILE_DIRTY),
ce866b34 2402 global_page_state(NR_WRITEBACK),
fd39fc85 2403 global_page_state(NR_UNSTABLE_NFS),
d23ad423 2404 global_page_state(NR_FREE_PAGES),
3701b033
KM
2405 global_page_state(NR_SLAB_RECLAIMABLE),
2406 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 2407 global_page_state(NR_FILE_MAPPED),
4b02108a 2408 global_page_state(NR_SHMEM),
a25700a5
AM
2409 global_page_state(NR_PAGETABLE),
2410 global_page_state(NR_BOUNCE));
1da177e4 2411
ee99c71c 2412 for_each_populated_zone(zone) {
1da177e4
LT
2413 int i;
2414
2415 show_node(zone);
2416 printk("%s"
2417 " free:%lukB"
2418 " min:%lukB"
2419 " low:%lukB"
2420 " high:%lukB"
4f98a2fe
RR
2421 " active_anon:%lukB"
2422 " inactive_anon:%lukB"
2423 " active_file:%lukB"
2424 " inactive_file:%lukB"
7b854121 2425 " unevictable:%lukB"
a731286d
KM
2426 " isolated(anon):%lukB"
2427 " isolated(file):%lukB"
1da177e4 2428 " present:%lukB"
4a0aa73f
KM
2429 " mlocked:%lukB"
2430 " dirty:%lukB"
2431 " writeback:%lukB"
2432 " mapped:%lukB"
4b02108a 2433 " shmem:%lukB"
4a0aa73f
KM
2434 " slab_reclaimable:%lukB"
2435 " slab_unreclaimable:%lukB"
c6a7f572 2436 " kernel_stack:%lukB"
4a0aa73f
KM
2437 " pagetables:%lukB"
2438 " unstable:%lukB"
2439 " bounce:%lukB"
2440 " writeback_tmp:%lukB"
1da177e4
LT
2441 " pages_scanned:%lu"
2442 " all_unreclaimable? %s"
2443 "\n",
2444 zone->name,
aa454840 2445 K(zone_nr_free_pages(zone)),
41858966
MG
2446 K(min_wmark_pages(zone)),
2447 K(low_wmark_pages(zone)),
2448 K(high_wmark_pages(zone)),
4f98a2fe
RR
2449 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2450 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2451 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2452 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 2453 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
2454 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2455 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 2456 K(zone->present_pages),
4a0aa73f
KM
2457 K(zone_page_state(zone, NR_MLOCK)),
2458 K(zone_page_state(zone, NR_FILE_DIRTY)),
2459 K(zone_page_state(zone, NR_WRITEBACK)),
2460 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 2461 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
2462 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2463 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
2464 zone_page_state(zone, NR_KERNEL_STACK) *
2465 THREAD_SIZE / 1024,
4a0aa73f
KM
2466 K(zone_page_state(zone, NR_PAGETABLE)),
2467 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2468 K(zone_page_state(zone, NR_BOUNCE)),
2469 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
1da177e4 2470 zone->pages_scanned,
93e4a89a 2471 (zone->all_unreclaimable ? "yes" : "no")
1da177e4
LT
2472 );
2473 printk("lowmem_reserve[]:");
2474 for (i = 0; i < MAX_NR_ZONES; i++)
2475 printk(" %lu", zone->lowmem_reserve[i]);
2476 printk("\n");
2477 }
2478
ee99c71c 2479 for_each_populated_zone(zone) {
8f9de51a 2480 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4
LT
2481
2482 show_node(zone);
2483 printk("%s: ", zone->name);
1da177e4
LT
2484
2485 spin_lock_irqsave(&zone->lock, flags);
2486 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
2487 nr[order] = zone->free_area[order].nr_free;
2488 total += nr[order] << order;
1da177e4
LT
2489 }
2490 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
2491 for (order = 0; order < MAX_ORDER; order++)
2492 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
2493 printk("= %lukB\n", K(total));
2494 }
2495
e6f3602d
LW
2496 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2497
1da177e4
LT
2498 show_swap_cache_info();
2499}
2500
19770b32
MG
2501static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2502{
2503 zoneref->zone = zone;
2504 zoneref->zone_idx = zone_idx(zone);
2505}
2506
1da177e4
LT
2507/*
2508 * Builds allocation fallback zone lists.
1a93205b
CL
2509 *
2510 * Add all populated zones of a node to the zonelist.
1da177e4 2511 */
f0c0b2b8
KH
2512static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2513 int nr_zones, enum zone_type zone_type)
1da177e4 2514{
1a93205b
CL
2515 struct zone *zone;
2516
98d2b0eb 2517 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 2518 zone_type++;
02a68a5e
CL
2519
2520 do {
2f6726e5 2521 zone_type--;
070f8032 2522 zone = pgdat->node_zones + zone_type;
1a93205b 2523 if (populated_zone(zone)) {
dd1a239f
MG
2524 zoneref_set_zone(zone,
2525 &zonelist->_zonerefs[nr_zones++]);
070f8032 2526 check_highest_zone(zone_type);
1da177e4 2527 }
02a68a5e 2528
2f6726e5 2529 } while (zone_type);
070f8032 2530 return nr_zones;
1da177e4
LT
2531}
2532
f0c0b2b8
KH
2533
2534/*
2535 * zonelist_order:
2536 * 0 = automatic detection of better ordering.
2537 * 1 = order by ([node] distance, -zonetype)
2538 * 2 = order by (-zonetype, [node] distance)
2539 *
2540 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2541 * the same zonelist. So only NUMA can configure this param.
2542 */
2543#define ZONELIST_ORDER_DEFAULT 0
2544#define ZONELIST_ORDER_NODE 1
2545#define ZONELIST_ORDER_ZONE 2
2546
2547/* zonelist order in the kernel.
2548 * set_zonelist_order() will set this to NODE or ZONE.
2549 */
2550static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2551static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2552
2553
1da177e4 2554#ifdef CONFIG_NUMA
f0c0b2b8
KH
 2555/* The value the user specified, possibly changed later via sysctl */
2556static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2557/* string for sysctl */
2558#define NUMA_ZONELIST_ORDER_LEN 16
2559char numa_zonelist_order[16] = "default";
2560
2561/*
 2562 * Interface to configure zonelist ordering.
 2563 * Command line option "numa_zonelist_order"
 2564 * = "[dD]efault" - default, automatic configuration.
 2565 * = "[nN]ode" - order by node locality, then by zone within node.
 2566 * = "[zZ]one" - order by zone, then by locality within zone.
2567 */
2568
2569static int __parse_numa_zonelist_order(char *s)
2570{
2571 if (*s == 'd' || *s == 'D') {
2572 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2573 } else if (*s == 'n' || *s == 'N') {
2574 user_zonelist_order = ZONELIST_ORDER_NODE;
2575 } else if (*s == 'z' || *s == 'Z') {
2576 user_zonelist_order = ZONELIST_ORDER_ZONE;
2577 } else {
2578 printk(KERN_WARNING
2579 "Ignoring invalid numa_zonelist_order value: "
2580 "%s\n", s);
2581 return -EINVAL;
2582 }
2583 return 0;
2584}
2585
2586static __init int setup_numa_zonelist_order(char *s)
2587{
2588 if (s)
2589 return __parse_numa_zonelist_order(s);
2590 return 0;
2591}
2592early_param("numa_zonelist_order", setup_numa_zonelist_order);
2593
2594/*
2595 * sysctl handler for numa_zonelist_order
2596 */
2597int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 2598 void __user *buffer, size_t *length,
f0c0b2b8
KH
2599 loff_t *ppos)
2600{
2601 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2602 int ret;
443c6f14 2603 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 2604
443c6f14 2605 mutex_lock(&zl_order_mutex);
f0c0b2b8 2606 if (write)
443c6f14 2607 strcpy(saved_string, (char*)table->data);
8d65af78 2608 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 2609 if (ret)
443c6f14 2610 goto out;
f0c0b2b8
KH
2611 if (write) {
2612 int oldval = user_zonelist_order;
2613 if (__parse_numa_zonelist_order((char*)table->data)) {
2614 /*
2615 * bogus value. restore saved string
2616 */
2617 strncpy((char*)table->data, saved_string,
2618 NUMA_ZONELIST_ORDER_LEN);
2619 user_zonelist_order = oldval;
4eaf3f64
HL
2620 } else if (oldval != user_zonelist_order) {
2621 mutex_lock(&zonelists_mutex);
1f522509 2622 build_all_zonelists(NULL);
4eaf3f64
HL
2623 mutex_unlock(&zonelists_mutex);
2624 }
f0c0b2b8 2625 }
443c6f14
AK
2626out:
2627 mutex_unlock(&zl_order_mutex);
2628 return ret;
f0c0b2b8
KH
2629}
2630
2631
62bc62a8 2632#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
2633static int node_load[MAX_NUMNODES];
2634
1da177e4 2635/**
4dc3b16b 2636 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2637 * @node: node whose fallback list we're appending
2638 * @used_node_mask: nodemask_t of already used nodes
2639 *
2640 * We use a number of factors to determine which is the next node that should
2641 * appear on a given node's fallback list. The node should not have appeared
2642 * already in @node's fallback list, and it should be the next closest node
2643 * according to the distance array (which contains arbitrary distance values
2644 * from each node to each node in the system), and should also prefer nodes
2645 * with no CPUs, since presumably they'll have very little allocation pressure
2646 * on them otherwise.
2647 * It returns -1 if no node is found.
2648 */
f0c0b2b8 2649static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2650{
4cf808eb 2651 int n, val;
1da177e4
LT
2652 int min_val = INT_MAX;
2653 int best_node = -1;
a70f7302 2654 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 2655
4cf808eb
LT
2656 /* Use the local node if we haven't already */
2657 if (!node_isset(node, *used_node_mask)) {
2658 node_set(node, *used_node_mask);
2659 return node;
2660 }
1da177e4 2661
37b07e41 2662 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2663
2664 /* Don't want a node to appear more than once */
2665 if (node_isset(n, *used_node_mask))
2666 continue;
2667
1da177e4
LT
2668 /* Use the distance array to find the distance */
2669 val = node_distance(node, n);
2670
4cf808eb
LT
2671 /* Penalize nodes under us ("prefer the next node") */
2672 val += (n < node);
2673
1da177e4 2674 /* Give preference to headless and unused nodes */
a70f7302
RR
2675 tmp = cpumask_of_node(n);
2676 if (!cpumask_empty(tmp))
1da177e4
LT
2677 val += PENALTY_FOR_NODE_WITH_CPUS;
2678
2679 /* Slight preference for less loaded node */
2680 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2681 val += node_load[n];
2682
2683 if (val < min_val) {
2684 min_val = val;
2685 best_node = n;
2686 }
2687 }
2688
2689 if (best_node >= 0)
2690 node_set(best_node, *used_node_mask);
2691
2692 return best_node;
2693}
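/*
 * Illustrative worked example, not from the source: let node 0 scan
 * candidates 1 and 2, with node_distance() == 20 for both, node 1 owning
 * CPUs, node 2 headless, and node_load[] all zero. Then
 *
 *	node 1: (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES
 *	node 2:  20 * MAX_NODE_LOAD * MAX_NUMNODES
 *
 * so the headless node 2 scores lower and is picked first, matching the
 * stated preference for nodes with no CPUs at equal distance.
 */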
2694
f0c0b2b8
KH
2695
2696/*
2697 * Build zonelists ordered by node and zones within node.
2698 * This results in maximum locality--normal zone overflows into local
2699 * DMA zone, if any--but risks exhausting DMA zone.
2700 */
2701static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2702{
f0c0b2b8 2703 int j;
1da177e4 2704 struct zonelist *zonelist;
f0c0b2b8 2705
54a6eb5c 2706 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2707 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2708 ;
2709 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2710 MAX_NR_ZONES - 1);
dd1a239f
MG
2711 zonelist->_zonerefs[j].zone = NULL;
2712 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2713}
2714
523b9458
CL
2715/*
2716 * Build gfp_thisnode zonelists
2717 */
2718static void build_thisnode_zonelists(pg_data_t *pgdat)
2719{
523b9458
CL
2720 int j;
2721 struct zonelist *zonelist;
2722
54a6eb5c
MG
2723 zonelist = &pgdat->node_zonelists[1];
2724 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2725 zonelist->_zonerefs[j].zone = NULL;
2726 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2727}
2728
f0c0b2b8
KH
2729/*
2730 * Build zonelists ordered by zone and nodes within zones.
2731 * This results in conserving DMA zone[s] until all Normal memory is
 2732 * exhausted, but can overflow to a remote node while memory
 2733 * may still exist in the local DMA zone.
2734 */
2735static int node_order[MAX_NUMNODES];
2736
2737static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2738{
f0c0b2b8
KH
2739 int pos, j, node;
2740 int zone_type; /* needs to be signed */
2741 struct zone *z;
2742 struct zonelist *zonelist;
2743
54a6eb5c
MG
2744 zonelist = &pgdat->node_zonelists[0];
2745 pos = 0;
2746 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2747 for (j = 0; j < nr_nodes; j++) {
2748 node = node_order[j];
2749 z = &NODE_DATA(node)->node_zones[zone_type];
2750 if (populated_zone(z)) {
dd1a239f
MG
2751 zoneref_set_zone(z,
2752 &zonelist->_zonerefs[pos++]);
54a6eb5c 2753 check_highest_zone(zone_type);
f0c0b2b8
KH
2754 }
2755 }
f0c0b2b8 2756 }
dd1a239f
MG
2757 zonelist->_zonerefs[pos].zone = NULL;
2758 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2759}
2760
2761static int default_zonelist_order(void)
2762{
2763 int nid, zone_type;
 2764 unsigned long low_kmem_size, total_size;
2765 struct zone *z;
2766 int average_size;
2767 /*
88393161 2768 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
f0c0b2b8
KH
2769 * If they are really small and used heavily, the system can fall
2770 * into OOM very easily.
e325c90f 2771 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
f0c0b2b8
KH
2772 */
 2773 /* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2774 low_kmem_size = 0;
2775 total_size = 0;
2776 for_each_online_node(nid) {
2777 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2778 z = &NODE_DATA(nid)->node_zones[zone_type];
2779 if (populated_zone(z)) {
2780 if (zone_type < ZONE_NORMAL)
2781 low_kmem_size += z->present_pages;
2782 total_size += z->present_pages;
e325c90f
DR
2783 } else if (zone_type == ZONE_NORMAL) {
2784 /*
2785 * If any node has only lowmem, then node order
2786 * is preferred to allow kernel allocations
2787 * locally; otherwise, they can easily infringe
2788 * on other nodes when there is an abundance of
2789 * lowmem available to allocate from.
2790 */
2791 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
2792 }
2793 }
2794 }
 2795 if (!low_kmem_size || /* there is no DMA area. */
2796 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2797 return ZONELIST_ORDER_NODE;
2798 /*
 2799 * Look into each node's config.
 2800 * If there is a node whose DMA/DMA32 memory covers a very large
 2801 * part of its local memory, NODE_ORDER may be suitable.
2802 */
37b07e41
LS
2803 average_size = total_size /
2804 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2805 for_each_online_node(nid) {
2806 low_kmem_size = 0;
2807 total_size = 0;
2808 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2809 z = &NODE_DATA(nid)->node_zones[zone_type];
2810 if (populated_zone(z)) {
2811 if (zone_type < ZONE_NORMAL)
2812 low_kmem_size += z->present_pages;
2813 total_size += z->present_pages;
2814 }
2815 }
2816 if (low_kmem_size &&
2817 total_size > average_size && /* ignore small node */
2818 low_kmem_size > total_size * 70/100)
2819 return ZONELIST_ORDER_NODE;
2820 }
2821 return ZONELIST_ORDER_ZONE;
2822}
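/*
 * Illustrative worked example, not from the source: assume every node
 * populates ZONE_NORMAL and one node carries 512MB of DMA32 out of 4GB
 * total. Globally, low_kmem_size is 1/8 of total_size, under the 1/2
 * threshold, and per node 1/8 is under the 70% threshold, so the function
 * above falls through to ZONELIST_ORDER_ZONE and the small DMA32 area is
 * conserved.
 */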
2823
2824static void set_zonelist_order(void)
2825{
2826 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2827 current_zonelist_order = default_zonelist_order();
2828 else
2829 current_zonelist_order = user_zonelist_order;
2830}
2831
2832static void build_zonelists(pg_data_t *pgdat)
2833{
2834 int j, node, load;
2835 enum zone_type i;
1da177e4 2836 nodemask_t used_mask;
f0c0b2b8
KH
2837 int local_node, prev_node;
2838 struct zonelist *zonelist;
2839 int order = current_zonelist_order;
1da177e4
LT
2840
2841 /* initialize zonelists */
523b9458 2842 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2843 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2844 zonelist->_zonerefs[0].zone = NULL;
2845 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2846 }
2847
2848 /* NUMA-aware ordering of nodes */
2849 local_node = pgdat->node_id;
62bc62a8 2850 load = nr_online_nodes;
1da177e4
LT
2851 prev_node = local_node;
2852 nodes_clear(used_mask);
f0c0b2b8 2853
f0c0b2b8
KH
2854 memset(node_order, 0, sizeof(node_order));
2855 j = 0;
2856
1da177e4 2857 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2858 int distance = node_distance(local_node, node);
2859
2860 /*
2861 * If another node is sufficiently far away then it is better
2862 * to reclaim pages in a zone before going off node.
2863 */
2864 if (distance > RECLAIM_DISTANCE)
2865 zone_reclaim_mode = 1;
2866
1da177e4
LT
2867 /*
2868 * We don't want to pressure a particular node.
2869 * So adding penalty to the first node in same
2870 * distance group to make it round-robin.
2871 */
9eeff239 2872 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2873 node_load[node] = load;
2874
1da177e4
LT
2875 prev_node = node;
2876 load--;
f0c0b2b8
KH
2877 if (order == ZONELIST_ORDER_NODE)
2878 build_zonelists_in_node_order(pgdat, node);
2879 else
2880 node_order[j++] = node; /* remember order */
2881 }
1da177e4 2882
f0c0b2b8
KH
2883 if (order == ZONELIST_ORDER_ZONE) {
2884 /* calculate node order -- i.e., DMA last! */
2885 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2886 }
523b9458
CL
2887
2888 build_thisnode_zonelists(pgdat);
1da177e4
LT
2889}
2890
9276b1bc 2891/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2892static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2893{
54a6eb5c
MG
2894 struct zonelist *zonelist;
2895 struct zonelist_cache *zlc;
dd1a239f 2896 struct zoneref *z;
9276b1bc 2897
54a6eb5c
MG
2898 zonelist = &pgdat->node_zonelists[0];
2899 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2900 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
2901 for (z = zonelist->_zonerefs; z->zone; z++)
2902 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
2903}
2904
7aac7898
LS
2905#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2906/*
2907 * Return node id of node used for "local" allocations.
2908 * I.e., first node id of first zone in arg node's generic zonelist.
2909 * Used for initializing percpu 'numa_mem', which is used primarily
2910 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
2911 */
2912int local_memory_node(int node)
2913{
2914 struct zone *zone;
2915
2916 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2917 gfp_zone(GFP_KERNEL),
2918 NULL,
2919 &zone);
2920 return zone->node;
2921}
2922#endif
f0c0b2b8 2923
1da177e4
LT
2924#else /* CONFIG_NUMA */
2925
f0c0b2b8
KH
2926static void set_zonelist_order(void)
2927{
2928 current_zonelist_order = ZONELIST_ORDER_ZONE;
2929}
2930
2931static void build_zonelists(pg_data_t *pgdat)
1da177e4 2932{
19655d34 2933 int node, local_node;
54a6eb5c
MG
2934 enum zone_type j;
2935 struct zonelist *zonelist;
1da177e4
LT
2936
2937 local_node = pgdat->node_id;
1da177e4 2938
54a6eb5c
MG
2939 zonelist = &pgdat->node_zonelists[0];
2940 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 2941
54a6eb5c
MG
2942 /*
2943 * Now we build the zonelist so that it contains the zones
2944 * of all the other nodes.
2945 * We don't want to pressure a particular node, so when
2946 * building the zones for node N, we make sure that the
2947 * zones coming right after the local ones are those from
2948 * node N+1 (modulo N)
2949 */
2950 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2951 if (!node_online(node))
2952 continue;
2953 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2954 MAX_NR_ZONES - 1);
1da177e4 2955 }
54a6eb5c
MG
2956 for (node = 0; node < local_node; node++) {
2957 if (!node_online(node))
2958 continue;
2959 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2960 MAX_NR_ZONES - 1);
2961 }
2962
dd1a239f
MG
2963 zonelist->_zonerefs[j].zone = NULL;
2964 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
2965}
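/*
 * Illustrative worked example, not from the source: with four online
 * nodes and local_node == 2, the two loops above append the remote nodes
 * as 3, 0, 1 after node 2's own zones -- node N is always followed by
 * node N+1 modulo the node count, as the comment describes.
 */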
2966
9276b1bc 2967/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2968static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2969{
54a6eb5c 2970 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
2971}
2972
1da177e4
LT
2973#endif /* CONFIG_NUMA */
2974
99dcc3e5
CL
2975/*
2976 * Boot pageset table. One per cpu which is going to be used for all
2977 * zones and all nodes. The parameters will be set in such a way
2978 * that an item put on a list will immediately be handed over to
2979 * the buddy list. This is safe since pageset manipulation is done
2980 * with interrupts disabled.
2981 *
2982 * The boot_pagesets must be kept even after bootup is complete for
2983 * unused processors and/or zones. They do play a role for bootstrapping
2984 * hotplugged processors.
2985 *
2986 * zoneinfo_show() and maybe other functions do
2987 * not check if the processor is online before following the pageset pointer.
2988 * Other parts of the kernel may not check if the zone is available.
2989 */
2990static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2991static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 2992static void setup_zone_pageset(struct zone *zone);
99dcc3e5 2993
4eaf3f64
HL
2994/*
2995 * Global mutex to protect against size modification of zonelists
2996 * as well as to serialize pageset setup for the new populated zone.
2997 */
2998DEFINE_MUTEX(zonelists_mutex);
2999
9b1a4d38 3000/* The return type is int only for the sake of stop_machine() */
1f522509 3001static __init_refok int __build_all_zonelists(void *data)
1da177e4 3002{
6811378e 3003 int nid;
99dcc3e5 3004 int cpu;
9276b1bc 3005
7f9cfb31
BL
3006#ifdef CONFIG_NUMA
3007 memset(node_load, 0, sizeof(node_load));
3008#endif
9276b1bc 3009 for_each_online_node(nid) {
7ea1530a
CL
3010 pg_data_t *pgdat = NODE_DATA(nid);
3011
3012 build_zonelists(pgdat);
3013 build_zonelist_cache(pgdat);
9276b1bc 3014 }
99dcc3e5
CL
3015
3016 /*
3017 * Initialize the boot_pagesets that are going to be used
3018 * for bootstrapping processors. The real pagesets for
3019 * each zone will be allocated later when the per cpu
3020 * allocator is available.
3021 *
3022 * boot_pagesets are used also for bootstrapping offline
3023 * cpus if the system is already booted because the pagesets
3024 * are needed to initialize allocators on a specific cpu too.
3025 * F.e. the percpu allocator needs the page allocator which
3026 * needs the percpu allocator in order to allocate its pagesets
3027 * (a chicken-egg dilemma).
3028 */
7aac7898 3029 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3030 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3031
7aac7898
LS
3032#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3033 /*
3034 * We now know the "local memory node" for each node--
3035 * i.e., the node of the first zone in the generic zonelist.
3036 * Set up numa_mem percpu variable for on-line cpus. During
3037 * boot, only the boot cpu should be on-line; we'll init the
3038 * secondary cpus' numa_mem as they come on-line. During
3039 * node/memory hotplug, we'll fixup all on-line cpus.
3040 */
3041 if (cpu_online(cpu))
3042 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3043#endif
3044 }
3045
6811378e
YG
3046 return 0;
3047}
3048
4eaf3f64
HL
3049/*
3050 * Called with zonelists_mutex held always
3051 * unless system_state == SYSTEM_BOOTING.
3052 */
1f522509 3053void build_all_zonelists(void *data)
6811378e 3054{
f0c0b2b8
KH
3055 set_zonelist_order();
3056
6811378e 3057 if (system_state == SYSTEM_BOOTING) {
423b41d7 3058 __build_all_zonelists(NULL);
68ad8df4 3059 mminit_verify_zonelist();
6811378e
YG
3060 cpuset_init_current_mems_allowed();
3061 } else {
183ff22b 3062 /* we have to stop all cpus to guarantee there is no user
6811378e 3063 of zonelist */
e9959f0f
KH
3064#ifdef CONFIG_MEMORY_HOTPLUG
3065 if (data)
3066 setup_zone_pageset((struct zone *)data);
3067#endif
3068 stop_machine(__build_all_zonelists, NULL, NULL);
6811378e
YG
3069 /* cpuset refresh routine should be here */
3070 }
bd1e22b8 3071 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3072 /*
3073 * Disable grouping by mobility if the number of pages in the
3074 * system is too low to allow the mechanism to work. It would be
3075 * more accurate, but expensive to check per-zone. This check is
3076 * made on memory-hotadd so a system can start with mobility
3077 * disabled and enable it later
3078 */
d9c23400 3079 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3080 page_group_by_mobility_disabled = 1;
3081 else
3082 page_group_by_mobility_disabled = 0;
3083
3084 printk("Built %i zonelists in %s order, mobility grouping %s. "
3085 "Total pages: %ld\n",
62bc62a8 3086 nr_online_nodes,
f0c0b2b8 3087 zonelist_order_name[current_zonelist_order],
9ef9acb0 3088 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3089 vm_total_pages);
3090#ifdef CONFIG_NUMA
3091 printk("Policy zone: %s\n", zone_names[policy_zone]);
3092#endif
1da177e4
LT
3093}
3094
3095/*
3096 * Helper functions to size the waitqueue hash table.
3097 * Essentially these want to choose hash table sizes sufficiently
3098 * large so that collisions trying to wait on pages are rare.
3099 * But in fact, the number of active page waitqueues on typical
 3100 * systems is ridiculously low, less than 200. So even this is
 3101 * conservative, even though it seems large.
3102 *
3103 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3104 * waitqueues, i.e. the size of the waitq table given the number of pages.
3105 */
3106#define PAGES_PER_WAITQUEUE 256
3107
cca448fe 3108#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3109static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3110{
3111 unsigned long size = 1;
3112
3113 pages /= PAGES_PER_WAITQUEUE;
3114
3115 while (size < pages)
3116 size <<= 1;
3117
3118 /*
3119 * Once we have dozens or even hundreds of threads sleeping
3120 * on IO we've got bigger problems than wait queue collision.
3121 * Limit the size of the wait table to a reasonable size.
3122 */
3123 size = min(size, 4096UL);
3124
3125 return max(size, 4UL);
3126}
cca448fe
YG
3127#else
3128/*
3129 * A zone's size might be changed by hot-add, so it is not possible to determine
3130 * a suitable size for its wait_table. So we use the maximum size now.
3131 *
 3132 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
3133 *
3134 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3135 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3136 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3137 *
 3138 * The maximum number of entries is reached when a zone's memory is
 3139 * (512K + 256) pages or more, sized the traditional way (see above). It equals:
3140 *
3141 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3142 * ia64(16K page size) : = ( 8G + 4M)byte.
3143 * powerpc (64K page size) : = (32G +16M)byte.
3144 */
3145static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3146{
3147 return 4096UL;
3148}
3149#endif
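/*
 * Illustrative worked example, not from the source (!MEMORY_HOTPLUG
 * case): a 1GB zone of 4K pages spans 262144 pages, and
 * 262144 / PAGES_PER_WAITQUEUE == 1024, already a power of two, so the
 * table gets 1024 entries. The min()/max() clamps only bite below
 * 4 * 256 pages or above 4096 * 256 pages.
 */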
1da177e4
LT
3150
3151/*
3152 * This is an integer logarithm so that shifts can be used later
3153 * to extract the more random high bits from the multiplicative
3154 * hash function before the remainder is taken.
3155 */
3156static inline unsigned long wait_table_bits(unsigned long size)
3157{
3158 return ffz(~size);
3159}
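/*
 * Illustrative example, not from the source: for size == 1024, only bit
 * 10 of ~size is clear, so ffz(~size) returns 10 -- the log2 shift count
 * the waitqueue hashing uses later.
 */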
3160
3161#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3162
56fd56b8 3163/*
d9c23400 3164 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3165 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3166 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3167 * higher will lead to a bigger reserve which will get freed as contiguous
 3168 * blocks as reclaim kicks in.
3169 */
3170static void setup_zone_migrate_reserve(struct zone *zone)
3171{
3172 unsigned long start_pfn, pfn, end_pfn;
3173 struct page *page;
78986a67
MG
3174 unsigned long block_migratetype;
3175 int reserve;
56fd56b8
MG
3176
3177 /* Get the start pfn, end pfn and the number of blocks to reserve */
3178 start_pfn = zone->zone_start_pfn;
3179 end_pfn = start_pfn + zone->spanned_pages;
41858966 3180 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3181 pageblock_order;
56fd56b8 3182
78986a67
MG
3183 /*
3184 * Reserve blocks are generally in place to help high-order atomic
3185 * allocations that are short-lived. A min_free_kbytes value that
3186 * would result in more than 2 reserve blocks for atomic allocations
3187 * is assumed to be in place to help anti-fragmentation for the
3188 * future allocation of hugepages at runtime.
3189 */
3190 reserve = min(2, reserve);
3191
d9c23400 3192 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3193 if (!pfn_valid(pfn))
3194 continue;
3195 page = pfn_to_page(pfn);
3196
344c790e
AL
3197 /* Watch out for overlapping nodes */
3198 if (page_to_nid(page) != zone_to_nid(zone))
3199 continue;
3200
56fd56b8
MG
3201 /* Blocks with reserved pages will never free, skip them. */
3202 if (PageReserved(page))
3203 continue;
3204
3205 block_migratetype = get_pageblock_migratetype(page);
3206
3207 /* If this block is reserved, account for it */
3208 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3209 reserve--;
3210 continue;
3211 }
3212
3213 /* Suitable for reserving if this block is movable */
3214 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3215 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3216 move_freepages_block(zone, page, MIGRATE_RESERVE);
3217 reserve--;
3218 continue;
3219 }
3220
3221 /*
3222 * If the reserve is met and this is a previous reserved block,
3223 * take it back
3224 */
3225 if (block_migratetype == MIGRATE_RESERVE) {
3226 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3227 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3228 }
3229 }
3230}
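/*
 * Illustrative worked example, not from the source: assuming
 * pageblock_order == 10 (1024-page blocks) and min_wmark_pages(zone) ==
 * 1500, roundup(1500, 1024) >> 10 == 2 blocks, which the min(2, reserve)
 * clamp leaves unchanged; any larger watermark would still be capped at
 * two MIGRATE_RESERVE blocks.
 */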
ac0e5b7a 3231
1da177e4
LT
3232/*
3233 * Initially all pages are reserved - free ones are freed
3234 * up by free_all_bootmem() once the early boot process is
3235 * done. Non-atomic initialization, single-pass.
3236 */
c09b4240 3237void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3238 unsigned long start_pfn, enum memmap_context context)
1da177e4 3239{
1da177e4 3240 struct page *page;
29751f69
AW
3241 unsigned long end_pfn = start_pfn + size;
3242 unsigned long pfn;
86051ca5 3243 struct zone *z;
1da177e4 3244
22b31eec
HD
3245 if (highest_memmap_pfn < end_pfn - 1)
3246 highest_memmap_pfn = end_pfn - 1;
3247
86051ca5 3248 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3249 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3250 /*
3251 * There can be holes in boot-time mem_map[]s
3252 * handed to this function. They do not
3253 * exist on hotplugged memory.
3254 */
3255 if (context == MEMMAP_EARLY) {
3256 if (!early_pfn_valid(pfn))
3257 continue;
3258 if (!early_pfn_in_nid(pfn, nid))
3259 continue;
3260 }
d41dee36
AW
3261 page = pfn_to_page(pfn);
3262 set_page_links(page, zone, nid, pfn);
708614e6 3263 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3264 init_page_count(page);
1da177e4
LT
3265 reset_page_mapcount(page);
3266 SetPageReserved(page);
b2a0ac88
MG
3267 /*
3268 * Mark the block movable so that blocks are reserved for
3269 * movable at startup. This will force kernel allocations
3270 * to reserve their blocks rather than leaking throughout
3271 * the address space during boot when many long-lived
56fd56b8
MG
3272 * kernel allocations are made. Later some blocks near
3273 * the start are marked MIGRATE_RESERVE by
 3274 * setup_zone_migrate_reserve().
86051ca5
KH
3275 *
 3276 * The bitmap is created for the zone's valid pfn range, but the
 3277 * memmap can be created for invalid pages (for alignment), so check
 3278 * here that we do not call set_pageblock_migratetype() against a
 3279 * pfn outside the zone.
b2a0ac88 3280 */
86051ca5
KH
3281 if ((z->zone_start_pfn <= pfn)
3282 && (pfn < z->zone_start_pfn + z->spanned_pages)
3283 && !(pfn & (pageblock_nr_pages - 1)))
3284 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3285
3286 INIT_LIST_HEAD(&page->lru);
3287#ifdef WANT_PAGE_VIRTUAL
3288 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3289 if (!is_highmem_idx(zone))
3290 set_page_address(page, __va(pfn << PAGE_SHIFT));
3291#endif
3292 }
3293}
3294
3295static void __meminit zone_init_free_lists(struct zone *zone)
3296{
3297 int order, t;
3298 for_each_migratetype_order(order, t) {
3299 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3300 zone->free_area[order].nr_free = 0;
3301 }
3302}
3303
3304#ifndef __HAVE_ARCH_MEMMAP_INIT
3305#define memmap_init(size, nid, zone, start_pfn) \
3306 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3307#endif
3308
3309static int zone_batchsize(struct zone *zone)
3310{
3311#ifdef CONFIG_MMU
3312 int batch;
3313
3314 /*
3315 * The per-cpu-pages pools are set to around 1000th of the
3316 * size of the zone. But no more than 1/2 of a meg.
3317 *
3318 * OK, so we don't know how big the cache is. So guess.
3319 */
3320 batch = zone->present_pages / 1024;
3321 if (batch * PAGE_SIZE > 512 * 1024)
3322 batch = (512 * 1024) / PAGE_SIZE;
3323 batch /= 4; /* We effectively *= 4 below */
3324 if (batch < 1)
3325 batch = 1;
3326
3327 /*
3328 * Clamp the batch to a 2^n - 1 value. Having a power
3329 * of 2 value was found to be more likely to have
3330 * suboptimal cache aliasing properties in some cases.
3331 *
3332 * For example if 2 tasks are alternately allocating
3333 * batches of pages, one task can end up with a lot
3334 * of pages of one half of the possible page colors
3335 * and the other with pages of the other colors.
3336 */
3337 batch = rounddown_pow_of_two(batch + batch/2) - 1;
3338
3339 return batch;
3340
3341#else
3342 /* The deferral and batching of frees should be suppressed under NOMMU
3343 * conditions.
3344 *
3345 * The problem is that NOMMU needs to be able to allocate large chunks
3346 * of contiguous memory as there's no hardware page translation to
3347 * assemble apparent contiguous memory from discontiguous pages.
3348 *
3349 * Queueing large contiguous runs of pages for batching, however,
3350 * causes the pages to actually be freed in smaller chunks. As there
3351 * can be a significant delay between the individual batches being
3352 * recycled, this leads to the once large chunks of space being
3353 * fragmented and becoming unavailable for high-order allocations.
3354 */
3355 return 0;
3356#endif
3357}
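/*
 * Worked example (editor's illustration): a 1GB zone with 4K pages has
 * present_pages = 262144, so batch starts at 262144 / 1024 = 256; that
 * is 1MB, above the 512KB cap, so it drops to 128, then to 32 after the
 * division by four. rounddown_pow_of_two(32 + 16) - 1 = 31 is the final
 * per-cpu batch size.
 */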
3358
3359static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3360{
3361 struct per_cpu_pages *pcp;
3362 int migratetype;
3363
3364 memset(p, 0, sizeof(*p));
3365
3366 pcp = &p->pcp;
3367 pcp->count = 0;
3368 pcp->high = 6 * batch;
3369 pcp->batch = max(1UL, 1 * batch);
3370 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3371 INIT_LIST_HEAD(&pcp->lists[migratetype]);
3372}
3373
3374/*
3375 * setup_pagelist_highmark() sets the high-water mark of the hot per-cpu
3376 * pagelist of pageset p to the value high.
3377 */
3378
3379static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3380 unsigned long high)
3381{
3382 struct per_cpu_pages *pcp;
3383
3384 pcp = &p->pcp;
3385 pcp->high = high;
3386 pcp->batch = max(1UL, high/4);
3387 if ((high/4) > (PAGE_SHIFT * 8))
3388 pcp->batch = PAGE_SHIFT * 8;
3389}
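/*
 * Illustration (editor's note): setup_pagelist_highmark(p, 1000) gives
 * pcp->high = 1000 and a tentative batch of 250; with PAGE_SHIFT == 12
 * the clamp above then limits the batch to 12 * 8 = 96 pages.
 */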
3390
3391static __meminit void setup_zone_pageset(struct zone *zone)
3392{
3393 int cpu;
3394
3395 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3396
3397 for_each_possible_cpu(cpu) {
3398 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3399
3400 setup_pageset(pcp, zone_batchsize(zone));
3401
3402 if (percpu_pagelist_fraction)
3403 setup_pagelist_highmark(pcp,
3404 (zone->present_pages /
3405 percpu_pagelist_fraction));
3406 }
3407}
3408
3409/*
3410 * Allocate per cpu pagesets and initialize them.
3411 * Before this call only boot pagesets were available.
3412 */
3413void __init setup_per_cpu_pageset(void)
3414{
3415 struct zone *zone;
3416
3417 for_each_populated_zone(zone)
3418 setup_zone_pageset(zone);
3419}
3420
3421static noinline __init_refok
3422int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3423{
3424 int i;
3425 struct pglist_data *pgdat = zone->zone_pgdat;
3426 size_t alloc_size;
3427
3428 /*
3429 * The per-page waitqueue mechanism uses hashed waitqueues
3430 * per zone.
3431 */
3432 zone->wait_table_hash_nr_entries =
3433 wait_table_hash_nr_entries(zone_size_pages);
3434 zone->wait_table_bits =
3435 wait_table_bits(zone->wait_table_hash_nr_entries);
3436 alloc_size = zone->wait_table_hash_nr_entries
3437 * sizeof(wait_queue_head_t);
3438
3439 if (!slab_is_available()) {
3440 zone->wait_table = (wait_queue_head_t *)
3441 alloc_bootmem_node(pgdat, alloc_size);
3442 } else {
3443 /*
3444 * This case means that a zone whose size was 0 gets new memory
3445 * via memory hot-add.
3446 * But it may be the case that a new node was hot-added. In
3447 * this case vmalloc() will not be able to use this new node's
3448 * memory - this wait_table must be initialized to use this new
3449 * node itself as well.
3450 * To use this new node's memory, further consideration will be
3451 * necessary.
3452 */
3453 zone->wait_table = vmalloc(alloc_size);
3454 }
3455 if (!zone->wait_table)
3456 return -ENOMEM;
3457
3458 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3459 init_waitqueue_head(zone->wait_table + i);
3460
3461 return 0;
3462}
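/*
 * Editor's note: the sizing helpers used above are defined elsewhere in
 * this file. Assuming the usual PAGES_PER_WAITQUEUE of 256 with a
 * 4096-entry ceiling, a 1GB zone (262144 pages) would get
 * min(4096, 1024) = 1024 hashed wait queue heads, and wait_table_bits()
 * is simply log2 of that entry count, used when hashing a page into the
 * table.
 */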
3463
3464static int __zone_pcp_update(void *data)
3465{
3466 struct zone *zone = data;
3467 int cpu;
3468 unsigned long batch = zone_batchsize(zone), flags;
3469
3470 for_each_possible_cpu(cpu) {
3471 struct per_cpu_pageset *pset;
3472 struct per_cpu_pages *pcp;
3473
3474 pset = per_cpu_ptr(zone->pageset, cpu);
3475 pcp = &pset->pcp;
3476
3477 local_irq_save(flags);
3478 free_pcppages_bulk(zone, pcp->count, pcp);
3479 setup_pageset(pset, batch);
3480 local_irq_restore(flags);
3481 }
3482 return 0;
3483}
3484
3485void zone_pcp_update(struct zone *zone)
3486{
3487 stop_machine(__zone_pcp_update, zone, NULL);
3488}
3489
3490static __meminit void zone_pcp_init(struct zone *zone)
3491{
3492 /*
3493 * per cpu subsystem is not up at this point. The following code
3494 * relies on the ability of the linker to provide the
3495 * offset of a (static) per cpu variable into the per cpu area.
3496 */
3497 zone->pageset = &boot_pageset;
3498
3499 if (zone->present_pages)
3500 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3501 zone->name, zone->present_pages,
3502 zone_batchsize(zone));
3503}
3504
3505__meminit int init_currently_empty_zone(struct zone *zone,
3506 unsigned long zone_start_pfn,
3507 unsigned long size,
3508 enum memmap_context context)
3509{
3510 struct pglist_data *pgdat = zone->zone_pgdat;
3511 int ret;
3512 ret = zone_wait_table_init(zone, size);
3513 if (ret)
3514 return ret;
3515 pgdat->nr_zones = zone_idx(zone) + 1;
3516
3517 zone->zone_start_pfn = zone_start_pfn;
3518
3519 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3520 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3521 pgdat->node_id,
3522 (unsigned long)zone_idx(zone),
3523 zone_start_pfn, (zone_start_pfn + size));
3524
3525 zone_init_free_lists(zone);
3526
3527 return 0;
3528}
3529
3530#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3531/*
3532 * Basic iterator support. Return the first range of PFNs for a node
3533 * Note: nid == MAX_NUMNODES returns first region regardless of node
3534 */
3535static int __meminit first_active_region_index_in_nid(int nid)
3536{
3537 int i;
3538
3539 for (i = 0; i < nr_nodemap_entries; i++)
3540 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3541 return i;
3542
3543 return -1;
3544}
3545
3546/*
3547 * Basic iterator support. Return the next active range of PFNs for a node
3548 * Note: nid == MAX_NUMNODES returns next region regardless of node
3549 */
3550static int __meminit next_active_region_index_in_nid(int index, int nid)
3551{
3552 for (index = index + 1; index < nr_nodemap_entries; index++)
3553 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3554 return index;
3555
3556 return -1;
3557}
3558
3559#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3560/*
3561 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3562 * Architectures may implement their own version but if add_active_range()
3563 * was used and there are no special requirements, this is a convenient
3564 * alternative
3565 */
3566int __meminit __early_pfn_to_nid(unsigned long pfn)
3567{
3568 int i;
3569
3570 for (i = 0; i < nr_nodemap_entries; i++) {
3571 unsigned long start_pfn = early_node_map[i].start_pfn;
3572 unsigned long end_pfn = early_node_map[i].end_pfn;
3573
3574 if (start_pfn <= pfn && pfn < end_pfn)
3575 return early_node_map[i].nid;
3576 }
3577 /* This is a memory hole */
3578 return -1;
3579}
3580#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3581
3582int __meminit early_pfn_to_nid(unsigned long pfn)
3583{
3584 int nid;
3585
3586 nid = __early_pfn_to_nid(pfn);
3587 if (nid >= 0)
3588 return nid;
3589 /* just returns 0 */
3590 return 0;
3591}
3592
3593#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3594bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3595{
3596 int nid;
3597
3598 nid = __early_pfn_to_nid(pfn);
3599 if (nid >= 0 && nid != node)
3600 return false;
3601 return true;
3602}
3603#endif
3604
3605/* Basic iterator support to walk early_node_map[] */
3606#define for_each_active_range_index_in_nid(i, nid) \
3607 for (i = first_active_region_index_in_nid(nid); i != -1; \
3608 i = next_active_region_index_in_nid(i, nid))
3609
3610/**
3611 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3612 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3613 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3614 *
3615 * If an architecture guarantees that all ranges registered with
3616 * add_active_ranges() contain no holes and may be freed, this
3617 * function may be used instead of calling free_bootmem() manually.
3618 */
3619void __init free_bootmem_with_active_regions(int nid,
3620 unsigned long max_low_pfn)
3621{
3622 int i;
3623
3624 for_each_active_range_index_in_nid(i, nid) {
3625 unsigned long size_pages = 0;
3626 unsigned long end_pfn = early_node_map[i].end_pfn;
3627
3628 if (early_node_map[i].start_pfn >= max_low_pfn)
3629 continue;
3630
3631 if (end_pfn > max_low_pfn)
3632 end_pfn = max_low_pfn;
3633
3634 size_pages = end_pfn - early_node_map[i].start_pfn;
3635 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3636 PFN_PHYS(early_node_map[i].start_pfn),
3637 size_pages << PAGE_SHIFT);
3638 }
3639}
3640
3641#ifdef CONFIG_HAVE_MEMBLOCK
3642u64 __init find_memory_core_early(int nid, u64 size, u64 align,
3643 u64 goal, u64 limit)
3644{
3645 int i;
3646
3647 /* Need to go over early_node_map to find out good range for node */
3648 for_each_active_range_index_in_nid(i, nid) {
3649 u64 addr;
3650 u64 ei_start, ei_last;
3651 u64 final_start, final_end;
3652
3653 ei_last = early_node_map[i].end_pfn;
3654 ei_last <<= PAGE_SHIFT;
3655 ei_start = early_node_map[i].start_pfn;
3656 ei_start <<= PAGE_SHIFT;
3657
3658 final_start = max(ei_start, goal);
3659 final_end = min(ei_last, limit);
3660
3661 if (final_start >= final_end)
3662 continue;
3663
3664 addr = memblock_find_in_range(final_start, final_end, size, align);
3665
3666 if (addr == MEMBLOCK_ERROR)
3667 continue;
3668
3669 return addr;
3670 }
3671
3672 return MEMBLOCK_ERROR;
3673}
3674#endif
3675
3676int __init add_from_early_node_map(struct range *range, int az,
3677 int nr_range, int nid)
3678{
3679 int i;
3680 u64 start, end;
3681
3682 /* need to go over early_node_map to find out good range for node */
3683 for_each_active_range_index_in_nid(i, nid) {
3684 start = early_node_map[i].start_pfn;
3685 end = early_node_map[i].end_pfn;
3686 nr_range = add_range(range, az, nr_range, start, end);
3687 }
3688 return nr_range;
3689}
3690
3691#ifdef CONFIG_NO_BOOTMEM
3692void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3693 u64 goal, u64 limit)
3694{
3695 void *ptr;
3696 u64 addr;
3697
3698 if (limit > memblock.current_limit)
3699 limit = memblock.current_limit;
3700
3701 addr = find_memory_core_early(nid, size, align, goal, limit);
3702
3703 if (addr == MEMBLOCK_ERROR)
3704 return NULL;
3705
3706 ptr = phys_to_virt(addr);
3707 memset(ptr, 0, size);
3708 memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
3709 /*
3710 * The min_count is set to 0 so that bootmem allocated blocks
3711 * are never reported as leaks.
3712 */
3713 kmemleak_alloc(ptr, size, 0, 0);
3714 return ptr;
3715}
3716#endif
3717
3718
3719void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3720{
3721 int i;
3722 int ret;
3723
3724 for_each_active_range_index_in_nid(i, nid) {
3725 ret = work_fn(early_node_map[i].start_pfn,
3726 early_node_map[i].end_pfn, data);
3727 if (ret)
3728 break;
3729 }
3730}
3731/**
3732 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3733 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3734 *
3735 * If an architecture guarantees that all ranges registered with
3736 * add_active_ranges() contain no holes and may be freed, this
3737 * function may be used instead of calling memory_present() manually.
3738 */
3739void __init sparse_memory_present_with_active_regions(int nid)
3740{
3741 int i;
3742
3743 for_each_active_range_index_in_nid(i, nid)
3744 memory_present(early_node_map[i].nid,
3745 early_node_map[i].start_pfn,
3746 early_node_map[i].end_pfn);
3747}
3748
3749/**
3750 * get_pfn_range_for_nid - Return the start and end page frames for a node
3751 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3752 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3753 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3754 *
3755 * It returns the start and end page frame of a node based on information
3756 * provided by an arch calling add_active_range(). If called for a node
3757 * with no available memory, a warning is printed and the start and end
3758 * PFNs will be 0.
3759 */
3760void __meminit get_pfn_range_for_nid(unsigned int nid,
3761 unsigned long *start_pfn, unsigned long *end_pfn)
3762{
3763 int i;
3764 *start_pfn = -1UL;
3765 *end_pfn = 0;
3766
3767 for_each_active_range_index_in_nid(i, nid) {
3768 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3769 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3770 }
3771
3772 if (*start_pfn == -1UL)
3773 *start_pfn = 0;
3774}
3775
3776/*
3777 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3778 * assumption is made that zones within a node are ordered in monotonic
3779 * increasing memory addresses so that the "highest" populated zone is used
3780 */
3781static void __init find_usable_zone_for_movable(void)
3782{
3783 int zone_index;
3784 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3785 if (zone_index == ZONE_MOVABLE)
3786 continue;
3787
3788 if (arch_zone_highest_possible_pfn[zone_index] >
3789 arch_zone_lowest_possible_pfn[zone_index])
3790 break;
3791 }
3792
3793 VM_BUG_ON(zone_index == -1);
3794 movable_zone = zone_index;
3795}
3796
3797/*
3798 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3799 * because it is sized independent of architecture. Unlike the other zones,
3800 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3801 * in each node depending on the size of each node and how evenly kernelcore
3802 * is distributed. This helper function adjusts the zone ranges
3803 * provided by the architecture for a given node by using the end of the
3804 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3805 * zones within a node are in order of monotonically increasing memory addresses
3806 */
3807static void __meminit adjust_zone_range_for_zone_movable(int nid,
3808 unsigned long zone_type,
3809 unsigned long node_start_pfn,
3810 unsigned long node_end_pfn,
3811 unsigned long *zone_start_pfn,
3812 unsigned long *zone_end_pfn)
3813{
3814 /* Only adjust if ZONE_MOVABLE is on this node */
3815 if (zone_movable_pfn[nid]) {
3816 /* Size ZONE_MOVABLE */
3817 if (zone_type == ZONE_MOVABLE) {
3818 *zone_start_pfn = zone_movable_pfn[nid];
3819 *zone_end_pfn = min(node_end_pfn,
3820 arch_zone_highest_possible_pfn[movable_zone]);
3821
3822 /* Adjust for ZONE_MOVABLE starting within this range */
3823 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3824 *zone_end_pfn > zone_movable_pfn[nid]) {
3825 *zone_end_pfn = zone_movable_pfn[nid];
3826
3827 /* Check if this whole range is within ZONE_MOVABLE */
3828 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3829 *zone_start_pfn = *zone_end_pfn;
3830 }
3831}
3832
3833/*
3834 * Return the number of pages a zone spans in a node, including holes
3835 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3836 */
3837static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3838 unsigned long zone_type,
3839 unsigned long *ignored)
3840{
3841 unsigned long node_start_pfn, node_end_pfn;
3842 unsigned long zone_start_pfn, zone_end_pfn;
3843
3844 /* Get the start and end of the node and zone */
3845 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3846 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3847 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3848 adjust_zone_range_for_zone_movable(nid, zone_type,
3849 node_start_pfn, node_end_pfn,
3850 &zone_start_pfn, &zone_end_pfn);
3851
3852 /* Check that this node has pages within the zone's required range */
3853 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3854 return 0;
3855
3856 /* Move the zone boundaries inside the node if necessary */
3857 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3858 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3859
3860 /* Return the spanned pages */
3861 return zone_end_pfn - zone_start_pfn;
3862}
3863
3864/*
3865 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3866 * then all holes in the requested range will be accounted for.
3867 */
3868unsigned long __meminit __absent_pages_in_range(int nid,
3869 unsigned long range_start_pfn,
3870 unsigned long range_end_pfn)
3871{
3872 int i = 0;
3873 unsigned long prev_end_pfn = 0, hole_pages = 0;
3874 unsigned long start_pfn;
3875
3876 /* Find the end_pfn of the first active range of pfns in the node */
3877 i = first_active_region_index_in_nid(nid);
3878 if (i == -1)
3879 return 0;
3880
3881 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3882
3883 /* Account for ranges before physical memory on this node */
3884 if (early_node_map[i].start_pfn > range_start_pfn)
3885 hole_pages = prev_end_pfn - range_start_pfn;
3886
3887 /* Find all holes for the zone within the node */
3888 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3889
3890 /* No need to continue if prev_end_pfn is outside the zone */
3891 if (prev_end_pfn >= range_end_pfn)
3892 break;
3893
3894 /* Make sure the end of the zone is not within the hole */
3895 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3896 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3897
3898 /* Update the hole size count and move on */
3899 if (start_pfn > range_start_pfn) {
3900 BUG_ON(prev_end_pfn > start_pfn);
3901 hole_pages += start_pfn - prev_end_pfn;
3902 }
3903 prev_end_pfn = early_node_map[i].end_pfn;
3904 }
3905
3906 /* Account for ranges past physical memory on this node */
3907 if (range_end_pfn > prev_end_pfn)
3908 hole_pages += range_end_pfn -
3909 max(range_start_pfn, prev_end_pfn);
3910
3911 return hole_pages;
3912}
3913
3914/**
3915 * absent_pages_in_range - Return number of page frames in holes within a range
3916 * @start_pfn: The start PFN to start searching for holes
3917 * @end_pfn: The end PFN to stop searching for holes
3918 *
3919 * It returns the number of page frames in memory holes within a range.
3920 */
3921unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3922 unsigned long end_pfn)
3923{
3924 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3925}
3926
3927/* Return the number of page frames in holes in a zone on a node */
3928static unsigned long __meminit zone_absent_pages_in_node(int nid,
3929 unsigned long zone_type,
3930 unsigned long *ignored)
3931{
3932 unsigned long node_start_pfn, node_end_pfn;
3933 unsigned long zone_start_pfn, zone_end_pfn;
3934
3935 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3936 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3937 node_start_pfn);
3938 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3939 node_end_pfn);
3940
3941 adjust_zone_range_for_zone_movable(nid, zone_type,
3942 node_start_pfn, node_end_pfn,
3943 &zone_start_pfn, &zone_end_pfn);
3944 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3945}
3946
3947#else
3948static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3949 unsigned long zone_type,
3950 unsigned long *zones_size)
3951{
3952 return zones_size[zone_type];
3953}
3954
3955static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3956 unsigned long zone_type,
3957 unsigned long *zholes_size)
3958{
3959 if (!zholes_size)
3960 return 0;
3961
3962 return zholes_size[zone_type];
3963}
3964
3965#endif
3966
3967static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3968 unsigned long *zones_size, unsigned long *zholes_size)
3969{
3970 unsigned long realtotalpages, totalpages = 0;
3971 enum zone_type i;
3972
3973 for (i = 0; i < MAX_NR_ZONES; i++)
3974 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3975 zones_size);
3976 pgdat->node_spanned_pages = totalpages;
3977
3978 realtotalpages = totalpages;
3979 for (i = 0; i < MAX_NR_ZONES; i++)
3980 realtotalpages -=
3981 zone_absent_pages_in_node(pgdat->node_id, i,
3982 zholes_size);
3983 pgdat->node_present_pages = realtotalpages;
3984 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3985 realtotalpages);
3986}
3987
3988#ifndef CONFIG_SPARSEMEM
3989/*
3990 * Calculate the size of the zone->blockflags rounded to an unsigned long
3991 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3992 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3993 * round what is now in bits to nearest long in bits, then return it in
3994 * bytes.
3995 */
3996static unsigned long __init usemap_size(unsigned long zonesize)
3997{
3998 unsigned long usemapsize;
3999
4000 usemapsize = roundup(zonesize, pageblock_nr_pages);
4001 usemapsize = usemapsize >> pageblock_order;
4002 usemapsize *= NR_PAGEBLOCK_BITS;
4003 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4004
4005 return usemapsize / 8;
4006}
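/*
 * Worked example (editor's illustration, assuming NR_PAGEBLOCK_BITS == 4
 * and pageblock_order 10 on 4K pages): a 1GB zone spans 262144 pages,
 * i.e. 256 pageblocks, needing 256 * 4 = 1024 bits. That is already a
 * whole number of longs, so usemap_size() returns 128 bytes.
 */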
4007
4008static void __init setup_usemap(struct pglist_data *pgdat,
4009 struct zone *zone, unsigned long zonesize)
4010{
4011 unsigned long usemapsize = usemap_size(zonesize);
4012 zone->pageblock_flags = NULL;
4013 if (usemapsize)
4014 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
4015}
4016#else
4017static inline void setup_usemap(struct pglist_data *pgdat,
4018 struct zone *zone, unsigned long zonesize) {}
4019#endif /* CONFIG_SPARSEMEM */
4020
4021#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4022
4023/* Return a sensible default order for the pageblock size. */
4024static inline int pageblock_default_order(void)
4025{
4026 if (HPAGE_SHIFT > PAGE_SHIFT)
4027 return HUGETLB_PAGE_ORDER;
4028
4029 return MAX_ORDER-1;
4030}
4031
4032/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4033static inline void __init set_pageblock_order(unsigned int order)
4034{
4035 /* Check that pageblock_nr_pages has not already been setup */
4036 if (pageblock_order)
4037 return;
4038
4039 /*
4040 * Assume the largest contiguous order of interest is a huge page.
4041 * This value may be variable depending on boot parameters on IA64
4042 */
4043 pageblock_order = order;
4044}
4045#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4046
4047/*
4048 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4049 * and pageblock_default_order() are unused as pageblock_order is set
4050 * at compile-time. See include/linux/pageblock-flags.h for the values of
4051 * pageblock_order based on the kernel config
4052 */
4053static inline int pageblock_default_order(unsigned int order)
4054{
4055 return MAX_ORDER-1;
4056}
4057#define set_pageblock_order(x) do {} while (0)
4058
4059#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4060
4061/*
4062 * Set up the zone data structures:
4063 * - mark all pages reserved
4064 * - mark all memory queues empty
4065 * - clear the memory bitmaps
4066 */
4067static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4068 unsigned long *zones_size, unsigned long *zholes_size)
4069{
4070 enum zone_type j;
4071 int nid = pgdat->node_id;
4072 unsigned long zone_start_pfn = pgdat->node_start_pfn;
4073 int ret;
4074
4075 pgdat_resize_init(pgdat);
4076 pgdat->nr_zones = 0;
4077 init_waitqueue_head(&pgdat->kswapd_wait);
4078 pgdat->kswapd_max_order = 0;
4079 pgdat_page_cgroup_init(pgdat);
4080
4081 for (j = 0; j < MAX_NR_ZONES; j++) {
4082 struct zone *zone = pgdat->node_zones + j;
4083 unsigned long size, realsize, memmap_pages;
4084 enum lru_list l;
4085
4086 size = zone_spanned_pages_in_node(nid, j, zones_size);
4087 realsize = size - zone_absent_pages_in_node(nid, j,
4088 zholes_size);
4089
4090 /*
4091 * Adjust realsize so that it accounts for how much memory
4092 * is used by this zone for memmap. This affects the watermark
4093 * and per-cpu initialisations
4094 */
4095 memmap_pages =
4096 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4097 if (realsize >= memmap_pages) {
4098 realsize -= memmap_pages;
4099 if (memmap_pages)
4100 printk(KERN_DEBUG
4101 " %s zone: %lu pages used for memmap\n",
4102 zone_names[j], memmap_pages);
4103 } else
4104 printk(KERN_WARNING
4105 " %s zone: %lu pages exceeds realsize %lu\n",
4106 zone_names[j], memmap_pages, realsize);
4107
4108 /* Account for reserved pages */
4109 if (j == 0 && realsize > dma_reserve) {
0e0b864e 4110 realsize -= dma_reserve;
d903ef9f 4111 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4112 zone_names[0], dma_reserve);
0e0b864e
MG
4113 }
4114
4115 if (!is_highmem_idx(j))
4116 nr_kernel_pages += realsize;
4117 nr_all_pages += realsize;
4118
4119 zone->spanned_pages = size;
4120 zone->present_pages = realsize;
4121#ifdef CONFIG_NUMA
4122 zone->node = nid;
4123 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4124 / 100;
4125 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4126#endif
4127 zone->name = zone_names[j];
4128 spin_lock_init(&zone->lock);
4129 spin_lock_init(&zone->lru_lock);
4130 zone_seqlock_init(zone);
4131 zone->zone_pgdat = pgdat;
4132
4133 zone_pcp_init(zone);
4134 for_each_lru(l) {
4135 INIT_LIST_HEAD(&zone->lru[l].list);
4136 zone->reclaim_stat.nr_saved_scan[l] = 0;
4137 }
4138 zone->reclaim_stat.recent_rotated[0] = 0;
4139 zone->reclaim_stat.recent_rotated[1] = 0;
4140 zone->reclaim_stat.recent_scanned[0] = 0;
4141 zone->reclaim_stat.recent_scanned[1] = 0;
4142 zap_zone_vm_stats(zone);
4143 zone->flags = 0;
4144 if (!size)
4145 continue;
4146
4147 set_pageblock_order(pageblock_default_order());
4148 setup_usemap(pgdat, zone, size);
4149 ret = init_currently_empty_zone(zone, zone_start_pfn,
4150 size, MEMMAP_EARLY);
4151 BUG_ON(ret);
4152 memmap_init(size, nid, j, zone_start_pfn);
4153 zone_start_pfn += size;
4154 }
4155}
4156
4157static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4158{
4159 /* Skip empty nodes */
4160 if (!pgdat->node_spanned_pages)
4161 return;
4162
4163#ifdef CONFIG_FLAT_NODE_MEM_MAP
4164 /* ia64 gets its own node_mem_map, before this, without bootmem */
4165 if (!pgdat->node_mem_map) {
4166 unsigned long size, start, end;
4167 struct page *map;
4168
4169 /*
4170 * The zone's endpoints aren't required to be MAX_ORDER
4171 * aligned but the node_mem_map endpoints must be in order
4172 * for the buddy allocator to function correctly.
4173 */
4174 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4175 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4176 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4177 size = (end - start) * sizeof(struct page);
4178 map = alloc_remap(pgdat->node_id, size);
4179 if (!map)
4180 map = alloc_bootmem_node(pgdat, size);
4181 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4182 }
4183#ifndef CONFIG_NEED_MULTIPLE_NODES
4184 /*
4185 * With no DISCONTIG, the global mem_map is just set as node 0's
4186 */
4187 if (pgdat == NODE_DATA(0)) {
4188 mem_map = NODE_DATA(0)->node_mem_map;
4189#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4190 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4191 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4192#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4193 }
4194#endif
4195#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4196}
4197
4198void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4199 unsigned long node_start_pfn, unsigned long *zholes_size)
4200{
4201 pg_data_t *pgdat = NODE_DATA(nid);
4202
4203 pgdat->node_id = nid;
4204 pgdat->node_start_pfn = node_start_pfn;
4205 calculate_node_totalpages(pgdat, zones_size, zholes_size);
4206
4207 alloc_node_mem_map(pgdat);
4208#ifdef CONFIG_FLAT_NODE_MEM_MAP
4209 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4210 nid, (unsigned long)pgdat,
4211 (unsigned long)pgdat->node_mem_map);
4212#endif
4213
4214 free_area_init_core(pgdat, zones_size, zholes_size);
4215}
4216
4217#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4218
4219#if MAX_NUMNODES > 1
4220/*
4221 * Figure out the number of possible node ids.
4222 */
4223static void __init setup_nr_node_ids(void)
4224{
4225 unsigned int node;
4226 unsigned int highest = 0;
4227
4228 for_each_node_mask(node, node_possible_map)
4229 highest = node;
4230 nr_node_ids = highest + 1;
4231}
4232#else
4233static inline void setup_nr_node_ids(void)
4234{
4235}
4236#endif
4237
4238/**
4239 * add_active_range - Register a range of PFNs backed by physical memory
4240 * @nid: The node ID the range resides on
4241 * @start_pfn: The start PFN of the available physical memory
4242 * @end_pfn: The end PFN of the available physical memory
4243 *
4244 * These ranges are stored in an early_node_map[] and later used by
4245 * free_area_init_nodes() to calculate zone sizes and holes. If the
4246 * range spans a memory hole, it is up to the architecture to ensure
4247 * the memory is not freed by the bootmem allocator. If possible
4248 * the range being registered will be merged with existing ranges.
4249 */
4250void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4251 unsigned long end_pfn)
4252{
4253 int i;
4254
4255 mminit_dprintk(MMINIT_TRACE, "memory_register",
4256 "Entering add_active_range(%d, %#lx, %#lx) "
4257 "%d entries of %d used\n",
4258 nid, start_pfn, end_pfn,
4259 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4260
4261 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4262
4263 /* Merge with existing active regions if possible */
4264 for (i = 0; i < nr_nodemap_entries; i++) {
4265 if (early_node_map[i].nid != nid)
4266 continue;
4267
4268 /* Skip if an existing region covers this new one */
4269 if (start_pfn >= early_node_map[i].start_pfn &&
4270 end_pfn <= early_node_map[i].end_pfn)
4271 return;
4272
4273 /* Merge forward if suitable */
4274 if (start_pfn <= early_node_map[i].end_pfn &&
4275 end_pfn > early_node_map[i].end_pfn) {
4276 early_node_map[i].end_pfn = end_pfn;
4277 return;
4278 }
4279
4280 /* Merge backward if suitable */
4281 if (start_pfn < early_node_map[i].start_pfn &&
4282 end_pfn >= early_node_map[i].start_pfn) {
4283 early_node_map[i].start_pfn = start_pfn;
4284 return;
4285 }
4286 }
4287
4288 /* Check that early_node_map is large enough */
4289 if (i >= MAX_ACTIVE_REGIONS) {
4290 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4291 MAX_ACTIVE_REGIONS);
4292 return;
4293 }
4294
4295 early_node_map[i].nid = nid;
4296 early_node_map[i].start_pfn = start_pfn;
4297 early_node_map[i].end_pfn = end_pfn;
4298 nr_nodemap_entries = i + 1;
4299}
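/*
 * Usage sketch (editor's illustration with made-up PFNs): an arch with
 * two adjacent banks on node 0 might register them during early boot as
 *
 *	add_active_range(0, 0x000, 0x100);
 *	add_active_range(0, 0x100, 0x200);
 *
 * The second call merges forward with the first, leaving a single
 * early_node_map[] entry spanning pfns 0x000-0x200.
 */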
4300
4301/**
4302 * remove_active_range - Shrink an existing registered range of PFNs
4303 * @nid: The node id the range is on that should be shrunk
4304 * @start_pfn: The start PFN of the range to remove
4305 * @end_pfn: The end PFN of the range to remove
4306 *
4307 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4308 * The map is kept near the end of the physical page range that has already been
4309 * registered. This function allows an arch to shrink an existing registered
4310 * range.
4311 */
4312void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4313 unsigned long end_pfn)
4314{
4315 int i, j;
4316 int removed = 0;
4317
4318 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4319 nid, start_pfn, end_pfn);
4320
4321 /* Find the old active region end and shrink */
4322 for_each_active_range_index_in_nid(i, nid) {
4323 if (early_node_map[i].start_pfn >= start_pfn &&
4324 early_node_map[i].end_pfn <= end_pfn) {
4325 /* clear it */
4326 early_node_map[i].start_pfn = 0;
4327 early_node_map[i].end_pfn = 0;
4328 removed = 1;
4329 continue;
4330 }
4331 if (early_node_map[i].start_pfn < start_pfn &&
4332 early_node_map[i].end_pfn > start_pfn) {
4333 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4334 early_node_map[i].end_pfn = start_pfn;
4335 if (temp_end_pfn > end_pfn)
4336 add_active_range(nid, end_pfn, temp_end_pfn);
4337 continue;
4338 }
4339 if (early_node_map[i].start_pfn >= start_pfn &&
4340 early_node_map[i].end_pfn > end_pfn &&
4341 early_node_map[i].start_pfn < end_pfn) {
4342 early_node_map[i].start_pfn = end_pfn;
4343 continue;
4344 }
4345 }
4346
4347 if (!removed)
4348 return;
4349
4350 /* remove the blank ones */
4351 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4352 if (early_node_map[i].nid != nid)
4353 continue;
4354 if (early_node_map[i].end_pfn)
4355 continue;
4356 /* we found it, get rid of it */
4357 for (j = i; j < nr_nodemap_entries - 1; j++)
4358 memcpy(&early_node_map[j], &early_node_map[j+1],
4359 sizeof(early_node_map[j]));
4360 j = nr_nodemap_entries - 1;
4361 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4362 nr_nodemap_entries--;
4363 }
4364}
4365
4366/**
4367 * remove_all_active_ranges - Remove all currently registered regions
4368 *
4369 * During discovery, it may be found that a table like SRAT is invalid
4370 * and an alternative discovery method must be used. This function removes
4371 * all currently registered regions.
4372 */
4373void __init remove_all_active_ranges(void)
4374{
4375 memset(early_node_map, 0, sizeof(early_node_map));
4376 nr_nodemap_entries = 0;
4377}
4378
4379/* Compare two active node_active_regions */
4380static int __init cmp_node_active_region(const void *a, const void *b)
4381{
4382 struct node_active_region *arange = (struct node_active_region *)a;
4383 struct node_active_region *brange = (struct node_active_region *)b;
4384
4385 /* Done this way to avoid overflows */
4386 if (arange->start_pfn > brange->start_pfn)
4387 return 1;
4388 if (arange->start_pfn < brange->start_pfn)
4389 return -1;
4390
4391 return 0;
4392}
4393
4394/* sort the node_map by start_pfn */
4395void __init sort_node_map(void)
4396{
4397 sort(early_node_map, (size_t)nr_nodemap_entries,
4398 sizeof(struct node_active_region),
4399 cmp_node_active_region, NULL);
4400}
4401
4402/* Find the lowest pfn for a node */
4403static unsigned long __init find_min_pfn_for_node(int nid)
4404{
4405 int i;
4406 unsigned long min_pfn = ULONG_MAX;
4407
4408 /* Assuming a sorted map, the first range found has the starting pfn */
4409 for_each_active_range_index_in_nid(i, nid)
4410 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4411
4412 if (min_pfn == ULONG_MAX) {
4413 printk(KERN_WARNING
4414 "Could not find start_pfn for node %d\n", nid);
4415 return 0;
4416 }
4417
4418 return min_pfn;
4419}
4420
4421/**
4422 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4423 *
4424 * It returns the minimum PFN based on information provided via
4425 * add_active_range().
4426 */
4427unsigned long __init find_min_pfn_with_active_regions(void)
4428{
4429 return find_min_pfn_for_node(MAX_NUMNODES);
4430}
4431
4432/*
4433 * early_calculate_totalpages()
4434 * Sum pages in active regions for movable zone.
4435 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4436 */
4437static unsigned long __init early_calculate_totalpages(void)
4438{
4439 int i;
4440 unsigned long totalpages = 0;
4441
4442 for (i = 0; i < nr_nodemap_entries; i++) {
4443 unsigned long pages = early_node_map[i].end_pfn -
4444 early_node_map[i].start_pfn;
4445 totalpages += pages;
4446 if (pages)
4447 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4448 }
4449 return totalpages;
7e63efef
MG
4450}
4451
4452/*
4453 * Find the PFN the Movable zone begins in each node. Kernel memory
4454 * is spread evenly between nodes as long as the nodes have enough
4455 * memory. When they don't, some nodes will have more kernelcore than
4456 * others
4457 */
4458static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4459{
4460 int i, nid;
4461 unsigned long usable_startpfn;
4462 unsigned long kernelcore_node, kernelcore_remaining;
4463 /* save the state before borrow the nodemask */
4464 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4465 unsigned long totalpages = early_calculate_totalpages();
4466 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4467
4468 /*
4469 * If movablecore was specified, calculate what size of
4470 * kernelcore that corresponds so that memory usable for
4471 * any allocation type is evenly spread. If both kernelcore
4472 * and movablecore are specified, then the value of kernelcore
4473 * will be used for required_kernelcore if it's greater than
4474 * what movablecore would have allowed.
4475 */
4476 if (required_movablecore) {
4477 unsigned long corepages;
4478
4479 /*
4480 * Round-up so that ZONE_MOVABLE is at least as large as what
4481 * was requested by the user
4482 */
4483 required_movablecore =
4484 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4485 corepages = totalpages - required_movablecore;
4486
4487 required_kernelcore = max(required_kernelcore, corepages);
4488 }
4489
4490 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4491 if (!required_kernelcore)
4492 goto out;
4493
4494 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4495 find_usable_zone_for_movable();
4496 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4497
4498restart:
4499 /* Spread kernelcore memory as evenly as possible throughout nodes */
4500 kernelcore_node = required_kernelcore / usable_nodes;
4501 for_each_node_state(nid, N_HIGH_MEMORY) {
4502 /*
4503 * Recalculate kernelcore_node if the division per node
4504 * now exceeds what is necessary to satisfy the requested
4505 * amount of memory for the kernel
4506 */
4507 if (required_kernelcore < kernelcore_node)
4508 kernelcore_node = required_kernelcore / usable_nodes;
4509
4510 /*
4511 * As the map is walked, we track how much memory is usable
4512 * by the kernel using kernelcore_remaining. When it is
4513 * 0, the rest of the node is usable by ZONE_MOVABLE
4514 */
4515 kernelcore_remaining = kernelcore_node;
4516
4517 /* Go through each range of PFNs within this node */
4518 for_each_active_range_index_in_nid(i, nid) {
4519 unsigned long start_pfn, end_pfn;
4520 unsigned long size_pages;
4521
4522 start_pfn = max(early_node_map[i].start_pfn,
4523 zone_movable_pfn[nid]);
4524 end_pfn = early_node_map[i].end_pfn;
4525 if (start_pfn >= end_pfn)
4526 continue;
4527
4528 /* Account for what is only usable for kernelcore */
4529 if (start_pfn < usable_startpfn) {
4530 unsigned long kernel_pages;
4531 kernel_pages = min(end_pfn, usable_startpfn)
4532 - start_pfn;
4533
4534 kernelcore_remaining -= min(kernel_pages,
4535 kernelcore_remaining);
4536 required_kernelcore -= min(kernel_pages,
4537 required_kernelcore);
4538
4539 /* Continue if range is now fully accounted */
4540 if (end_pfn <= usable_startpfn) {
4541
4542 /*
4543 * Push zone_movable_pfn to the end so
4544 * that if we have to rebalance
4545 * kernelcore across nodes, we will
4546 * not double account here
4547 */
4548 zone_movable_pfn[nid] = end_pfn;
4549 continue;
4550 }
4551 start_pfn = usable_startpfn;
4552 }
4553
4554 /*
4555 * The usable PFN range for ZONE_MOVABLE is from
4556 * start_pfn->end_pfn. Calculate size_pages as the
4557 * number of pages used as kernelcore
4558 */
4559 size_pages = end_pfn - start_pfn;
4560 if (size_pages > kernelcore_remaining)
4561 size_pages = kernelcore_remaining;
4562 zone_movable_pfn[nid] = start_pfn + size_pages;
4563
4564 /*
4565 * Some kernelcore has been met, update counts and
4566 * break if the kernelcore for this node has been
4567 * satisfied
4568 */
4569 required_kernelcore -= min(required_kernelcore,
4570 size_pages);
4571 kernelcore_remaining -= size_pages;
4572 if (!kernelcore_remaining)
4573 break;
4574 }
4575 }
4576
4577 /*
4578 * If there is still required_kernelcore, we do another pass with one
4579 * less node in the count. This will push zone_movable_pfn[nid] further
4580 * along on the nodes that still have memory until kernelcore is
4581 * satisfied
4582 */
4583 usable_nodes--;
4584 if (usable_nodes && required_kernelcore > usable_nodes)
4585 goto restart;
4586
4587 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4588 for (nid = 0; nid < MAX_NUMNODES; nid++)
4589 zone_movable_pfn[nid] =
4590 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4591
4592out:
4593 /* restore the node_state */
4594 node_states[N_HIGH_MEMORY] = saved_node_state;
4595}
4596
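/*
 * Worked example (editor's illustration): on two nodes of 4GB each with
 * kernelcore=4G, kernelcore_node is 2GB per node, so zone_movable_pfn[]
 * ends up 2GB into each node and the upper half of each node becomes
 * ZONE_MOVABLE (subject to the MAX_ORDER_NR_PAGES round-up above).
 */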
4597/* Any regular memory on that node ? */
4598static void check_for_regular_memory(pg_data_t *pgdat)
4599{
4600#ifdef CONFIG_HIGHMEM
4601 enum zone_type zone_type;
4602
4603 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4604 struct zone *zone = &pgdat->node_zones[zone_type];
4605 if (zone->present_pages)
4606 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4607 }
4608#endif
4609}
4610
4611/**
4612 * free_area_init_nodes - Initialise all pg_data_t and zone data
4613 * @max_zone_pfn: an array of max PFNs for each zone
4614 *
4615 * This will call free_area_init_node() for each active node in the system.
4616 * Using the page ranges provided by add_active_range(), the size of each
4617 * zone in each node and their holes is calculated. If the maximum PFN
4618 * between two adjacent zones match, it is assumed that the zone is empty.
4619 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4620 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4621 * starts where the previous one ended. For example, ZONE_DMA32 starts
4622 * at arch_max_dma_pfn.
4623 */
4624void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4625{
4626 unsigned long nid;
4627 int i;
4628
4629 /* Sort early_node_map as initialisation assumes it is sorted */
4630 sort_node_map();
4631
4632 /* Record where the zone boundaries are */
4633 memset(arch_zone_lowest_possible_pfn, 0,
4634 sizeof(arch_zone_lowest_possible_pfn));
4635 memset(arch_zone_highest_possible_pfn, 0,
4636 sizeof(arch_zone_highest_possible_pfn));
4637 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4638 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4639 for (i = 1; i < MAX_NR_ZONES; i++) {
4640 if (i == ZONE_MOVABLE)
4641 continue;
4642 arch_zone_lowest_possible_pfn[i] =
4643 arch_zone_highest_possible_pfn[i-1];
4644 arch_zone_highest_possible_pfn[i] =
4645 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4646 }
4647 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4648 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4649
4650 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4651 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4652 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4653
4654 /* Print out the zone ranges */
4655 printk("Zone PFN ranges:\n");
4656 for (i = 0; i < MAX_NR_ZONES; i++) {
4657 if (i == ZONE_MOVABLE)
4658 continue;
4659 printk(" %-8s ", zone_names[i]);
4660 if (arch_zone_lowest_possible_pfn[i] ==
4661 arch_zone_highest_possible_pfn[i])
4662 printk("empty\n");
4663 else
4664 printk("%0#10lx -> %0#10lx\n",
4665 arch_zone_lowest_possible_pfn[i],
4666 arch_zone_highest_possible_pfn[i]);
4667 }
4668
4669 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4670 printk("Movable zone start PFN for each node\n");
4671 for (i = 0; i < MAX_NUMNODES; i++) {
4672 if (zone_movable_pfn[i])
4673 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4674 }
4675
4676 /* Print out the early_node_map[] */
4677 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4678 for (i = 0; i < nr_nodemap_entries; i++)
4679 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4680 early_node_map[i].start_pfn,
4681 early_node_map[i].end_pfn);
4682
4683 /* Initialise every node */
4684 mminit_verify_pageflags_layout();
4685 setup_nr_node_ids();
4686 for_each_online_node(nid) {
4687 pg_data_t *pgdat = NODE_DATA(nid);
4688 free_area_init_node(nid, NULL,
4689 find_min_pfn_for_node(nid), NULL);
4690
4691 /* Any memory on that node */
4692 if (pgdat->node_present_pages)
4693 node_set_state(nid, N_HIGH_MEMORY);
4694 check_for_regular_memory(pgdat);
4695 }
4696}
4697
4698static int __init cmdline_parse_core(char *p, unsigned long *core)
4699{
4700 unsigned long long coremem;
4701 if (!p)
4702 return -EINVAL;
4703
4704 coremem = memparse(p, &p);
4705 *core = coremem >> PAGE_SHIFT;
4706
4707 /* Paranoid check that UL is enough for the coremem value */
4708 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4709
4710 return 0;
4711}
4712
4713/*
4714 * kernelcore=size sets the amount of memory for use for allocations that
4715 * cannot be reclaimed or migrated.
4716 */
4717static int __init cmdline_parse_kernelcore(char *p)
4718{
4719 return cmdline_parse_core(p, &required_kernelcore);
4720}
4721
4722/*
4723 * movablecore=size sets the amount of memory for use for allocations that
4724 * can be reclaimed or migrated.
4725 */
4726static int __init cmdline_parse_movablecore(char *p)
4727{
4728 return cmdline_parse_core(p, &required_movablecore);
4729}
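/*
 * Example (editor's note): booting with "kernelcore=512M" keeps 512MB
 * usable for unmovable kernel allocations and offers the rest to
 * ZONE_MOVABLE; "movablecore=512M" approaches the same split from the
 * other direction. memparse() accepts plain bytes or K/M/G suffixes.
 */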
4730
4731 early_param("kernelcore", cmdline_parse_kernelcore);
4732 early_param("movablecore", cmdline_parse_movablecore);
4733
4734#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4735
4736/**
4737 * set_dma_reserve - set the specified number of pages reserved in the first zone
4738 * @new_dma_reserve: The number of pages to mark reserved
4739 *
4740 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4741 * In the DMA zone, a significant percentage may be consumed by kernel image
4742 * and other unfreeable allocations which can skew the watermarks badly. This
4743 * function may optionally be used to account for unfreeable pages in the
4744 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4745 * smaller per-cpu batchsize.
4746 */
4747void __init set_dma_reserve(unsigned long new_dma_reserve)
4748{
4749 dma_reserve = new_dma_reserve;
4750}
4751
4752#ifndef CONFIG_NEED_MULTIPLE_NODES
4753struct pglist_data __refdata contig_page_data = {
4754#ifndef CONFIG_NO_BOOTMEM
4755 .bdata = &bootmem_node_data[0]
4756#endif
4757 };
4758EXPORT_SYMBOL(contig_page_data);
4759#endif
4760
4761void __init free_area_init(unsigned long *zones_size)
4762{
4763 free_area_init_node(0, zones_size,
4764 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4765}
4766
4767static int page_alloc_cpu_notify(struct notifier_block *self,
4768 unsigned long action, void *hcpu)
4769{
4770 int cpu = (unsigned long)hcpu;
1da177e4 4771
4772 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4773 drain_pages(cpu);
4774
4775 /*
4776 * Spill the event counters of the dead processor
4777 * into the current processors event counters.
4778 * This artificially elevates the count of the current
4779 * processor.
4780 */
4781 vm_events_fold_cpu(cpu);
4782
4783 /*
4784 * Zero the differential counters of the dead processor
4785 * so that the vm statistics are consistent.
4786 *
4787 * This is only okay since the processor is dead and cannot
4788 * race with what we are doing.
4789 */
4790 refresh_cpu_vm_stats(cpu);
4791 }
4792 return NOTIFY_OK;
4793}
4794
4795void __init page_alloc_init(void)
4796{
4797 hotcpu_notifier(page_alloc_cpu_notify, 0);
4798}
4799
4800/*
4801 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4802 * or min_free_kbytes changes.
4803 */
4804static void calculate_totalreserve_pages(void)
4805{
4806 struct pglist_data *pgdat;
4807 unsigned long reserve_pages = 0;
4808 enum zone_type i, j;
4809
4810 for_each_online_pgdat(pgdat) {
4811 for (i = 0; i < MAX_NR_ZONES; i++) {
4812 struct zone *zone = pgdat->node_zones + i;
4813 unsigned long max = 0;
4814
4815 /* Find valid and maximum lowmem_reserve in the zone */
4816 for (j = i; j < MAX_NR_ZONES; j++) {
4817 if (zone->lowmem_reserve[j] > max)
4818 max = zone->lowmem_reserve[j];
4819 }
4820
4821 /* we treat the high watermark as reserved pages. */
4822 max += high_wmark_pages(zone);
4823
4824 if (max > zone->present_pages)
4825 max = zone->present_pages;
4826 reserve_pages += max;
4827 }
4828 }
4829 totalreserve_pages = reserve_pages;
4830}
4831
4832/*
4833 * setup_per_zone_lowmem_reserve - called whenever
4834 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4835 * has a correct pages reserved value, so an adequate number of
4836 * pages are left in the zone after a successful __alloc_pages().
4837 */
4838static void setup_per_zone_lowmem_reserve(void)
4839{
4840 struct pglist_data *pgdat;
4841 enum zone_type j, idx;
4842
4843 for_each_online_pgdat(pgdat) {
4844 for (j = 0; j < MAX_NR_ZONES; j++) {
4845 struct zone *zone = pgdat->node_zones + j;
4846 unsigned long present_pages = zone->present_pages;
4847
4848 zone->lowmem_reserve[j] = 0;
4849
4850 idx = j;
4851 while (idx) {
4852 struct zone *lower_zone;
4853
4854 idx--;
4855
4856 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4857 sysctl_lowmem_reserve_ratio[idx] = 1;
4858
4859 lower_zone = pgdat->node_zones + idx;
4860 lower_zone->lowmem_reserve[j] = present_pages /
4861 sysctl_lowmem_reserve_ratio[idx];
4862 present_pages += lower_zone->present_pages;
4863 }
4864 }
4865 }
4866
4867 /* update totalreserve_pages */
4868 calculate_totalreserve_pages();
4869}
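/*
 * Worked example (editor's illustration): with the default ratio of 256
 * for ZONE_DMA and a 1GB ZONE_NORMAL (262144 pages) above it, the DMA
 * zone ends up with lowmem_reserve[ZONE_NORMAL] = 262144 / 256 = 1024
 * pages (4MB) that allocations which could have used ZONE_NORMAL may
 * not consume.
 */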
4870
4871/**
4872 * setup_per_zone_wmarks - called when min_free_kbytes changes
4873 * or when memory is hot-{added|removed}
4874 *
4875 * Ensures that the watermark[min,low,high] values for each zone are set
4876 * correctly with respect to min_free_kbytes.
4877 */
4878void setup_per_zone_wmarks(void)
4879{
4880 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4881 unsigned long lowmem_pages = 0;
4882 struct zone *zone;
4883 unsigned long flags;
4884
4885 /* Calculate total number of !ZONE_HIGHMEM pages */
4886 for_each_zone(zone) {
4887 if (!is_highmem(zone))
4888 lowmem_pages += zone->present_pages;
4889 }
4890
4891 for_each_zone(zone) {
4892 u64 tmp;
4893
4894 spin_lock_irqsave(&zone->lock, flags);
4895 tmp = (u64)pages_min * zone->present_pages;
4896 do_div(tmp, lowmem_pages);
4897 if (is_highmem(zone)) {
4898 /*
4899 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4900 * need highmem pages, so cap pages_min to a small
4901 * value here.
4902 *
4903 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4904 * deltas control async page reclaim, and so should
4905 * not be capped for highmem.
4906 */
4907 int min_pages;
4908
4909 min_pages = zone->present_pages / 1024;
4910 if (min_pages < SWAP_CLUSTER_MAX)
4911 min_pages = SWAP_CLUSTER_MAX;
4912 if (min_pages > 128)
4913 min_pages = 128;
41858966 4914 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 4915 } else {
669ed175
NP
4916 /*
4917 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
4918 * proportionate to the zone's size.
4919 */
41858966 4920 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
4921 }
4922
41858966
MG
4923 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4924 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
56fd56b8 4925 setup_zone_migrate_reserve(zone);
1125b4e3 4926 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 4927 }
cb45b0e9
HA
4928
4929 /* update totalreserve_pages */
4930 calculate_totalreserve_pages();
1da177e4
LT
4931}
4932
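A stand-alone sketch of the proportional split, assuming min_free_kbytes = 4096, 4 KiB pages, and made-up zone sizes; do_div() is replaced by plain 64-bit division.

/* Illustrative userspace sketch of the watermark arithmetic above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long min_free_kbytes = 4096;	/* assumed sysctl value */
	unsigned int page_shift = 12;		/* 4 KiB pages */
	unsigned long pages_min = min_free_kbytes >> (page_shift - 10);
	unsigned long lowmem_pages = 225280;	/* made-up !HIGHMEM total */
	unsigned long zone_present = 221184;	/* made-up zone size */

	/* the zone's share of pages_min, as computed above */
	uint64_t tmp = (uint64_t)pages_min * zone_present / lowmem_pages;

	unsigned long wmark_min = tmp;
	unsigned long wmark_low = wmark_min + (tmp >> 2);	/* min + 25% */
	unsigned long wmark_high = wmark_min + (tmp >> 1);	/* min + 50% */

	printf("min=%lu low=%lu high=%lu pages\n",
	       wmark_min, wmark_low, wmark_high);
	return 0;
}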
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
void calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->present_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}

static void __init setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}

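The table above can be reproduced with nothing but an integer square root; this sketch substitutes a naive isqrt() for the kernel's int_sqrt().

/* Illustrative userspace check of the inactive-ratio table. */
#include <stdio.h>

/* naive integer square root, standing in for the kernel's int_sqrt() */
static unsigned int isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* zone sizes in GB, matching the table above (0 covers 10MB/100MB) */
	unsigned long sizes_gb[] = { 0, 1, 10, 100, 1024, 10240 };

	for (unsigned int i = 0; i < sizeof(sizes_gb) / sizeof(sizes_gb[0]); i++) {
		unsigned long gb = sizes_gb[i];
		unsigned int ratio = gb ? isqrt(10 * gb) : 1;

		printf("%6lu GB -> inactive_ratio %u\n", gb, ratio);
	}
	return 0;
}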
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_wmarks();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
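The same formula and clamps, checked against the table above in a stand-alone sketch (naive isqrt() in place of int_sqrt()).

/* Illustrative userspace check of the min_free_kbytes table. */
#include <stdio.h>

static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* lowmem sizes in MB, matching the table above */
	unsigned long mb[] = { 16, 32, 64, 128, 256, 512, 1024, 2048,
			       4096, 8192, 16384 };

	for (unsigned int i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
		unsigned long lowmem_kbytes = mb[i] << 10;
		unsigned long min_free = isqrt(lowmem_kbytes * 16);

		if (min_free < 128)
			min_free = 128;
		if (min_free > 65536)
			min_free = 65536;
		printf("%6luMB: %luk\n", mb[i], min_free);
	}
	return 0;
}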

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 * that we can call setup_per_zone_wmarks() whenever min_free_kbytes
 * changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (write)
		setup_per_zone_wmarks();
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio can only make sense
 * in relation to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can hold before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_populated_zone(zone) {
		for_each_possible_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(
				per_cpu_ptr(zone->pageset, cpu), high);
		}
	}
	return 0;
}

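A tiny sketch of the pcp->high computation, with a made-up zone size and an assumed fraction of 8; the batch value that setup_pagelist_highmark() derives from high is left out here.

/* Illustrative userspace sketch of the pcp->high computation. */
#include <stdio.h>

int main(void)
{
	unsigned long present_pages = 221184;	/* made-up zone size */
	unsigned long fraction = 8;		/* assumed sysctl value */

	/*
	 * Each per-cpu pagelist may grow to 1/fraction of the zone
	 * before pages are flushed back to the buddy allocator.
	 */
	unsigned long high = present_pages / fraction;

	printf("pcp->high = %lu pages\n", high);
	return 0;
}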
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem_nopanic(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

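The sizing arithmetic alone, modelled in userspace: assumed 1 GiB of nr_kernel_pages with 4 KiB pages, scale = 14, and an 8-byte bucket; the HASH_SMALL and 1/16-of-memory capping paths are omitted for brevity.

/* Illustrative userspace sketch of the hash-table sizing logic. */
#include <stdio.h>

int main(void)
{
	int page_shift = 12;				/* 4 KiB pages */
	unsigned long nr_kernel_pages = 1UL << (30 - page_shift); /* ~1 GiB */
	int scale = 14;					/* 1 bucket per 16 KiB */
	unsigned long bucketsize = 8;

	/* round memory size up to whole megabytes, then apply the scale */
	unsigned long numentries = nr_kernel_pages;
	numentries += (1UL << (20 - page_shift)) - 1;
	numentries >>= 20 - page_shift;
	numentries <<= 20 - page_shift;
	if (scale > page_shift)
		numentries >>= scale - page_shift;
	else
		numentries <<= page_shift - scale;

	/* round up to a power of two, as the table requires */
	unsigned long pow2 = 1;
	while (pow2 < numentries)
		pow2 <<= 1;

	printf("entries=%lu, table=%lu bytes\n", pow2, pow2 * bucketsize);
	return 0;
}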
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
						  unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
			       int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}
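A self-contained model of the get/set bit walk, using a local bitmap, a made-up bit index, and NR_PAGEBLOCK_BITS = 4; test_bit()/__set_bit() are replaced with plain shifts and masks.

/* Illustrative userspace sketch of the pageblock flag bit walk. */
#include <stdio.h>

#define NR_PAGEBLOCK_BITS 4	/* matches the kernel's pageblock-flags.h */

int main(void)
{
	unsigned long bitmap[4] = { 0 };
	int bitidx = 2 * NR_PAGEBLOCK_BITS;	/* third pageblock in this map */
	unsigned long flags = 0x2;		/* made-up migratetype-style value */
	unsigned long out = 0;

	/* set: mirrors set_pageblock_flags_group() over bits 0..2 */
	for (int i = 0, value = 1; i <= 2; i++, value <<= 1) {
		int bit = bitidx + i;

		if (flags & value)
			bitmap[bit / 64] |= 1UL << (bit % 64);
		else
			bitmap[bit / 64] &= ~(1UL << (bit % 64));
	}

	/* get: mirrors get_pageblock_flags_group() */
	for (int i = 0, value = 1; i <= 2; i++, value <<= 1) {
		int bit = bitidx + i;

		if (bitmap[bit / 64] & (1UL << (bit % 64)))
			out |= value;
	}

	printf("stored %#lx, read back %#lx\n", flags, out);
	return 0;
}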

/*
 * This is designed as a sub-function... please see page_isolation.c as well.
 * Set/clear a page block's type to be ISOLATE.
 * The page allocator never allocates memory from an ISOLATE block.
 */

static bool
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;
	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * A ZONE_MOVABLE zone never contains immobile pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them too.
		 * But for now, memory offline itself doesn't call
		 * shrink_slab() and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need any further checks: this is a _used_,
		 * non-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both memory hole pages and _used_ kernel
		 * pages at boot.
		 */
		if (found > count)
			return false;
	}
	return true;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}

int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;
	int zone_idx;

	zone = page_zone(page);
	zone_idx = zone_idx(zone);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Immobile means "not-on-LRU" pages. If the immobile count is
	 * larger than the number of removable-by-driver pages reported
	 * by the notifier, we'll fail.
	 */

out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      -(1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page + i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
/* Returns true if the page lies inside a free buddy block of any order. */
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif
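The head-candidate arithmetic is worth seeing on its own: for each order, page - (pfn & ((1 << order) - 1)) clears the low pfn bits, yielding the only possible head of a block of that order. A sketch with an arbitrary pfn and MAX_ORDER assumed to be 11.

/* Illustrative userspace sketch of the buddy head-candidate math. */
#include <stdio.h>

#define MAX_ORDER 11	/* assumed, as on most configurations */

int main(void)
{
	unsigned long pfn = 0x12345;	/* arbitrary example pfn */

	for (int order = 0; order < MAX_ORDER; order++) {
		unsigned long head_pfn = pfn - (pfn & ((1UL << order) - 1));

		printf("order %2d: candidate head pfn %#lx\n", order, head_pfn);
	}
	return 0;
}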

static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked, "locked" },
	{1UL << PG_error, "error" },
	{1UL << PG_referenced, "referenced" },
	{1UL << PG_uptodate, "uptodate" },
	{1UL << PG_dirty, "dirty" },
	{1UL << PG_lru, "lru" },
	{1UL << PG_active, "active" },
	{1UL << PG_slab, "slab" },
	{1UL << PG_owner_priv_1, "owner_priv_1" },
	{1UL << PG_arch_1, "arch_1" },
	{1UL << PG_reserved, "reserved" },
	{1UL << PG_private, "private" },
	{1UL << PG_private_2, "private_2" },
	{1UL << PG_writeback, "writeback" },
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head, "head" },
	{1UL << PG_tail, "tail" },
#else
	{1UL << PG_compound, "compound" },
#endif
	{1UL << PG_swapcache, "swapcache" },
	{1UL << PG_mappedtodisk, "mappedtodisk" },
	{1UL << PG_reclaim, "reclaim" },
	{1UL << PG_buddy, "buddy" },
	{1UL << PG_swapbacked, "swapbacked" },
	{1UL << PG_unevictable, "unevictable" },
#ifdef CONFIG_MMU
	{1UL << PG_mlocked, "mlocked" },
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached, "uncached" },
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison, "hwpoison" },
#endif
	{-1UL, NULL },
};

static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {
		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for leftover flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
	       page, page_count(page), page_mapcount(page),
	       page->mapping, page->index);
	dump_page_flags(page->flags);
}
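A stand-alone model of the flag-decoding loop in dump_page_flags(), using a tiny made-up name table; leftover bits are printed in hex just as above.

/* Illustrative userspace sketch of the flag-name decoding loop. */
#include <stdio.h>

struct flag_name {
	unsigned long mask;
	const char *name;
};

int main(void)
{
	/* tiny stand-in for pageflag_names[] */
	struct flag_name names[] = {
		{ 1UL << 0, "locked" },
		{ 1UL << 4, "dirty" },
		{ 1UL << 5, "lru" },
		{ 0, NULL },
	};
	unsigned long flags = (1UL << 4) | (1UL << 5) | (1UL << 9);
	const char *delim = "";

	printf("page flags: %#lx(", flags);
	for (int i = 0; names[i].name && flags; i++) {
		if ((flags & names[i].mask) != names[i].mask)
			continue;
		flags &= ~names[i].mask;
		printf("%s%s", delim, names[i].name);
		delim = "|";
	}
	if (flags)	/* leftover, unnamed bits */
		printf("%s%#lx", delim, flags);
	printf(")\n");
	return 0;
}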