Split the free lists for movable and unmovable allocations
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
 4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/bootmem.h>
23#include <linux/compiler.h>
9f158333 24#include <linux/kernel.h>
1da177e4
LT
25#include <linux/module.h>
26#include <linux/suspend.h>
27#include <linux/pagevec.h>
28#include <linux/blkdev.h>
29#include <linux/slab.h>
30#include <linux/notifier.h>
31#include <linux/topology.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
bdc8cb98 35#include <linux/memory_hotplug.h>
1da177e4
LT
36#include <linux/nodemask.h>
37#include <linux/vmalloc.h>
4be38e35 38#include <linux/mempolicy.h>
6811378e 39#include <linux/stop_machine.h>
c713216d
MG
40#include <linux/sort.h>
41#include <linux/pfn.h>
3fcfab16 42#include <linux/backing-dev.h>
933e312e 43#include <linux/fault-inject.h>
1da177e4
LT
44
45#include <asm/tlbflush.h>
ac924c60 46#include <asm/div64.h>
1da177e4
LT
47#include "internal.h"
48
49/*
13808910 50 * Array of node states.
1da177e4 51 */
13808910
CL
52nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
53 [N_POSSIBLE] = NODE_MASK_ALL,
54 [N_ONLINE] = { { [0] = 1UL } },
55#ifndef CONFIG_NUMA
56 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
57#ifdef CONFIG_HIGHMEM
58 [N_HIGH_MEMORY] = { { [0] = 1UL } },
59#endif
60 [N_CPU] = { { [0] = 1UL } },
61#endif /* NUMA */
62};
63EXPORT_SYMBOL(node_states);
64
6c231b7b 65unsigned long totalram_pages __read_mostly;
cb45b0e9 66unsigned long totalreserve_pages __read_mostly;
1da177e4 67long nr_swap_pages;
8ad4b1fb 68int percpu_pagelist_fraction;
1da177e4 69
d98c7a09 70static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 71
1da177e4
LT
72/*
73 * results with 256, 32 in the lowmem_reserve sysctl:
74 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
75 * 1G machine -> (16M dma, 784M normal, 224M high)
76 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
77 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 78 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
79 *
80 * TBD: should special case ZONE_DMA32 machines here - in those we normally
81 * don't need any ZONE_NORMAL reservation
1da177e4 82 */
2f1b6248 83int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 84#ifdef CONFIG_ZONE_DMA
2f1b6248 85 256,
4b51d669 86#endif
fb0e7942 87#ifdef CONFIG_ZONE_DMA32
2f1b6248 88 256,
fb0e7942 89#endif
e53ef38d 90#ifdef CONFIG_HIGHMEM
2a1e274a 91 32,
e53ef38d 92#endif
2a1e274a 93 32,
2f1b6248 94};
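/*
 * Worked example (editorial note, not part of the original source): with the
 * default ratios above on the 1G example machine, a NORMAL allocation keeps
 * roughly 784M/256 ~= 3M of ZONE_DMA out of reach, and a HIGHMEM allocation
 * keeps 224M/32 = 7M of ZONE_NORMAL plus (224M+784M)/256 ~= 4M of ZONE_DMA
 * in reserve.
 */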
1da177e4
LT
95
96EXPORT_SYMBOL(totalram_pages);
1da177e4 97
15ad7cdc 98static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 99#ifdef CONFIG_ZONE_DMA
2f1b6248 100 "DMA",
4b51d669 101#endif
fb0e7942 102#ifdef CONFIG_ZONE_DMA32
2f1b6248 103 "DMA32",
fb0e7942 104#endif
2f1b6248 105 "Normal",
e53ef38d 106#ifdef CONFIG_HIGHMEM
2a1e274a 107 "HighMem",
e53ef38d 108#endif
2a1e274a 109 "Movable",
2f1b6248
CL
110};
111
1da177e4
LT
112int min_free_kbytes = 1024;
113
86356ab1
YG
114unsigned long __meminitdata nr_kernel_pages;
115unsigned long __meminitdata nr_all_pages;
a3142c8e 116static unsigned long __meminitdata dma_reserve;
1da177e4 117
c713216d
MG
118#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
119 /*
 120 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
121 * ranges of memory (RAM) that may be registered with add_active_range().
122 * Ranges passed to add_active_range() will be merged if possible
123 * so the number of times add_active_range() can be called is
124 * related to the number of nodes and the number of holes
125 */
126 #ifdef CONFIG_MAX_ACTIVE_REGIONS
127 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
128 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
129 #else
130 #if MAX_NUMNODES >= 32
131 /* If there can be many nodes, allow up to 50 holes per node */
132 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
133 #else
134 /* By default, allow up to 256 distinct regions */
135 #define MAX_ACTIVE_REGIONS 256
136 #endif
137 #endif
138
98011f56
JB
139 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
140 static int __meminitdata nr_nodemap_entries;
141 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
142 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
fb01439c 143#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
98011f56
JB
144 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
145 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
fb01439c 146#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
2a1e274a 147 unsigned long __initdata required_kernelcore;
7e63efef 148 unsigned long __initdata required_movablecore;
e228929b 149 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
150
151 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
152 int movable_zone;
153 EXPORT_SYMBOL(movable_zone);
c713216d
MG
154#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
155
418508c1
MS
156#if MAX_NUMNODES > 1
157int nr_node_ids __read_mostly = MAX_NUMNODES;
158EXPORT_SYMBOL(nr_node_ids);
159#endif
160
b2a0ac88
MG
161static inline int get_pageblock_migratetype(struct page *page)
162{
163 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
164}
165
166static void set_pageblock_migratetype(struct page *page, int migratetype)
167{
168 set_pageblock_flags_group(page, (unsigned long)migratetype,
169 PB_migrate, PB_migrate_end);
170}
171
172static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
173{
174 return ((gfp_flags & __GFP_MOVABLE) != 0);
175}
176
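/*
 * Illustrative note (editorial, not from the original patch): with the helper
 * above, a plain kernel allocation such as GFP_KERNEL maps to index 0
 * (MIGRATE_UNMOVABLE), while an allocation carrying __GFP_MOVABLE, e.g. a
 * user page allocated with GFP_HIGHUSER_MOVABLE, maps to index 1
 * (MIGRATE_MOVABLE), so the two classes end up on separate per-order free
 * lists.
 */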
13e7444b 177#ifdef CONFIG_DEBUG_VM
c6a57e19 178static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 179{
bdc8cb98
DH
180 int ret = 0;
181 unsigned seq;
182 unsigned long pfn = page_to_pfn(page);
c6a57e19 183
bdc8cb98
DH
184 do {
185 seq = zone_span_seqbegin(zone);
186 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
187 ret = 1;
188 else if (pfn < zone->zone_start_pfn)
189 ret = 1;
190 } while (zone_span_seqretry(zone, seq));
191
192 return ret;
c6a57e19
DH
193}
194
195static int page_is_consistent(struct zone *zone, struct page *page)
196{
14e07298 197 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 198 return 0;
1da177e4 199 if (zone != page_zone(page))
c6a57e19
DH
200 return 0;
201
202 return 1;
203}
204/*
205 * Temporary debugging check for pages not lying within a given zone.
206 */
207static int bad_range(struct zone *zone, struct page *page)
208{
209 if (page_outside_zone_boundaries(zone, page))
1da177e4 210 return 1;
c6a57e19
DH
211 if (!page_is_consistent(zone, page))
212 return 1;
213
1da177e4
LT
214 return 0;
215}
13e7444b
NP
216#else
217static inline int bad_range(struct zone *zone, struct page *page)
218{
219 return 0;
220}
221#endif
222
224abf92 223static void bad_page(struct page *page)
1da177e4 224{
224abf92 225 printk(KERN_EMERG "Bad page state in process '%s'\n"
7365f3d1
HD
226 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
227 KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
228 KERN_EMERG "Backtrace:\n",
224abf92
NP
229 current->comm, page, (int)(2*sizeof(unsigned long)),
230 (unsigned long)page->flags, page->mapping,
231 page_mapcount(page), page_count(page));
1da177e4 232 dump_stack();
334795ec
HD
233 page->flags &= ~(1 << PG_lru |
234 1 << PG_private |
1da177e4 235 1 << PG_locked |
1da177e4
LT
236 1 << PG_active |
237 1 << PG_dirty |
334795ec
HD
238 1 << PG_reclaim |
239 1 << PG_slab |
1da177e4 240 1 << PG_swapcache |
676165a8
NP
241 1 << PG_writeback |
242 1 << PG_buddy );
1da177e4
LT
243 set_page_count(page, 0);
244 reset_page_mapcount(page);
245 page->mapping = NULL;
9f158333 246 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
247}
248
1da177e4
LT
249/*
250 * Higher-order pages are called "compound pages". They are structured thusly:
251 *
252 * The first PAGE_SIZE page is called the "head page".
253 *
254 * The remaining PAGE_SIZE pages are called "tail pages".
255 *
256 * All pages have PG_compound set. All pages have their ->private pointing at
257 * the head page (even the head page has this).
258 *
41d78ba5
HD
259 * The first tail page's ->lru.next holds the address of the compound page's
260 * put_page() function. Its ->lru.prev holds the order of allocation.
261 * This usage means that zero-order pages may not be compound.
1da177e4 262 */
d98c7a09
HD
263
264static void free_compound_page(struct page *page)
265{
d85f3385 266 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
267}
268
1da177e4
LT
269static void prep_compound_page(struct page *page, unsigned long order)
270{
271 int i;
272 int nr_pages = 1 << order;
273
33f2ef89 274 set_compound_page_dtor(page, free_compound_page);
d85f3385 275 set_compound_order(page, order);
6d777953 276 __SetPageHead(page);
d85f3385 277 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
278 struct page *p = page + i;
279
d85f3385 280 __SetPageTail(p);
d85f3385 281 p->first_page = page;
1da177e4
LT
282 }
283}
284
285static void destroy_compound_page(struct page *page, unsigned long order)
286{
287 int i;
288 int nr_pages = 1 << order;
289
d85f3385 290 if (unlikely(compound_order(page) != order))
224abf92 291 bad_page(page);
1da177e4 292
6d777953 293 if (unlikely(!PageHead(page)))
d85f3385 294 bad_page(page);
6d777953 295 __ClearPageHead(page);
d85f3385 296 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
297 struct page *p = page + i;
298
6d777953 299 if (unlikely(!PageTail(p) |
d85f3385 300 (p->first_page != page)))
224abf92 301 bad_page(page);
d85f3385 302 __ClearPageTail(p);
1da177e4
LT
303 }
304}
1da177e4 305
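/*
 * Example layout (editorial illustration): after prep_compound_page(page, 2)
 * the order-2 block looks like
 *	page[0]     head: __SetPageHead(), compound order 2 recorded,
 *	            destructor set to free_compound_page()
 *	page[1..3]  tails: __SetPageTail(), ->first_page == &page[0]
 * and destroy_compound_page() verifies exactly this structure before the
 * pages go back to the buddy allocator.
 */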
17cf4406
NP
306static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
307{
308 int i;
309
725d704e 310 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
6626c5d5
AM
311 /*
312 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
313 * and __GFP_HIGHMEM from hard or soft interrupt context.
314 */
725d704e 315 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
316 for (i = 0; i < (1 << order); i++)
317 clear_highpage(page + i);
318}
319
1da177e4
LT
320/*
321 * function for dealing with page's order in buddy system.
322 * zone->lock is already acquired when we use these.
323 * So, we don't need atomic page->flags operations here.
324 */
6aa3001b
AM
325static inline unsigned long page_order(struct page *page)
326{
4c21e2f2 327 return page_private(page);
1da177e4
LT
328}
329
6aa3001b
AM
330static inline void set_page_order(struct page *page, int order)
331{
4c21e2f2 332 set_page_private(page, order);
676165a8 333 __SetPageBuddy(page);
1da177e4
LT
334}
335
336static inline void rmv_page_order(struct page *page)
337{
676165a8 338 __ClearPageBuddy(page);
4c21e2f2 339 set_page_private(page, 0);
1da177e4
LT
340}
341
342/*
343 * Locate the struct page for both the matching buddy in our
344 * pair (buddy1) and the combined O(n+1) page they form (page).
345 *
346 * 1) Any buddy B1 will have an order O twin B2 which satisfies
347 * the following equation:
348 * B2 = B1 ^ (1 << O)
349 * For example, if the starting buddy (buddy2) is #8 its order
350 * 1 buddy is #10:
351 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
352 *
353 * 2) Any buddy B will have an order O+1 parent P which
354 * satisfies the following equation:
355 * P = B & ~(1 << O)
356 *
d6e05edc 357 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4
LT
358 */
359static inline struct page *
360__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
361{
362 unsigned long buddy_idx = page_idx ^ (1 << order);
363
364 return page + (buddy_idx - page_idx);
365}
366
367static inline unsigned long
368__find_combined_index(unsigned long page_idx, unsigned int order)
369{
370 return (page_idx & ~(1 << order));
371}
372
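/*
 * Standalone model of the buddy index arithmetic used by
 * __page_find_buddy()/__find_combined_index() above.  This is an editorial
 * sketch for illustration only; it is plain user-space C, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_idx = 8;          /* example from the comment above */
	unsigned int order = 1;

	unsigned long buddy_idx = page_idx ^ (1UL << order);   /* B2 = B1 ^ (1 << O) */
	unsigned long combined  = page_idx & ~(1UL << order);  /* P  = B  & ~(1 << O) */

	assert(buddy_idx == 10);   /* order-1 buddy of block #8 is block #10 */
	assert(combined == 8);     /* together they form the order-2 block at #8 */

	printf("buddy of %lu at order %u is %lu, combined block starts at %lu\n",
	       page_idx, order, buddy_idx, combined);
	return 0;
}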
373/*
374 * This function checks whether a page is free && is the buddy
 375 * we can coalesce a page and its buddy if
13e7444b 376 * (a) the buddy is not in a hole &&
676165a8 377 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
378 * (c) a page and its buddy have the same order &&
379 * (d) a page and its buddy are in the same zone.
676165a8
NP
380 *
381 * For recording whether a page is in the buddy system, we use PG_buddy.
382 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
1da177e4 383 *
676165a8 384 * For recording page's order, we use page_private(page).
1da177e4 385 */
cb2b95e1
AW
386static inline int page_is_buddy(struct page *page, struct page *buddy,
387 int order)
1da177e4 388{
14e07298 389 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 390 return 0;
13e7444b 391
cb2b95e1
AW
392 if (page_zone_id(page) != page_zone_id(buddy))
393 return 0;
394
395 if (PageBuddy(buddy) && page_order(buddy) == order) {
396 BUG_ON(page_count(buddy) != 0);
6aa3001b 397 return 1;
676165a8 398 }
6aa3001b 399 return 0;
1da177e4
LT
400}
401
402/*
403 * Freeing function for a buddy system allocator.
404 *
405 * The concept of a buddy system is to maintain direct-mapped table
406 * (containing bit values) for memory blocks of various "orders".
407 * The bottom level table contains the map for the smallest allocatable
408 * units of memory (here, pages), and each level above it describes
409 * pairs of units from the levels below, hence, "buddies".
410 * At a high level, all that happens here is marking the table entry
411 * at the bottom level available, and propagating the changes upward
412 * as necessary, plus some accounting needed to play nicely with other
413 * parts of the VM system.
414 * At each level, we keep a list of pages, which are heads of continuous
676165a8 415 * free pages of length of (1 << order) and marked with PG_buddy. Page's
4c21e2f2 416 * order is recorded in page_private(page) field.
1da177e4
LT
417 * So when we are allocating or freeing one, we can derive the state of the
418 * other. That is, if we allocate a small block, and both were
419 * free, the remainder of the region must be split into blocks.
420 * If a block is freed, and its buddy is also free, then this
421 * triggers coalescing into a block of larger size.
422 *
423 * -- wli
424 */
425
48db57f8 426static inline void __free_one_page(struct page *page,
1da177e4
LT
427 struct zone *zone, unsigned int order)
428{
429 unsigned long page_idx;
430 int order_size = 1 << order;
b2a0ac88 431 int migratetype = get_pageblock_migratetype(page);
1da177e4 432
224abf92 433 if (unlikely(PageCompound(page)))
1da177e4
LT
434 destroy_compound_page(page, order);
435
436 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
437
725d704e
NP
438 VM_BUG_ON(page_idx & (order_size - 1));
439 VM_BUG_ON(bad_range(zone, page));
1da177e4 440
d23ad423 441 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
1da177e4
LT
442 while (order < MAX_ORDER-1) {
443 unsigned long combined_idx;
1da177e4
LT
444 struct page *buddy;
445
1da177e4 446 buddy = __page_find_buddy(page, page_idx, order);
cb2b95e1 447 if (!page_is_buddy(page, buddy, order))
1da177e4 448 break; /* Move the buddy up one level. */
13e7444b 449
1da177e4 450 list_del(&buddy->lru);
b2a0ac88 451 zone->free_area[order].nr_free--;
1da177e4 452 rmv_page_order(buddy);
13e7444b 453 combined_idx = __find_combined_index(page_idx, order);
1da177e4
LT
454 page = page + (combined_idx - page_idx);
455 page_idx = combined_idx;
456 order++;
457 }
458 set_page_order(page, order);
b2a0ac88
MG
459 list_add(&page->lru,
460 &zone->free_area[order].free_list[migratetype]);
1da177e4
LT
461 zone->free_area[order].nr_free++;
462}
463
224abf92 464static inline int free_pages_check(struct page *page)
1da177e4 465{
92be2e33
NP
466 if (unlikely(page_mapcount(page) |
467 (page->mapping != NULL) |
468 (page_count(page) != 0) |
1da177e4
LT
469 (page->flags & (
470 1 << PG_lru |
471 1 << PG_private |
472 1 << PG_locked |
473 1 << PG_active |
1da177e4
LT
474 1 << PG_slab |
475 1 << PG_swapcache |
b5810039 476 1 << PG_writeback |
676165a8
NP
477 1 << PG_reserved |
478 1 << PG_buddy ))))
224abf92 479 bad_page(page);
1da177e4 480 if (PageDirty(page))
242e5468 481 __ClearPageDirty(page);
689bcebf
HD
482 /*
483 * For now, we report if PG_reserved was found set, but do not
484 * clear it, and do not free the page. But we shall soon need
485 * to do more, for when the ZERO_PAGE count wraps negative.
486 */
487 return PageReserved(page);
1da177e4
LT
488}
489
490/*
491 * Frees a list of pages.
492 * Assumes all pages on list are in same zone, and of same order.
207f36ee 493 * count is the number of pages to free.
1da177e4
LT
494 *
495 * If the zone was previously in an "all pages pinned" state then look to
496 * see if this freeing clears that state.
497 *
498 * And clear the zone's pages_scanned counter, to hold off the "all pages are
499 * pinned" detection logic.
500 */
48db57f8
NP
501static void free_pages_bulk(struct zone *zone, int count,
502 struct list_head *list, int order)
1da177e4 503{
c54ad30c 504 spin_lock(&zone->lock);
1da177e4
LT
505 zone->all_unreclaimable = 0;
506 zone->pages_scanned = 0;
48db57f8
NP
507 while (count--) {
508 struct page *page;
509
725d704e 510 VM_BUG_ON(list_empty(list));
1da177e4 511 page = list_entry(list->prev, struct page, lru);
48db57f8 512 /* have to delete it as __free_one_page list manipulates */
1da177e4 513 list_del(&page->lru);
48db57f8 514 __free_one_page(page, zone, order);
1da177e4 515 }
c54ad30c 516 spin_unlock(&zone->lock);
1da177e4
LT
517}
518
48db57f8 519static void free_one_page(struct zone *zone, struct page *page, int order)
1da177e4 520{
006d22d9
CL
521 spin_lock(&zone->lock);
522 zone->all_unreclaimable = 0;
523 zone->pages_scanned = 0;
0798e519 524 __free_one_page(page, zone, order);
006d22d9 525 spin_unlock(&zone->lock);
48db57f8
NP
526}
527
528static void __free_pages_ok(struct page *page, unsigned int order)
529{
530 unsigned long flags;
1da177e4 531 int i;
689bcebf 532 int reserved = 0;
1da177e4 533
1da177e4 534 for (i = 0 ; i < (1 << order) ; ++i)
224abf92 535 reserved += free_pages_check(page + i);
689bcebf
HD
536 if (reserved)
537 return;
538
9858db50
NP
539 if (!PageHighMem(page))
540 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
dafb1367 541 arch_free_page(page, order);
48db57f8 542 kernel_map_pages(page, 1 << order, 0);
dafb1367 543
c54ad30c 544 local_irq_save(flags);
f8891e5e 545 __count_vm_events(PGFREE, 1 << order);
48db57f8 546 free_one_page(page_zone(page), page, order);
c54ad30c 547 local_irq_restore(flags);
1da177e4
LT
548}
549
a226f6c8
DH
550/*
551 * permit the bootmem allocator to evade page validation on high-order frees
552 */
553void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
554{
555 if (order == 0) {
556 __ClearPageReserved(page);
557 set_page_count(page, 0);
7835e98b 558 set_page_refcounted(page);
545b1ea9 559 __free_page(page);
a226f6c8 560 } else {
a226f6c8
DH
561 int loop;
562
545b1ea9 563 prefetchw(page);
a226f6c8
DH
564 for (loop = 0; loop < BITS_PER_LONG; loop++) {
565 struct page *p = &page[loop];
566
545b1ea9
NP
567 if (loop + 1 < BITS_PER_LONG)
568 prefetchw(p + 1);
a226f6c8
DH
569 __ClearPageReserved(p);
570 set_page_count(p, 0);
571 }
572
7835e98b 573 set_page_refcounted(page);
545b1ea9 574 __free_pages(page, order);
a226f6c8
DH
575 }
576}
577
1da177e4
LT
578
579/*
580 * The order of subdivision here is critical for the IO subsystem.
581 * Please do not alter this order without good reasons and regression
582 * testing. Specifically, as large blocks of memory are subdivided,
583 * the order in which smaller blocks are delivered depends on the order
584 * they're subdivided in this function. This is the primary factor
585 * influencing the order in which pages are delivered to the IO
586 * subsystem according to empirical testing, and this is also justified
587 * by considering the behavior of a buddy system containing a single
588 * large block of memory acted on by a series of small allocations.
589 * This behavior is a critical factor in sglist merging's success.
590 *
591 * -- wli
592 */
085cc7d5 593static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
594 int low, int high, struct free_area *area,
595 int migratetype)
1da177e4
LT
596{
597 unsigned long size = 1 << high;
598
599 while (high > low) {
600 area--;
601 high--;
602 size >>= 1;
725d704e 603 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 604 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
605 area->nr_free++;
606 set_page_order(&page[size], high);
607 }
1da177e4
LT
608}
609
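/*
 * Worked example (editorial note): serving an order-0 request from an order-3
 * block, expand(zone, page, 0, 3, area, migratetype) walks high = 3, 2, 1,
 * returning page[4..7] to the order-2 list, page[2..3] to the order-1 list
 * and page[1] to the order-0 list -- all on free_list[migratetype] -- and
 * leaves page[0] as the single page handed back to the caller.
 */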
1da177e4
LT
610/*
611 * This page is about to be returned from the page allocator
612 */
17cf4406 613static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
1da177e4 614{
92be2e33
NP
615 if (unlikely(page_mapcount(page) |
616 (page->mapping != NULL) |
617 (page_count(page) != 0) |
334795ec
HD
618 (page->flags & (
619 1 << PG_lru |
1da177e4
LT
620 1 << PG_private |
621 1 << PG_locked |
1da177e4
LT
622 1 << PG_active |
623 1 << PG_dirty |
334795ec 624 1 << PG_slab |
1da177e4 625 1 << PG_swapcache |
b5810039 626 1 << PG_writeback |
676165a8
NP
627 1 << PG_reserved |
628 1 << PG_buddy ))))
224abf92 629 bad_page(page);
1da177e4 630
689bcebf
HD
631 /*
632 * For now, we report if PG_reserved was found set, but do not
633 * clear it, and do not allocate the page: as a safety net.
634 */
635 if (PageReserved(page))
636 return 1;
637
d77c2d7c 638 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
1da177e4 639 1 << PG_referenced | 1 << PG_arch_1 |
5409bae0 640 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
4c21e2f2 641 set_page_private(page, 0);
7835e98b 642 set_page_refcounted(page);
cc102509
NP
643
644 arch_alloc_page(page, order);
1da177e4 645 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
646
647 if (gfp_flags & __GFP_ZERO)
648 prep_zero_page(page, order, gfp_flags);
649
650 if (order && (gfp_flags & __GFP_COMP))
651 prep_compound_page(page, order);
652
689bcebf 653 return 0;
1da177e4
LT
654}
655
b2a0ac88
MG
656/*
657 * This array describes the order lists are fallen back to when
658 * the free lists for the desirable migrate type are depleted
659 */
660static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
661 [MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
662 [MIGRATE_MOVABLE] = { MIGRATE_UNMOVABLE },
663};
664
665/* Remove an element from the buddy allocator from the fallback list */
666static struct page *__rmqueue_fallback(struct zone *zone, int order,
667 int start_migratetype)
668{
669 struct free_area * area;
670 int current_order;
671 struct page *page;
672 int migratetype, i;
673
674 /* Find the largest possible block of pages in the other list */
675 for (current_order = MAX_ORDER-1; current_order >= order;
676 --current_order) {
677 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
678 migratetype = fallbacks[start_migratetype][i];
679
680 area = &(zone->free_area[current_order]);
681 if (list_empty(&area->free_list[migratetype]))
682 continue;
683
684 page = list_entry(area->free_list[migratetype].next,
685 struct page, lru);
686 area->nr_free--;
687
688 /*
689 * If breaking a large block of pages, place the buddies
690 * on the preferred allocation list
691 */
692 if (unlikely(current_order >= MAX_ORDER / 2))
693 migratetype = start_migratetype;
694
695 /* Remove the page from the freelists */
696 list_del(&page->lru);
697 rmv_page_order(page);
698 __mod_zone_page_state(zone, NR_FREE_PAGES,
699 -(1UL << order));
700
701 if (current_order == MAX_ORDER - 1)
702 set_pageblock_migratetype(page,
703 start_migratetype);
704
705 expand(zone, page, order, current_order, area, migratetype);
706 return page;
707 }
708 }
709
710 return NULL;
711}
712
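/*
 * Worked example (editorial note): if an unmovable request finds every
 * free_list[MIGRATE_UNMOVABLE] empty, __rmqueue_fallback() above scans the
 * movable lists from MAX_ORDER-1 downwards.  When it splits a block of order
 * MAX_ORDER/2 or larger, the left-over buddies are placed on the unmovable
 * lists, and when it takes a whole MAX_ORDER-1 block the pageblock's
 * migratetype is switched as well, so a large steal keeps serving unmovable
 * allocations instead of polluting the movable lists.
 */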
1da177e4
LT
713/*
714 * Do the hard work of removing an element from the buddy allocator.
715 * Call me with the zone->lock already held.
716 */
b2a0ac88
MG
717static struct page *__rmqueue(struct zone *zone, unsigned int order,
718 int migratetype)
1da177e4
LT
719{
720 struct free_area * area;
721 unsigned int current_order;
722 struct page *page;
723
b2a0ac88 724 /* Find a page of the appropriate size in the preferred list */
1da177e4 725 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
b2a0ac88
MG
726 area = &(zone->free_area[current_order]);
727 if (list_empty(&area->free_list[migratetype]))
1da177e4
LT
728 continue;
729
b2a0ac88
MG
730 page = list_entry(area->free_list[migratetype].next,
731 struct page, lru);
1da177e4
LT
732 list_del(&page->lru);
733 rmv_page_order(page);
734 area->nr_free--;
d23ad423 735 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
b2a0ac88
MG
736 expand(zone, page, order, current_order, area, migratetype);
737 goto got_page;
1da177e4
LT
738 }
739
b2a0ac88
MG
740 page = __rmqueue_fallback(zone, order, migratetype);
741
742got_page:
743
744 return page;
1da177e4
LT
745}
746
747/*
748 * Obtain a specified number of elements from the buddy allocator, all under
749 * a single hold of the lock, for efficiency. Add them to the supplied list.
750 * Returns the number of new pages which were placed at *list.
751 */
752static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88
MG
753 unsigned long count, struct list_head *list,
754 int migratetype)
1da177e4 755{
1da177e4 756 int i;
1da177e4 757
c54ad30c 758 spin_lock(&zone->lock);
1da177e4 759 for (i = 0; i < count; ++i) {
b2a0ac88 760 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 761 if (unlikely(page == NULL))
1da177e4 762 break;
1da177e4
LT
763 list_add_tail(&page->lru, list);
764 }
c54ad30c 765 spin_unlock(&zone->lock);
085cc7d5 766 return i;
1da177e4
LT
767}
768
4ae7c039 769#ifdef CONFIG_NUMA
8fce4d8e 770/*
4037d452
CL
771 * Called from the vmstat counter updater to drain pagesets of this
772 * currently executing processor on remote nodes after they have
773 * expired.
774 *
879336c3
CL
775 * Note that this function must be called with the thread pinned to
776 * a single processor.
8fce4d8e 777 */
4037d452 778void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 779{
4ae7c039 780 unsigned long flags;
4037d452 781 int to_drain;
4ae7c039 782
4037d452
CL
783 local_irq_save(flags);
784 if (pcp->count >= pcp->batch)
785 to_drain = pcp->batch;
786 else
787 to_drain = pcp->count;
788 free_pages_bulk(zone, to_drain, &pcp->list, 0);
789 pcp->count -= to_drain;
790 local_irq_restore(flags);
4ae7c039
CL
791}
792#endif
793
1da177e4
LT
794static void __drain_pages(unsigned int cpu)
795{
c54ad30c 796 unsigned long flags;
1da177e4
LT
797 struct zone *zone;
798 int i;
799
800 for_each_zone(zone) {
801 struct per_cpu_pageset *pset;
802
f2e12bb2
CL
803 if (!populated_zone(zone))
804 continue;
805
e7c8d5c9 806 pset = zone_pcp(zone, cpu);
1da177e4
LT
807 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
808 struct per_cpu_pages *pcp;
809
810 pcp = &pset->pcp[i];
c54ad30c 811 local_irq_save(flags);
48db57f8
NP
812 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
813 pcp->count = 0;
c54ad30c 814 local_irq_restore(flags);
1da177e4
LT
815 }
816 }
817}
1da177e4 818
296699de 819#ifdef CONFIG_HIBERNATION
1da177e4
LT
820
821void mark_free_pages(struct zone *zone)
822{
f623f0db
RW
823 unsigned long pfn, max_zone_pfn;
824 unsigned long flags;
b2a0ac88 825 int order, t;
1da177e4
LT
826 struct list_head *curr;
827
828 if (!zone->spanned_pages)
829 return;
830
831 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
832
833 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
834 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
835 if (pfn_valid(pfn)) {
836 struct page *page = pfn_to_page(pfn);
837
7be98234
RW
838 if (!swsusp_page_is_forbidden(page))
839 swsusp_unset_page_free(page);
f623f0db 840 }
1da177e4 841
b2a0ac88
MG
842 for_each_migratetype_order(order, t) {
843 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 844 unsigned long i;
1da177e4 845
f623f0db
RW
846 pfn = page_to_pfn(list_entry(curr, struct page, lru));
847 for (i = 0; i < (1UL << order); i++)
7be98234 848 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 849 }
b2a0ac88 850 }
1da177e4
LT
851 spin_unlock_irqrestore(&zone->lock, flags);
852}
853
854/*
855 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
856 */
857void drain_local_pages(void)
858{
859 unsigned long flags;
860
861 local_irq_save(flags);
862 __drain_pages(smp_processor_id());
863 local_irq_restore(flags);
864}
296699de 865#endif /* CONFIG_HIBERNATION */
1da177e4 866
1da177e4
LT
867/*
868 * Free a 0-order page
869 */
1da177e4
LT
870static void fastcall free_hot_cold_page(struct page *page, int cold)
871{
872 struct zone *zone = page_zone(page);
873 struct per_cpu_pages *pcp;
874 unsigned long flags;
875
1da177e4
LT
876 if (PageAnon(page))
877 page->mapping = NULL;
224abf92 878 if (free_pages_check(page))
689bcebf
HD
879 return;
880
9858db50
NP
881 if (!PageHighMem(page))
882 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
dafb1367 883 arch_free_page(page, 0);
689bcebf
HD
884 kernel_map_pages(page, 1, 0);
885
e7c8d5c9 886 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
1da177e4 887 local_irq_save(flags);
f8891e5e 888 __count_vm_event(PGFREE);
1da177e4
LT
889 list_add(&page->lru, &pcp->list);
890 pcp->count++;
48db57f8
NP
891 if (pcp->count >= pcp->high) {
892 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
893 pcp->count -= pcp->batch;
894 }
1da177e4
LT
895 local_irq_restore(flags);
896 put_cpu();
897}
898
899void fastcall free_hot_page(struct page *page)
900{
901 free_hot_cold_page(page, 0);
902}
903
904void fastcall free_cold_page(struct page *page)
905{
906 free_hot_cold_page(page, 1);
907}
908
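/*
 * Usage note (editorial): free_hot_page() puts the page on the per-cpu "hot"
 * list (pcp[0]) on the assumption that its cache lines are still warm, while
 * free_cold_page() uses the "cold" list (pcp[1]).  buffered_rmqueue() below
 * selects the list the same way via __GFP_COLD, so callers that do not care
 * about cache warmth can leave the hot pages for those that do.
 */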
8dfcc9ba
NP
909/*
910 * split_page takes a non-compound higher-order page, and splits it into
911 * n (1<<order) sub-pages: page[0..n]
912 * Each sub-page must be freed individually.
913 *
914 * Note: this is probably too low level an operation for use in drivers.
915 * Please consult with lkml before using this in your driver.
916 */
917void split_page(struct page *page, unsigned int order)
918{
919 int i;
920
725d704e
NP
921 VM_BUG_ON(PageCompound(page));
922 VM_BUG_ON(!page_count(page));
7835e98b
NP
923 for (i = 1; i < (1 << order); i++)
924 set_page_refcounted(page + i);
8dfcc9ba 925}
8dfcc9ba 926
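/*
 * Typical pattern (editorial sketch, not taken from this file): a caller that
 * needs physically contiguous pages but wants to manage them one by one might
 * do
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, order);
 *	if (page) {
 *		split_page(page, order);
 *		...
 *		for (i = 0; i < (1 << order); i++)
 *			__free_page(page + i);
 *	}
 *
 * after the split every sub-page has its own reference count and must be
 * freed individually, as the comment above requires.
 */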
1da177e4
LT
927/*
928 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
929 * we cheat by calling it from here, in the order > 0 path. Saves a branch
930 * or two.
931 */
a74609fa
NP
932static struct page *buffered_rmqueue(struct zonelist *zonelist,
933 struct zone *zone, int order, gfp_t gfp_flags)
1da177e4
LT
934{
935 unsigned long flags;
689bcebf 936 struct page *page;
1da177e4 937 int cold = !!(gfp_flags & __GFP_COLD);
a74609fa 938 int cpu;
b2a0ac88 939 int migratetype = gfpflags_to_migratetype(gfp_flags);
1da177e4 940
689bcebf 941again:
a74609fa 942 cpu = get_cpu();
48db57f8 943 if (likely(order == 0)) {
1da177e4
LT
944 struct per_cpu_pages *pcp;
945
a74609fa 946 pcp = &zone_pcp(zone, cpu)->pcp[cold];
1da177e4 947 local_irq_save(flags);
a74609fa 948 if (!pcp->count) {
941c7105 949 pcp->count = rmqueue_bulk(zone, 0,
b2a0ac88 950 pcp->batch, &pcp->list, migratetype);
a74609fa
NP
951 if (unlikely(!pcp->count))
952 goto failed;
1da177e4 953 }
a74609fa
NP
954 page = list_entry(pcp->list.next, struct page, lru);
955 list_del(&page->lru);
956 pcp->count--;
7fb1d9fc 957 } else {
1da177e4 958 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 959 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
960 spin_unlock(&zone->lock);
961 if (!page)
962 goto failed;
1da177e4
LT
963 }
964
f8891e5e 965 __count_zone_vm_events(PGALLOC, zone, 1 << order);
ca889e6c 966 zone_statistics(zonelist, zone);
a74609fa
NP
967 local_irq_restore(flags);
968 put_cpu();
1da177e4 969
725d704e 970 VM_BUG_ON(bad_range(zone, page));
17cf4406 971 if (prep_new_page(page, order, gfp_flags))
a74609fa 972 goto again;
1da177e4 973 return page;
a74609fa
NP
974
975failed:
976 local_irq_restore(flags);
977 put_cpu();
978 return NULL;
1da177e4
LT
979}
980
7fb1d9fc 981#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
3148890b
NP
982#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
983#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
984#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
985#define ALLOC_HARDER 0x10 /* try to alloc harder */
986#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
987#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
7fb1d9fc 988
933e312e
AM
989#ifdef CONFIG_FAIL_PAGE_ALLOC
990
991static struct fail_page_alloc_attr {
992 struct fault_attr attr;
993
994 u32 ignore_gfp_highmem;
995 u32 ignore_gfp_wait;
54114994 996 u32 min_order;
933e312e
AM
997
998#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
999
1000 struct dentry *ignore_gfp_highmem_file;
1001 struct dentry *ignore_gfp_wait_file;
54114994 1002 struct dentry *min_order_file;
933e312e
AM
1003
1004#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1005
1006} fail_page_alloc = {
1007 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1008 .ignore_gfp_wait = 1,
1009 .ignore_gfp_highmem = 1,
54114994 1010 .min_order = 1,
933e312e
AM
1011};
1012
1013static int __init setup_fail_page_alloc(char *str)
1014{
1015 return setup_fault_attr(&fail_page_alloc.attr, str);
1016}
1017__setup("fail_page_alloc=", setup_fail_page_alloc);
1018
1019static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1020{
54114994
AM
1021 if (order < fail_page_alloc.min_order)
1022 return 0;
933e312e
AM
1023 if (gfp_mask & __GFP_NOFAIL)
1024 return 0;
1025 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1026 return 0;
1027 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1028 return 0;
1029
1030 return should_fail(&fail_page_alloc.attr, 1 << order);
1031}
1032
1033#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1034
1035static int __init fail_page_alloc_debugfs(void)
1036{
1037 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1038 struct dentry *dir;
1039 int err;
1040
1041 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1042 "fail_page_alloc");
1043 if (err)
1044 return err;
1045 dir = fail_page_alloc.attr.dentries.dir;
1046
1047 fail_page_alloc.ignore_gfp_wait_file =
1048 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1049 &fail_page_alloc.ignore_gfp_wait);
1050
1051 fail_page_alloc.ignore_gfp_highmem_file =
1052 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1053 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1054 fail_page_alloc.min_order_file =
1055 debugfs_create_u32("min-order", mode, dir,
1056 &fail_page_alloc.min_order);
933e312e
AM
1057
1058 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1059 !fail_page_alloc.ignore_gfp_highmem_file ||
1060 !fail_page_alloc.min_order_file) {
933e312e
AM
1061 err = -ENOMEM;
1062 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1063 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1064 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1065 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1066 }
1067
1068 return err;
1069}
1070
1071late_initcall(fail_page_alloc_debugfs);
1072
1073#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1074
1075#else /* CONFIG_FAIL_PAGE_ALLOC */
1076
1077static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1078{
1079 return 0;
1080}
1081
1082#endif /* CONFIG_FAIL_PAGE_ALLOC */
1083
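/*
 * Usage note (editorial, assuming the generic fault-injection syntax from
 * Documentation/fault-injection): the attribute can be configured on the
 * kernel command line as
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * and the knobs defined above (ignore-gfp-wait, ignore-gfp-highmem,
 * min-order) show up under debugfs, typically at
 * /sys/kernel/debug/fail_page_alloc/, when CONFIG_FAULT_INJECTION_DEBUG_FS
 * is enabled.
 */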
1da177e4
LT
1084/*
1085 * Return 1 if free pages are above 'mark'. This takes into account the order
1086 * of the allocation.
1087 */
1088int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
7fb1d9fc 1089 int classzone_idx, int alloc_flags)
1da177e4
LT
1090{
 1091 /* free_pages may go negative - that's OK */
d23ad423
CL
1092 long min = mark;
1093 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1da177e4
LT
1094 int o;
1095
7fb1d9fc 1096 if (alloc_flags & ALLOC_HIGH)
1da177e4 1097 min -= min / 2;
7fb1d9fc 1098 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1099 min -= min / 4;
1100
1101 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1102 return 0;
1103 for (o = 0; o < order; o++) {
1104 /* At the next order, this order's pages become unavailable */
1105 free_pages -= z->free_area[o].nr_free << o;
1106
1107 /* Require fewer higher order pages to be free */
1108 min >>= 1;
1109
1110 if (free_pages <= min)
1111 return 0;
1112 }
1113 return 1;
1114}
1115
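/*
 * Standalone model of the check above (editorial sketch, plain user-space C
 * with made-up numbers): an order-2 request against a min watermark of 128
 * pages and no lowmem reserve.  At each lower order that order's free pages
 * stop counting and the required margin is halved, exactly as in
 * zone_watermark_ok().
 */
#include <stdio.h>

int main(void)
{
	long nr_free[11] = { 40, 20, 10, 4, 0 };  /* free blocks per order (example) */
	int order = 2;
	long min = 128;
	long free_pages = 40*1 + 20*2 + 10*4 + 4*8 - (1 << order) + 1;  /* 149 */
	int o;

	if (free_pages <= min) {                 /* lowmem_reserve taken as 0 here */
		printf("fail at the order-0 check\n");
		return 0;
	}
	for (o = 0; o < order; o++) {
		free_pages -= nr_free[o] << o;   /* unusable for a higher-order request */
		min >>= 1;
		if (free_pages <= min) {
			printf("watermark not met once order-%d pages are excluded\n", o);
			return 0;
		}
	}
	printf("watermark ok for an order-%d allocation\n", order);
	return 0;
}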
9276b1bc
PJ
1116#ifdef CONFIG_NUMA
1117/*
1118 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1119 * skip over zones that are not allowed by the cpuset, or that have
1120 * been recently (in last second) found to be nearly full. See further
1121 * comments in mmzone.h. Reduces cache footprint of zonelist scans
 1122 * that have to skip over a lot of full or unallowed zones.
1123 *
1124 * If the zonelist cache is present in the passed in zonelist, then
1125 * returns a pointer to the allowed node mask (either the current
37b07e41 1126 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1127 *
1128 * If the zonelist cache is not available for this zonelist, does
1129 * nothing and returns NULL.
1130 *
1131 * If the fullzones BITMAP in the zonelist cache is stale (more than
1132 * a second since last zap'd) then we zap it out (clear its bits.)
1133 *
1134 * We hold off even calling zlc_setup, until after we've checked the
1135 * first zone in the zonelist, on the theory that most allocations will
1136 * be satisfied from that first zone, so best to examine that zone as
1137 * quickly as we can.
1138 */
1139static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1140{
1141 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1142 nodemask_t *allowednodes; /* zonelist_cache approximation */
1143
1144 zlc = zonelist->zlcache_ptr;
1145 if (!zlc)
1146 return NULL;
1147
1148 if (jiffies - zlc->last_full_zap > 1 * HZ) {
1149 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1150 zlc->last_full_zap = jiffies;
1151 }
1152
1153 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1154 &cpuset_current_mems_allowed :
37b07e41 1155 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1156 return allowednodes;
1157}
1158
1159/*
1160 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1161 * if it is worth looking at further for free memory:
1162 * 1) Check that the zone isn't thought to be full (doesn't have its
1163 * bit set in the zonelist_cache fullzones BITMAP).
1164 * 2) Check that the zones node (obtained from the zonelist_cache
1165 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1166 * Return true (non-zero) if zone is worth looking at further, or
1167 * else return false (zero) if it is not.
1168 *
1169 * This check -ignores- the distinction between various watermarks,
1170 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1171 * found to be full for any variation of these watermarks, it will
1172 * be considered full for up to one second by all requests, unless
1173 * we are so low on memory on all allowed nodes that we are forced
1174 * into the second scan of the zonelist.
1175 *
1176 * In the second scan we ignore this zonelist cache and exactly
 1177 * apply the watermarks to all zones, even if it is slower to do so.
1178 * We are low on memory in the second scan, and should leave no stone
1179 * unturned looking for a free page.
1180 */
1181static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1182 nodemask_t *allowednodes)
1183{
1184 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1185 int i; /* index of *z in zonelist zones */
1186 int n; /* node that zone *z is on */
1187
1188 zlc = zonelist->zlcache_ptr;
1189 if (!zlc)
1190 return 1;
1191
1192 i = z - zonelist->zones;
1193 n = zlc->z_to_n[i];
1194
1195 /* This zone is worth trying if it is allowed but not full */
1196 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1197}
1198
1199/*
1200 * Given 'z' scanning a zonelist, set the corresponding bit in
1201 * zlc->fullzones, so that subsequent attempts to allocate a page
1202 * from that zone don't waste time re-examining it.
1203 */
1204static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1205{
1206 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1207 int i; /* index of *z in zonelist zones */
1208
1209 zlc = zonelist->zlcache_ptr;
1210 if (!zlc)
1211 return;
1212
1213 i = z - zonelist->zones;
1214
1215 set_bit(i, zlc->fullzones);
1216}
1217
1218#else /* CONFIG_NUMA */
1219
1220static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1221{
1222 return NULL;
1223}
1224
1225static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1226 nodemask_t *allowednodes)
1227{
1228 return 1;
1229}
1230
1231static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1232{
1233}
1234#endif /* CONFIG_NUMA */
1235
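/*
 * Editorial note: the net effect of the cache above is that a zone which
 * failed its watermark check is skipped by every allocation on that zonelist
 * for up to one second (zlc->last_full_zap), after which the fullzones bitmap
 * is cleared and the zone is probed again; the !NUMA stubs keep this
 * bookkeeping off the non-NUMA fast path entirely.
 */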
7fb1d9fc 1236/*
0798e519 1237 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1238 * a page.
1239 */
1240static struct page *
1241get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
1242 struct zonelist *zonelist, int alloc_flags)
753ee728 1243{
9276b1bc 1244 struct zone **z;
7fb1d9fc 1245 struct page *page = NULL;
9276b1bc 1246 int classzone_idx = zone_idx(zonelist->zones[0]);
1192d526 1247 struct zone *zone;
9276b1bc
PJ
1248 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1249 int zlc_active = 0; /* set if using zonelist_cache */
1250 int did_zlc_setup = 0; /* just call zlc_setup() one time */
b377fd39 1251 enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
7fb1d9fc 1252
9276b1bc 1253zonelist_scan:
7fb1d9fc 1254 /*
9276b1bc 1255 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1256 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1257 */
9276b1bc
PJ
1258 z = zonelist->zones;
1259
7fb1d9fc 1260 do {
b377fd39
MG
1261 /*
1262 * In NUMA, this could be a policy zonelist which contains
1263 * zones that may not be allowed by the current gfp_mask.
1264 * Check the zone is allowed by the current flags
1265 */
1266 if (unlikely(alloc_should_filter_zonelist(zonelist))) {
1267 if (highest_zoneidx == -1)
1268 highest_zoneidx = gfp_zone(gfp_mask);
1269 if (zone_idx(*z) > highest_zoneidx)
1270 continue;
1271 }
1272
9276b1bc
PJ
1273 if (NUMA_BUILD && zlc_active &&
1274 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1275 continue;
1192d526 1276 zone = *z;
7fb1d9fc 1277 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1278 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1279 goto try_next_zone;
7fb1d9fc
RS
1280
1281 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
1282 unsigned long mark;
1283 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 1284 mark = zone->pages_min;
3148890b 1285 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 1286 mark = zone->pages_low;
3148890b 1287 else
1192d526 1288 mark = zone->pages_high;
0798e519
PJ
1289 if (!zone_watermark_ok(zone, order, mark,
1290 classzone_idx, alloc_flags)) {
9eeff239 1291 if (!zone_reclaim_mode ||
1192d526 1292 !zone_reclaim(zone, gfp_mask, order))
9276b1bc 1293 goto this_zone_full;
0798e519 1294 }
7fb1d9fc
RS
1295 }
1296
1192d526 1297 page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
0798e519 1298 if (page)
7fb1d9fc 1299 break;
9276b1bc
PJ
1300this_zone_full:
1301 if (NUMA_BUILD)
1302 zlc_mark_zone_full(zonelist, z);
1303try_next_zone:
1304 if (NUMA_BUILD && !did_zlc_setup) {
1305 /* we do zlc_setup after the first zone is tried */
1306 allowednodes = zlc_setup(zonelist, alloc_flags);
1307 zlc_active = 1;
1308 did_zlc_setup = 1;
1309 }
7fb1d9fc 1310 } while (*(++z) != NULL);
9276b1bc
PJ
1311
1312 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1313 /* Disable zlc cache for second zonelist scan */
1314 zlc_active = 0;
1315 goto zonelist_scan;
1316 }
7fb1d9fc 1317 return page;
753ee728
MH
1318}
1319
1da177e4
LT
1320/*
1321 * This is the 'heart' of the zoned buddy allocator.
1322 */
1323struct page * fastcall
dd0fc66f 1324__alloc_pages(gfp_t gfp_mask, unsigned int order,
1da177e4
LT
1325 struct zonelist *zonelist)
1326{
260b2367 1327 const gfp_t wait = gfp_mask & __GFP_WAIT;
7fb1d9fc 1328 struct zone **z;
1da177e4
LT
1329 struct page *page;
1330 struct reclaim_state reclaim_state;
1331 struct task_struct *p = current;
1da177e4 1332 int do_retry;
7fb1d9fc 1333 int alloc_flags;
1da177e4
LT
1334 int did_some_progress;
1335
1336 might_sleep_if(wait);
1337
933e312e
AM
1338 if (should_fail_alloc_page(gfp_mask, order))
1339 return NULL;
1340
6b1de916 1341restart:
7fb1d9fc 1342 z = zonelist->zones; /* the list of zones suitable for gfp_mask */
1da177e4 1343
7fb1d9fc 1344 if (unlikely(*z == NULL)) {
523b9458
CL
1345 /*
1346 * Happens if we have an empty zonelist as a result of
1347 * GFP_THISNODE being used on a memoryless node
1348 */
1da177e4
LT
1349 return NULL;
1350 }
6b1de916 1351
7fb1d9fc 1352 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1353 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
7fb1d9fc
RS
1354 if (page)
1355 goto got_pg;
1da177e4 1356
952f3b51
CL
1357 /*
1358 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1359 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1360 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1361 * using a larger set of nodes after it has established that the
1362 * allowed per node queues are empty and that nodes are
1363 * over allocated.
1364 */
1365 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1366 goto nopage;
1367
0798e519 1368 for (z = zonelist->zones; *z; z++)
43b0bc00 1369 wakeup_kswapd(*z, order);
1da177e4 1370
9bf2229f 1371 /*
7fb1d9fc
RS
1372 * OK, we're below the kswapd watermark and have kicked background
1373 * reclaim. Now things get more complex, so set up alloc_flags according
1374 * to how we want to proceed.
1375 *
1376 * The caller may dip into page reserves a bit more if the caller
1377 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1378 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1379 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1380 */
3148890b 1381 alloc_flags = ALLOC_WMARK_MIN;
7fb1d9fc
RS
1382 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1383 alloc_flags |= ALLOC_HARDER;
1384 if (gfp_mask & __GFP_HIGH)
1385 alloc_flags |= ALLOC_HIGH;
bdd804f4
PJ
1386 if (wait)
1387 alloc_flags |= ALLOC_CPUSET;
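	/*
	 * Worked example (editorial note): a GFP_ATOMIC caller (__GFP_HIGH,
	 * no __GFP_WAIT) ends up with ALLOC_WMARK_MIN | ALLOC_HARDER |
	 * ALLOC_HIGH, while a GFP_KERNEL caller gets ALLOC_WMARK_MIN |
	 * ALLOC_CPUSET and, being allowed to wait, can fall through to
	 * direct reclaim below if this second pass fails.
	 */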
1da177e4
LT
1388
1389 /*
1390 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1391 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1392 *
1393 * This is the last chance, in general, before the goto nopage.
1394 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1395 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1396 */
7fb1d9fc
RS
1397 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
1398 if (page)
1399 goto got_pg;
1da177e4
LT
1400
1401 /* This allocation should allow future memory freeing. */
b84a35be 1402
b43a57bb 1403rebalance:
b84a35be
NP
1404 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1405 && !in_interrupt()) {
1406 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
885036d3 1407nofail_alloc:
b84a35be 1408 /* go through the zonelist yet again, ignoring mins */
7fb1d9fc 1409 page = get_page_from_freelist(gfp_mask, order,
47f3a867 1410 zonelist, ALLOC_NO_WATERMARKS);
7fb1d9fc
RS
1411 if (page)
1412 goto got_pg;
885036d3 1413 if (gfp_mask & __GFP_NOFAIL) {
3fcfab16 1414 congestion_wait(WRITE, HZ/50);
885036d3
KK
1415 goto nofail_alloc;
1416 }
1da177e4
LT
1417 }
1418 goto nopage;
1419 }
1420
1421 /* Atomic allocations - we can't balance anything */
1422 if (!wait)
1423 goto nopage;
1424
1da177e4
LT
1425 cond_resched();
1426
1427 /* We now go into synchronous reclaim */
3e0d98b9 1428 cpuset_memory_pressure_bump();
1da177e4
LT
1429 p->flags |= PF_MEMALLOC;
1430 reclaim_state.reclaimed_slab = 0;
1431 p->reclaim_state = &reclaim_state;
1432
5ad333eb 1433 did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
1da177e4
LT
1434
1435 p->reclaim_state = NULL;
1436 p->flags &= ~PF_MEMALLOC;
1437
1438 cond_resched();
1439
1440 if (likely(did_some_progress)) {
7fb1d9fc
RS
1441 page = get_page_from_freelist(gfp_mask, order,
1442 zonelist, alloc_flags);
1443 if (page)
1444 goto got_pg;
1da177e4
LT
1445 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1446 /*
1447 * Go through the zonelist yet one more time, keep
1448 * very high watermark here, this is only to catch
1449 * a parallel oom killing, we must fail if we're still
1450 * under heavy pressure.
1451 */
7fb1d9fc 1452 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1453 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
7fb1d9fc
RS
1454 if (page)
1455 goto got_pg;
1da177e4 1456
a8bbf72a
MG
1457 /* The OOM killer will not help higher order allocs so fail */
1458 if (order > PAGE_ALLOC_COSTLY_ORDER)
1459 goto nopage;
1460
9b0f8b04 1461 out_of_memory(zonelist, gfp_mask, order);
1da177e4
LT
1462 goto restart;
1463 }
1464
1465 /*
1466 * Don't let big-order allocations loop unless the caller explicitly
1467 * requests that. Wait for some write requests to complete then retry.
1468 *
1469 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1470 * <= 3, but that may not be true in other implementations.
1471 */
1472 do_retry = 0;
1473 if (!(gfp_mask & __GFP_NORETRY)) {
5ad333eb
AW
1474 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1475 (gfp_mask & __GFP_REPEAT))
1da177e4
LT
1476 do_retry = 1;
1477 if (gfp_mask & __GFP_NOFAIL)
1478 do_retry = 1;
1479 }
1480 if (do_retry) {
3fcfab16 1481 congestion_wait(WRITE, HZ/50);
1da177e4
LT
1482 goto rebalance;
1483 }
1484
1485nopage:
1486 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1487 printk(KERN_WARNING "%s: page allocation failure."
1488 " order:%d, mode:0x%x\n",
1489 p->comm, order, gfp_mask);
1490 dump_stack();
578c2fd6 1491 show_mem();
1da177e4 1492 }
1da177e4 1493got_pg:
1da177e4
LT
1494 return page;
1495}
1496
1497EXPORT_SYMBOL(__alloc_pages);
1498
1499/*
1500 * Common helper functions.
1501 */
dd0fc66f 1502fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1503{
1504 struct page * page;
1505 page = alloc_pages(gfp_mask, order);
1506 if (!page)
1507 return 0;
1508 return (unsigned long) page_address(page);
1509}
1510
1511EXPORT_SYMBOL(__get_free_pages);
1512
dd0fc66f 1513fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4
LT
1514{
1515 struct page * page;
1516
1517 /*
1518 * get_zeroed_page() returns a 32-bit address, which cannot represent
1519 * a highmem page
1520 */
725d704e 1521 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1da177e4
LT
1522
1523 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1524 if (page)
1525 return (unsigned long) page_address(page);
1526 return 0;
1527}
1528
1529EXPORT_SYMBOL(get_zeroed_page);
1530
1531void __pagevec_free(struct pagevec *pvec)
1532{
1533 int i = pagevec_count(pvec);
1534
1535 while (--i >= 0)
1536 free_hot_cold_page(pvec->pages[i], pvec->cold);
1537}
1538
1539fastcall void __free_pages(struct page *page, unsigned int order)
1540{
b5810039 1541 if (put_page_testzero(page)) {
1da177e4
LT
1542 if (order == 0)
1543 free_hot_page(page);
1544 else
1545 __free_pages_ok(page, order);
1546 }
1547}
1548
1549EXPORT_SYMBOL(__free_pages);
1550
1551fastcall void free_pages(unsigned long addr, unsigned int order)
1552{
1553 if (addr != 0) {
725d704e 1554 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
1555 __free_pages(virt_to_page((void *)addr), order);
1556 }
1557}
1558
1559EXPORT_SYMBOL(free_pages);
1560
1da177e4
LT
1561static unsigned int nr_free_zone_pages(int offset)
1562{
e310fd43
MB
1563 /* Just pick one node, since fallback list is circular */
1564 pg_data_t *pgdat = NODE_DATA(numa_node_id());
1da177e4
LT
1565 unsigned int sum = 0;
1566
e310fd43
MB
1567 struct zonelist *zonelist = pgdat->node_zonelists + offset;
1568 struct zone **zonep = zonelist->zones;
1569 struct zone *zone;
1da177e4 1570
e310fd43
MB
1571 for (zone = *zonep++; zone; zone = *zonep++) {
1572 unsigned long size = zone->present_pages;
1573 unsigned long high = zone->pages_high;
1574 if (size > high)
1575 sum += size - high;
1da177e4
LT
1576 }
1577
1578 return sum;
1579}
1580
1581/*
1582 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1583 */
1584unsigned int nr_free_buffer_pages(void)
1585{
af4ca457 1586 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 1587}
c2f1a551 1588EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
1589
1590/*
1591 * Amount of free RAM allocatable within all zones
1592 */
1593unsigned int nr_free_pagecache_pages(void)
1594{
2a1e274a 1595 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 1596}
08e0f6a9
CL
1597
1598static inline void show_node(struct zone *zone)
1da177e4 1599{
08e0f6a9 1600 if (NUMA_BUILD)
25ba77c1 1601 printk("Node %d ", zone_to_nid(zone));
1da177e4 1602}
1da177e4 1603
1da177e4
LT
1604void si_meminfo(struct sysinfo *val)
1605{
1606 val->totalram = totalram_pages;
1607 val->sharedram = 0;
d23ad423 1608 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 1609 val->bufferram = nr_blockdev_pages();
1da177e4
LT
1610 val->totalhigh = totalhigh_pages;
1611 val->freehigh = nr_free_highpages();
1da177e4
LT
1612 val->mem_unit = PAGE_SIZE;
1613}
1614
1615EXPORT_SYMBOL(si_meminfo);
1616
1617#ifdef CONFIG_NUMA
1618void si_meminfo_node(struct sysinfo *val, int nid)
1619{
1620 pg_data_t *pgdat = NODE_DATA(nid);
1621
1622 val->totalram = pgdat->node_present_pages;
d23ad423 1623 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 1624#ifdef CONFIG_HIGHMEM
1da177e4 1625 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
1626 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1627 NR_FREE_PAGES);
98d2b0eb
CL
1628#else
1629 val->totalhigh = 0;
1630 val->freehigh = 0;
1631#endif
1da177e4
LT
1632 val->mem_unit = PAGE_SIZE;
1633}
1634#endif
1635
1636#define K(x) ((x) << (PAGE_SHIFT-10))
1637
1638/*
1639 * Show free area list (used inside shift_scroll-lock stuff)
1640 * We also calculate the percentage fragmentation. We do this by counting the
1641 * memory on each free list with the exception of the first item on the list.
1642 */
1643void show_free_areas(void)
1644{
c7241913 1645 int cpu;
1da177e4
LT
1646 struct zone *zone;
1647
1648 for_each_zone(zone) {
c7241913 1649 if (!populated_zone(zone))
1da177e4 1650 continue;
c7241913
JS
1651
1652 show_node(zone);
1653 printk("%s per-cpu:\n", zone->name);
1da177e4 1654
6b482c67 1655 for_each_online_cpu(cpu) {
1da177e4
LT
1656 struct per_cpu_pageset *pageset;
1657
e7c8d5c9 1658 pageset = zone_pcp(zone, cpu);
1da177e4 1659
c7241913
JS
1660 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
1661 "Cold: hi:%5d, btch:%4d usd:%4d\n",
1662 cpu, pageset->pcp[0].high,
1663 pageset->pcp[0].batch, pageset->pcp[0].count,
1664 pageset->pcp[1].high, pageset->pcp[1].batch,
1665 pageset->pcp[1].count);
1da177e4
LT
1666 }
1667 }
1668
a25700a5 1669 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
d23ad423 1670 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
65e458d4
CL
1671 global_page_state(NR_ACTIVE),
1672 global_page_state(NR_INACTIVE),
b1e7a8fd 1673 global_page_state(NR_FILE_DIRTY),
ce866b34 1674 global_page_state(NR_WRITEBACK),
fd39fc85 1675 global_page_state(NR_UNSTABLE_NFS),
d23ad423 1676 global_page_state(NR_FREE_PAGES),
972d1a7b
CL
1677 global_page_state(NR_SLAB_RECLAIMABLE) +
1678 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 1679 global_page_state(NR_FILE_MAPPED),
a25700a5
AM
1680 global_page_state(NR_PAGETABLE),
1681 global_page_state(NR_BOUNCE));
1da177e4
LT
1682
1683 for_each_zone(zone) {
1684 int i;
1685
c7241913
JS
1686 if (!populated_zone(zone))
1687 continue;
1688
1da177e4
LT
1689 show_node(zone);
1690 printk("%s"
1691 " free:%lukB"
1692 " min:%lukB"
1693 " low:%lukB"
1694 " high:%lukB"
1695 " active:%lukB"
1696 " inactive:%lukB"
1697 " present:%lukB"
1698 " pages_scanned:%lu"
1699 " all_unreclaimable? %s"
1700 "\n",
1701 zone->name,
d23ad423 1702 K(zone_page_state(zone, NR_FREE_PAGES)),
1da177e4
LT
1703 K(zone->pages_min),
1704 K(zone->pages_low),
1705 K(zone->pages_high),
c8785385
CL
1706 K(zone_page_state(zone, NR_ACTIVE)),
1707 K(zone_page_state(zone, NR_INACTIVE)),
1da177e4
LT
1708 K(zone->present_pages),
1709 zone->pages_scanned,
1710 (zone->all_unreclaimable ? "yes" : "no")
1711 );
1712 printk("lowmem_reserve[]:");
1713 for (i = 0; i < MAX_NR_ZONES; i++)
1714 printk(" %lu", zone->lowmem_reserve[i]);
1715 printk("\n");
1716 }
1717
1718 for_each_zone(zone) {
8f9de51a 1719 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4 1720
c7241913
JS
1721 if (!populated_zone(zone))
1722 continue;
1723
1da177e4
LT
1724 show_node(zone);
1725 printk("%s: ", zone->name);
1da177e4
LT
1726
1727 spin_lock_irqsave(&zone->lock, flags);
1728 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
1729 nr[order] = zone->free_area[order].nr_free;
1730 total += nr[order] << order;
1da177e4
LT
1731 }
1732 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
1733 for (order = 0; order < MAX_ORDER; order++)
1734 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
1735 printk("= %lukB\n", K(total));
1736 }
1737
1738 show_swap_cache_info();
1739}
1740
1741/*
1742 * Builds allocation fallback zone lists.
1a93205b
CL
1743 *
1744 * Add all populated zones of a node to the zonelist.
1da177e4 1745 */
f0c0b2b8
KH
1746static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1747 int nr_zones, enum zone_type zone_type)
1da177e4 1748{
1a93205b
CL
1749 struct zone *zone;
1750
98d2b0eb 1751 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 1752 zone_type++;
02a68a5e
CL
1753
1754 do {
2f6726e5 1755 zone_type--;
070f8032 1756 zone = pgdat->node_zones + zone_type;
1a93205b 1757 if (populated_zone(zone)) {
070f8032
CL
1758 zonelist->zones[nr_zones++] = zone;
1759 check_highest_zone(zone_type);
1da177e4 1760 }
02a68a5e 1761
2f6726e5 1762 } while (zone_type);
070f8032 1763 return nr_zones;
1da177e4
LT
1764}
1765
f0c0b2b8
KH
1766
1767/*
1768 * zonelist_order:
1769 * 0 = automatic detection of better ordering.
1770 * 1 = order by ([node] distance, -zonetype)
1771 * 2 = order by (-zonetype, [node] distance)
1772 *
1773 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1774 * the same zonelist. So only NUMA can configure this param.
1775 */
1776#define ZONELIST_ORDER_DEFAULT 0
1777#define ZONELIST_ORDER_NODE 1
1778#define ZONELIST_ORDER_ZONE 2
1779
1780/* zonelist order in the kernel.
1781 * set_zonelist_order() will set this to NODE or ZONE.
1782 */
1783static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1784static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1785
1786
1da177e4 1787#ifdef CONFIG_NUMA
f0c0b2b8
KH
 1788/* The zonelist order the user specified (boot parameter or sysctl) */
1789static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1790/* string for sysctl */
1791#define NUMA_ZONELIST_ORDER_LEN 16
1792char numa_zonelist_order[16] = "default";
1793
1794/*
 1795 * interface to configure zonelist ordering.
 1796 * command line option "numa_zonelist_order"
 1797 * = "[dD]efault" - default, automatic configuration.
 1798 * = "[nN]ode"    - order by node locality, then by zone within node
 1799 * = "[zZ]one"    - order by zone, then by locality within zone
1800 */
1801
1802static int __parse_numa_zonelist_order(char *s)
1803{
1804 if (*s == 'd' || *s == 'D') {
1805 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1806 } else if (*s == 'n' || *s == 'N') {
1807 user_zonelist_order = ZONELIST_ORDER_NODE;
1808 } else if (*s == 'z' || *s == 'Z') {
1809 user_zonelist_order = ZONELIST_ORDER_ZONE;
1810 } else {
1811 printk(KERN_WARNING
1812 "Ignoring invalid numa_zonelist_order value: "
1813 "%s\n", s);
1814 return -EINVAL;
1815 }
1816 return 0;
1817}
1818
1819static __init int setup_numa_zonelist_order(char *s)
1820{
1821 if (s)
1822 return __parse_numa_zonelist_order(s);
1823 return 0;
1824}
1825early_param("numa_zonelist_order", setup_numa_zonelist_order);
1826
1827/*
1828 * sysctl handler for numa_zonelist_order
1829 */
1830int numa_zonelist_order_handler(ctl_table *table, int write,
1831 struct file *file, void __user *buffer, size_t *length,
1832 loff_t *ppos)
1833{
1834 char saved_string[NUMA_ZONELIST_ORDER_LEN];
1835 int ret;
1836
1837 if (write)
1838 strncpy(saved_string, (char*)table->data,
1839 NUMA_ZONELIST_ORDER_LEN);
1840 ret = proc_dostring(table, write, file, buffer, length, ppos);
1841 if (ret)
1842 return ret;
1843 if (write) {
1844 int oldval = user_zonelist_order;
1845 if (__parse_numa_zonelist_order((char*)table->data)) {
1846 /*
1847 * bogus value. restore saved string
1848 */
1849 strncpy((char*)table->data, saved_string,
1850 NUMA_ZONELIST_ORDER_LEN);
1851 user_zonelist_order = oldval;
1852 } else if (oldval != user_zonelist_order)
1853 build_all_zonelists();
1854 }
1855 return 0;
1856}
1857
1858
1da177e4 1859#define MAX_NODE_LOAD (num_online_nodes())
f0c0b2b8
KH
1860static int node_load[MAX_NUMNODES];
1861
1da177e4 1862/**
4dc3b16b 1863 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
1864 * @node: node whose fallback list we're appending
1865 * @used_node_mask: nodemask_t of already used nodes
1866 *
1867 * We use a number of factors to determine which is the next node that should
1868 * appear on a given node's fallback list. The node should not have appeared
1869 * already in @node's fallback list, and it should be the next closest node
1870 * according to the distance array (which contains arbitrary distance values
1871 * from each node to each node in the system), and should also prefer nodes
1872 * with no CPUs, since presumably they'll have very little allocation pressure
1873 * on them otherwise.
1874 * It returns -1 if no node is found.
1875 */
f0c0b2b8 1876static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 1877{
4cf808eb 1878 int n, val;
1da177e4
LT
1879 int min_val = INT_MAX;
1880 int best_node = -1;
1881
4cf808eb
LT
1882 /* Use the local node if we haven't already */
1883 if (!node_isset(node, *used_node_mask)) {
1884 node_set(node, *used_node_mask);
1885 return node;
1886 }
1da177e4 1887
37b07e41 1888 for_each_node_state(n, N_HIGH_MEMORY) {
4cf808eb 1889 cpumask_t tmp;
1da177e4
LT
1890
1891 /* Don't want a node to appear more than once */
1892 if (node_isset(n, *used_node_mask))
1893 continue;
1894
1da177e4
LT
1895 /* Use the distance array to find the distance */
1896 val = node_distance(node, n);
1897
4cf808eb
LT
1898 /* Penalize nodes under us ("prefer the next node") */
1899 val += (n < node);
1900
1da177e4
LT
1901 /* Give preference to headless and unused nodes */
1902 tmp = node_to_cpumask(n);
1903 if (!cpus_empty(tmp))
1904 val += PENALTY_FOR_NODE_WITH_CPUS;
1905
1906 /* Slight preference for less loaded node */
1907 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1908 val += node_load[n];
1909
1910 if (val < min_val) {
1911 min_val = val;
1912 best_node = n;
1913 }
1914 }
1915
1916 if (best_node >= 0)
1917 node_set(best_node, *used_node_mask);
1918
1919 return best_node;
1920}
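/*
 * Illustrative userspace sketch of the scoring above (hypothetical names;
 * not kernel code). The distance, CPU penalty and load values are made-up
 * inputs: the point is only that distance dominates, the ordering/CPU
 * penalties come next, and the per-node load acts as a round-robin tie breaker.
 */
#include <stdio.h>

#define DEMO_MAX_NUMNODES	4
#define DEMO_MAX_NODE_LOAD	DEMO_MAX_NUMNODES
#define DEMO_CPU_PENALTY	1	/* stands in for PENALTY_FOR_NODE_WITH_CPUS */

static int demo_score(int from, int n, int distance, int has_cpus, int load)
{
	int val = distance;

	val += (n < from);			/* prefer the next node over the previous one */
	if (has_cpus)
		val += DEMO_CPU_PENALTY;	/* prefer headless nodes */
	val *= DEMO_MAX_NODE_LOAD * DEMO_MAX_NUMNODES;
	return val + load;			/* load breaks ties round-robin */
}

int main(void)
{
	/*
	 * Two candidates at the same distance from node 0: the unloaded,
	 * headless node 2 scores lower (better) than the loaded node 1
	 * that has CPUs.
	 */
	printf("node1=%d node2=%d\n",
	       demo_score(0, 1, 20, 1, 3),
	       demo_score(0, 2, 20, 0, 0));
	return 0;
}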
1921
f0c0b2b8
KH
1922
1923/*
1924 * Build zonelists ordered by node and zones within node.
1925 * This results in maximum locality--normal zone overflows into local
1926 * DMA zone, if any--but risks exhausting DMA zone.
1927 */
1928static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 1929{
19655d34 1930 enum zone_type i;
f0c0b2b8 1931 int j;
1da177e4 1932 struct zonelist *zonelist;
f0c0b2b8
KH
1933
1934 for (i = 0; i < MAX_NR_ZONES; i++) {
1935 zonelist = pgdat->node_zonelists + i;
1936 for (j = 0; zonelist->zones[j] != NULL; j++)
1937 ;
1938 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1939 zonelist->zones[j] = NULL;
1940 }
1941}
1942
523b9458
CL
1943/*
1944 * Build gfp_thisnode zonelists
1945 */
1946static void build_thisnode_zonelists(pg_data_t *pgdat)
1947{
1948 enum zone_type i;
1949 int j;
1950 struct zonelist *zonelist;
1951
1952 for (i = 0; i < MAX_NR_ZONES; i++) {
1953 zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
1954 j = build_zonelists_node(pgdat, zonelist, 0, i);
1955 zonelist->zones[j] = NULL;
1956 }
1957}
1958
f0c0b2b8
KH
1959/*
1960 * Build zonelists ordered by zone and nodes within zones.
1961 * This results in conserving DMA zone[s] until all Normal memory is
 1962 * exhausted, but results in overflowing to a remote node while memory
 1963 * may still exist in the local DMA zone.
1964 */
1965static int node_order[MAX_NUMNODES];
1966
1967static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
1968{
1969 enum zone_type i;
1970 int pos, j, node;
1971 int zone_type; /* needs to be signed */
1972 struct zone *z;
1973 struct zonelist *zonelist;
1974
1975 for (i = 0; i < MAX_NR_ZONES; i++) {
1976 zonelist = pgdat->node_zonelists + i;
1977 pos = 0;
1978 for (zone_type = i; zone_type >= 0; zone_type--) {
1979 for (j = 0; j < nr_nodes; j++) {
1980 node = node_order[j];
1981 z = &NODE_DATA(node)->node_zones[zone_type];
1982 if (populated_zone(z)) {
1983 zonelist->zones[pos++] = z;
1984 check_highest_zone(zone_type);
1985 }
1986 }
1987 }
1988 zonelist->zones[pos] = NULL;
1989 }
1990}
1991
1992static int default_zonelist_order(void)
1993{
1994 int nid, zone_type;
1995 unsigned long low_kmem_size,total_size;
1996 struct zone *z;
1997 int average_size;
1998 /*
 1999 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
 2000 * If they are really small and used heavily, the system can fall
 2001 * into OOM very easily.
 2002 * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2003 */
2004 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
2005 low_kmem_size = 0;
2006 total_size = 0;
2007 for_each_online_node(nid) {
2008 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2009 z = &NODE_DATA(nid)->node_zones[zone_type];
2010 if (populated_zone(z)) {
2011 if (zone_type < ZONE_NORMAL)
2012 low_kmem_size += z->present_pages;
2013 total_size += z->present_pages;
2014 }
2015 }
2016 }
 2017 if (!low_kmem_size || /* there is no DMA area. */
2018 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2019 return ZONELIST_ORDER_NODE;
2020 /*
2021 * look into each node's config.
 2022 * If there is a node whose DMA/DMA32 memory is a very large share of
 2023 * its local memory, NODE_ORDER may be suitable.
2024 */
37b07e41
LS
2025 average_size = total_size /
2026 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2027 for_each_online_node(nid) {
2028 low_kmem_size = 0;
2029 total_size = 0;
2030 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2031 z = &NODE_DATA(nid)->node_zones[zone_type];
2032 if (populated_zone(z)) {
2033 if (zone_type < ZONE_NORMAL)
2034 low_kmem_size += z->present_pages;
2035 total_size += z->present_pages;
2036 }
2037 }
2038 if (low_kmem_size &&
2039 total_size > average_size && /* ignore small node */
2040 low_kmem_size > total_size * 70/100)
2041 return ZONELIST_ORDER_NODE;
2042 }
2043 return ZONELIST_ORDER_ZONE;
2044}
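/*
 * Compact userspace sketch of the heuristic above (hypothetical names, sizes
 * in pages). It collapses the per-node loop into a single representative node
 * check. Returns 1 for "Node" ordering, 0 for "Zone" ordering.
 */
int demo_default_order(unsigned long global_low, unsigned long global_total,
		       unsigned long node_low, unsigned long node_total,
		       unsigned long average_node_size)
{
	/* No DMA/DMA32 at all, or it is already most of memory: Node order. */
	if (!global_low || global_low > global_total / 2)
		return 1;
	/* A non-tiny node whose memory is >70% DMA/DMA32: Node order too. */
	if (node_low && node_total > average_node_size &&
	    node_low > node_total * 70 / 100)
		return 1;
	return 0;	/* otherwise conserve DMA/DMA32: Zone order */
}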
2045
2046static void set_zonelist_order(void)
2047{
2048 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2049 current_zonelist_order = default_zonelist_order();
2050 else
2051 current_zonelist_order = user_zonelist_order;
2052}
2053
2054static void build_zonelists(pg_data_t *pgdat)
2055{
2056 int j, node, load;
2057 enum zone_type i;
1da177e4 2058 nodemask_t used_mask;
f0c0b2b8
KH
2059 int local_node, prev_node;
2060 struct zonelist *zonelist;
2061 int order = current_zonelist_order;
1da177e4
LT
2062
2063 /* initialize zonelists */
523b9458 2064 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4
LT
2065 zonelist = pgdat->node_zonelists + i;
2066 zonelist->zones[0] = NULL;
2067 }
2068
2069 /* NUMA-aware ordering of nodes */
2070 local_node = pgdat->node_id;
2071 load = num_online_nodes();
2072 prev_node = local_node;
2073 nodes_clear(used_mask);
f0c0b2b8
KH
2074
2075 memset(node_load, 0, sizeof(node_load));
2076 memset(node_order, 0, sizeof(node_order));
2077 j = 0;
2078
1da177e4 2079 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2080 int distance = node_distance(local_node, node);
2081
2082 /*
2083 * If another node is sufficiently far away then it is better
2084 * to reclaim pages in a zone before going off node.
2085 */
2086 if (distance > RECLAIM_DISTANCE)
2087 zone_reclaim_mode = 1;
2088
1da177e4
LT
2089 /*
2090 * We don't want to pressure a particular node.
2091 * So adding penalty to the first node in same
2092 * distance group to make it round-robin.
2093 */
9eeff239 2094 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2095 node_load[node] = load;
2096
1da177e4
LT
2097 prev_node = node;
2098 load--;
f0c0b2b8
KH
2099 if (order == ZONELIST_ORDER_NODE)
2100 build_zonelists_in_node_order(pgdat, node);
2101 else
2102 node_order[j++] = node; /* remember order */
2103 }
1da177e4 2104
f0c0b2b8
KH
2105 if (order == ZONELIST_ORDER_ZONE) {
2106 /* calculate node order -- i.e., DMA last! */
2107 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2108 }
523b9458
CL
2109
2110 build_thisnode_zonelists(pgdat);
1da177e4
LT
2111}
2112
9276b1bc 2113/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2114static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc
PJ
2115{
2116 int i;
2117
2118 for (i = 0; i < MAX_NR_ZONES; i++) {
2119 struct zonelist *zonelist;
2120 struct zonelist_cache *zlc;
2121 struct zone **z;
2122
2123 zonelist = pgdat->node_zonelists + i;
2124 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2125 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2126 for (z = zonelist->zones; *z; z++)
2127 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
2128 }
2129}
2130
f0c0b2b8 2131
1da177e4
LT
2132#else /* CONFIG_NUMA */
2133
f0c0b2b8
KH
2134static void set_zonelist_order(void)
2135{
2136 current_zonelist_order = ZONELIST_ORDER_ZONE;
2137}
2138
2139static void build_zonelists(pg_data_t *pgdat)
1da177e4 2140{
19655d34
CL
2141 int node, local_node;
2142 enum zone_type i,j;
1da177e4
LT
2143
2144 local_node = pgdat->node_id;
19655d34 2145 for (i = 0; i < MAX_NR_ZONES; i++) {
1da177e4
LT
2146 struct zonelist *zonelist;
2147
2148 zonelist = pgdat->node_zonelists + i;
2149
19655d34 2150 j = build_zonelists_node(pgdat, zonelist, 0, i);
1da177e4
LT
2151 /*
2152 * Now we build the zonelist so that it contains the zones
2153 * of all the other nodes.
2154 * We don't want to pressure a particular node, so when
2155 * building the zones for node N, we make sure that the
2156 * zones coming right after the local ones are those from
2157 * node N+1 (modulo N)
2158 */
2159 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2160 if (!node_online(node))
2161 continue;
19655d34 2162 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
2163 }
2164 for (node = 0; node < local_node; node++) {
2165 if (!node_online(node))
2166 continue;
19655d34 2167 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
2168 }
2169
2170 zonelist->zones[j] = NULL;
2171 }
2172}
2173
9276b1bc 2174/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2175static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc
PJ
2176{
2177 int i;
2178
2179 for (i = 0; i < MAX_NR_ZONES; i++)
2180 pgdat->node_zonelists[i].zlcache_ptr = NULL;
2181}
2182
1da177e4
LT
2183#endif /* CONFIG_NUMA */
2184
6811378e 2185/* the int return value exists only to satisfy stop_machine_run() */
f0c0b2b8 2186static int __build_all_zonelists(void *dummy)
1da177e4 2187{
6811378e 2188 int nid;
9276b1bc
PJ
2189
2190 for_each_online_node(nid) {
7ea1530a
CL
2191 pg_data_t *pgdat = NODE_DATA(nid);
2192
2193 build_zonelists(pgdat);
2194 build_zonelist_cache(pgdat);
9276b1bc 2195 }
6811378e
YG
2196 return 0;
2197}
2198
f0c0b2b8 2199void build_all_zonelists(void)
6811378e 2200{
f0c0b2b8
KH
2201 set_zonelist_order();
2202
6811378e 2203 if (system_state == SYSTEM_BOOTING) {
423b41d7 2204 __build_all_zonelists(NULL);
6811378e
YG
2205 cpuset_init_current_mems_allowed();
2206 } else {
 2207 /* we have to stop all cpus to guarantee there is no user
2208 of zonelist */
2209 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2210 /* cpuset refresh routine should be here */
2211 }
bd1e22b8 2212 vm_total_pages = nr_free_pagecache_pages();
f0c0b2b8
KH
2213 printk("Built %i zonelists in %s order. Total pages: %ld\n",
2214 num_online_nodes(),
2215 zonelist_order_name[current_zonelist_order],
2216 vm_total_pages);
2217#ifdef CONFIG_NUMA
2218 printk("Policy zone: %s\n", zone_names[policy_zone]);
2219#endif
1da177e4
LT
2220}
2221
2222/*
2223 * Helper functions to size the waitqueue hash table.
2224 * Essentially these want to choose hash table sizes sufficiently
2225 * large so that collisions trying to wait on pages are rare.
2226 * But in fact, the number of active page waitqueues on typical
2227 * systems is ridiculously low, less than 200. So this is even
2228 * conservative, even though it seems large.
2229 *
2230 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2231 * waitqueues, i.e. the size of the waitq table given the number of pages.
2232 */
2233#define PAGES_PER_WAITQUEUE 256
2234
cca448fe 2235#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 2236static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
2237{
2238 unsigned long size = 1;
2239
2240 pages /= PAGES_PER_WAITQUEUE;
2241
2242 while (size < pages)
2243 size <<= 1;
2244
2245 /*
2246 * Once we have dozens or even hundreds of threads sleeping
2247 * on IO we've got bigger problems than wait queue collision.
2248 * Limit the size of the wait table to a reasonable size.
2249 */
2250 size = min(size, 4096UL);
2251
2252 return max(size, 4UL);
2253}
cca448fe
YG
2254#else
2255/*
2256 * A zone's size might be changed by hot-add, so it is not possible to determine
2257 * a suitable size for its wait_table. So we use the maximum size now.
2258 *
2259 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
2260 *
2261 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
2262 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2263 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
2264 *
2265 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
2266 * or more by the traditional way. (See above). It equals:
2267 *
2268 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
2269 * ia64(16K page size) : = ( 8G + 4M)byte.
2270 * powerpc (64K page size) : = (32G +16M)byte.
2271 */
2272static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2273{
2274 return 4096UL;
2275}
2276#endif
1da177e4
LT
2277
2278/*
2279 * This is an integer logarithm so that shifts can be used later
2280 * to extract the more random high bits from the multiplicative
2281 * hash function before the remainder is taken.
2282 */
2283static inline unsigned long wait_table_bits(unsigned long size)
2284{
2285 return ffz(~size);
2286}
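/*
 * Userspace sketch (hypothetical names, not kernel code) of the boot-time
 * wait-table sizing above, assuming the !CONFIG_MEMORY_HOTPLUG variant and
 * 4K pages.
 */
#include <stdio.h>

static unsigned long demo_wait_table_entries(unsigned long zone_pages)
{
	unsigned long size = 1;

	zone_pages /= 256;		/* PAGES_PER_WAITQUEUE */
	while (size < zone_pages)
		size <<= 1;		/* round up to a power of two */
	if (size > 4096UL)
		size = 4096UL;		/* never more than 4096 entries */
	return size < 4UL ? 4UL : size;	/* and never fewer than 4 */
}

int main(void)
{
	unsigned long entries = demo_wait_table_entries(262144UL); /* 1GB zone */
	unsigned long bits = 0;

	while ((1UL << bits) < entries)
		bits++;			/* same result as ffz(~entries) above */

	printf("%lu entries, %lu hash bits\n", entries, bits); /* 1024, 10 */
	return 0;
}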
2287
2288#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2289
1da177e4
LT
2290/*
2291 * Initially all pages are reserved - free ones are freed
2292 * up by free_all_bootmem() once the early boot process is
2293 * done. Non-atomic initialization, single-pass.
2294 */
c09b4240 2295void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 2296 unsigned long start_pfn, enum memmap_context context)
1da177e4 2297{
1da177e4 2298 struct page *page;
29751f69
AW
2299 unsigned long end_pfn = start_pfn + size;
2300 unsigned long pfn;
1da177e4 2301
cbe8dd4a 2302 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
2303 /*
2304 * There can be holes in boot-time mem_map[]s
2305 * handed to this function. They do not
2306 * exist on hotplugged memory.
2307 */
2308 if (context == MEMMAP_EARLY) {
2309 if (!early_pfn_valid(pfn))
2310 continue;
2311 if (!early_pfn_in_nid(pfn, nid))
2312 continue;
2313 }
d41dee36
AW
2314 page = pfn_to_page(pfn);
2315 set_page_links(page, zone, nid, pfn);
7835e98b 2316 init_page_count(page);
1da177e4
LT
2317 reset_page_mapcount(page);
2318 SetPageReserved(page);
b2a0ac88
MG
2319
2320 /*
2321 * Mark the block movable so that blocks are reserved for
2322 * movable at startup. This will force kernel allocations
2323 * to reserve their blocks rather than leaking throughout
2324 * the address space during boot when many long-lived
2325 * kernel allocations are made
2326 */
2327 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2328
1da177e4
LT
2329 INIT_LIST_HEAD(&page->lru);
2330#ifdef WANT_PAGE_VIRTUAL
2331 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2332 if (!is_highmem_idx(zone))
3212c6be 2333 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 2334#endif
1da177e4
LT
2335 }
2336}
2337
6ea6e688
PM
2338static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
2339 struct zone *zone, unsigned long size)
1da177e4 2340{
b2a0ac88
MG
2341 int order, t;
2342 for_each_migratetype_order(order, t) {
2343 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
2344 zone->free_area[order].nr_free = 0;
2345 }
2346}
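/*
 * Minimal sketch of the layout initialised above: after the free-list split a
 * zone keeps one list per (order, migratetype) pair rather than one list per
 * order. The names and the number of migrate types below are illustrative,
 * not the kernel's definitions.
 */
#define DEMO_MAX_ORDER		11
#define DEMO_MIGRATE_TYPES	2	/* e.g. unmovable and movable */

struct demo_list_head { struct demo_list_head *next, *prev; };

struct demo_free_area {
	struct demo_list_head	free_list[DEMO_MIGRATE_TYPES];
	unsigned long		nr_free;	/* free blocks of this order, all types */
};

/* mirror of zone_init_free_lists(): every per-type list starts empty */
void demo_init_free_lists(struct demo_free_area area[DEMO_MAX_ORDER])
{
	int order, t;

	for (order = 0; order < DEMO_MAX_ORDER; order++) {
		for (t = 0; t < DEMO_MIGRATE_TYPES; t++) {
			area[order].free_list[t].next = &area[order].free_list[t];
			area[order].free_list[t].prev = &area[order].free_list[t];
		}
		area[order].nr_free = 0;
	}
}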
2347
2348#ifndef __HAVE_ARCH_MEMMAP_INIT
2349#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 2350 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
2351#endif
2352
d09c6b80 2353static int __devinit zone_batchsize(struct zone *zone)
e7c8d5c9
CL
2354{
2355 int batch;
2356
2357 /*
2358 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 2359 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
2360 *
2361 * OK, so we don't know how big the cache is. So guess.
2362 */
2363 batch = zone->present_pages / 1024;
ba56e91c
SR
2364 if (batch * PAGE_SIZE > 512 * 1024)
2365 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
2366 batch /= 4; /* We effectively *= 4 below */
2367 if (batch < 1)
2368 batch = 1;
2369
2370 /*
0ceaacc9
NP
2371 * Clamp the batch to a 2^n - 1 value. Having a power
2372 * of 2 value was found to be more likely to have
2373 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 2374 *
0ceaacc9
NP
2375 * For example if 2 tasks are alternately allocating
2376 * batches of pages, one task can end up with a lot
2377 * of pages of one half of the possible page colors
2378 * and the other with pages of the other colors.
e7c8d5c9 2379 */
0ceaacc9 2380 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 2381
e7c8d5c9
CL
2382 return batch;
2383}
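/*
 * Minimal userspace sketch of the batch sizing above (hypothetical names,
 * assumes a 4K PAGE_SIZE). For a 1GB zone this yields 31, i.e. the largest
 * 2^n - 1 value not exceeding roughly 1/4096th of the zone.
 */
#include <stdio.h>

static int demo_fls(unsigned int x)	/* stand-in for fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int demo_zone_batchsize(unsigned long present_pages)
{
	int batch = present_pages / 1024;

	if ((unsigned long)batch * 4096 > 512 * 1024)
		batch = (512 * 1024) / 4096;	/* cap at half a megabyte of pages */
	batch /= 4;				/* effectively multiplied back below */
	if (batch < 1)
		batch = 1;
	/* round down to a 2^n - 1 value to avoid power-of-two cache aliasing */
	return (1 << (demo_fls(batch + batch / 2) - 1)) - 1;
}

int main(void)
{
	printf("%d\n", demo_zone_batchsize(262144UL));	/* 1GB zone -> prints 31 */
	return 0;
}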
2384
2caaad41
CL
2385inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2386{
2387 struct per_cpu_pages *pcp;
2388
1c6fe946
MD
2389 memset(p, 0, sizeof(*p));
2390
2caaad41
CL
2391 pcp = &p->pcp[0]; /* hot */
2392 pcp->count = 0;
2caaad41
CL
2393 pcp->high = 6 * batch;
2394 pcp->batch = max(1UL, 1 * batch);
2395 INIT_LIST_HEAD(&pcp->list);
2396
2397 pcp = &p->pcp[1]; /* cold*/
2398 pcp->count = 0;
2caaad41 2399 pcp->high = 2 * batch;
e46a5e28 2400 pcp->batch = max(1UL, batch/2);
2caaad41
CL
2401 INIT_LIST_HEAD(&pcp->list);
2402}
2403
8ad4b1fb
RS
2404/*
2405 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2406 * to the value high for the pageset p.
2407 */
2408
2409static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2410 unsigned long high)
2411{
2412 struct per_cpu_pages *pcp;
2413
2414 pcp = &p->pcp[0]; /* hot list */
2415 pcp->high = high;
2416 pcp->batch = max(1UL, high/4);
2417 if ((high/4) > (PAGE_SHIFT * 8))
2418 pcp->batch = PAGE_SHIFT * 8;
2419}
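/*
 * Sketch of the sysctl-driven high-water arithmetic above (hypothetical
 * names, assumes PAGE_SHIFT == 12). With percpu_pagelist_fraction = 8 and a
 * 1GB zone, high is 32768 pages and the batch is capped at 96.
 */
void demo_highmark(unsigned long zone_pages, int fraction,
		   unsigned long *high, unsigned long *batch)
{
	*high = zone_pages / fraction;		/* hot list high-water mark */
	*batch = *high / 4;
	if (*batch < 1)
		*batch = 1;
	if (*high / 4 > 12 * 8)			/* PAGE_SHIFT * 8 cap */
		*batch = 12 * 8;
}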
2420
2421
e7c8d5c9
CL
2422#ifdef CONFIG_NUMA
2423/*
2caaad41
CL
2424 * Boot pageset table. One per cpu which is going to be used for all
2425 * zones and all nodes. The parameters will be set in such a way
2426 * that an item put on a list will immediately be handed over to
2427 * the buddy list. This is safe since pageset manipulation is done
2428 * with interrupts disabled.
2429 *
2430 * Some NUMA counter updates may also be caught by the boot pagesets.
b7c84c6a
CL
2431 *
2432 * The boot_pagesets must be kept even after bootup is complete for
2433 * unused processors and/or zones. They do play a role for bootstrapping
2434 * hotplugged processors.
2435 *
2436 * zoneinfo_show() and maybe other functions do
2437 * not check if the processor is online before following the pageset pointer.
2438 * Other parts of the kernel may not check if the zone is available.
2caaad41 2439 */
88a2a4ac 2440static struct per_cpu_pageset boot_pageset[NR_CPUS];
2caaad41
CL
2441
2442/*
2443 * Dynamically allocate memory for the
e7c8d5c9
CL
2444 * per cpu pageset array in struct zone.
2445 */
6292d9aa 2446static int __cpuinit process_zones(int cpu)
e7c8d5c9
CL
2447{
2448 struct zone *zone, *dzone;
37c0708d
CL
2449 int node = cpu_to_node(cpu);
2450
2451 node_set_state(node, N_CPU); /* this node has a cpu */
e7c8d5c9
CL
2452
2453 for_each_zone(zone) {
e7c8d5c9 2454
66a55030
CL
2455 if (!populated_zone(zone))
2456 continue;
2457
23316bc8 2458 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
37c0708d 2459 GFP_KERNEL, node);
23316bc8 2460 if (!zone_pcp(zone, cpu))
e7c8d5c9 2461 goto bad;
e7c8d5c9 2462
23316bc8 2463 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
2464
2465 if (percpu_pagelist_fraction)
2466 setup_pagelist_highmark(zone_pcp(zone, cpu),
2467 (zone->present_pages / percpu_pagelist_fraction));
e7c8d5c9
CL
2468 }
2469
2470 return 0;
2471bad:
2472 for_each_zone(dzone) {
64191688
AM
2473 if (!populated_zone(dzone))
2474 continue;
e7c8d5c9
CL
2475 if (dzone == zone)
2476 break;
23316bc8
NP
2477 kfree(zone_pcp(dzone, cpu));
2478 zone_pcp(dzone, cpu) = NULL;
e7c8d5c9
CL
2479 }
2480 return -ENOMEM;
2481}
2482
2483static inline void free_zone_pagesets(int cpu)
2484{
e7c8d5c9
CL
2485 struct zone *zone;
2486
2487 for_each_zone(zone) {
2488 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2489
f3ef9ead
DR
2490 /* Free per_cpu_pageset if it is slab allocated */
2491 if (pset != &boot_pageset[cpu])
2492 kfree(pset);
e7c8d5c9 2493 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 2494 }
e7c8d5c9
CL
2495}
2496
9c7b216d 2497static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
e7c8d5c9
CL
2498 unsigned long action,
2499 void *hcpu)
2500{
2501 int cpu = (long)hcpu;
2502 int ret = NOTIFY_OK;
2503
2504 switch (action) {
ce421c79 2505 case CPU_UP_PREPARE:
8bb78442 2506 case CPU_UP_PREPARE_FROZEN:
ce421c79
AW
2507 if (process_zones(cpu))
2508 ret = NOTIFY_BAD;
2509 break;
2510 case CPU_UP_CANCELED:
8bb78442 2511 case CPU_UP_CANCELED_FROZEN:
ce421c79 2512 case CPU_DEAD:
8bb78442 2513 case CPU_DEAD_FROZEN:
ce421c79
AW
2514 free_zone_pagesets(cpu);
2515 break;
2516 default:
2517 break;
e7c8d5c9
CL
2518 }
2519 return ret;
2520}
2521
74b85f37 2522static struct notifier_block __cpuinitdata pageset_notifier =
e7c8d5c9
CL
2523 { &pageset_cpuup_callback, NULL, 0 };
2524
78d9955b 2525void __init setup_per_cpu_pageset(void)
e7c8d5c9
CL
2526{
2527 int err;
2528
2529 /* Initialize per_cpu_pageset for cpu 0.
2530 * A cpuup callback will do this for every cpu
2531 * as it comes online
2532 */
2533 err = process_zones(smp_processor_id());
2534 BUG_ON(err);
2535 register_cpu_notifier(&pageset_notifier);
2536}
2537
2538#endif
2539
577a32f6 2540static noinline __init_refok
cca448fe 2541int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
2542{
2543 int i;
2544 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 2545 size_t alloc_size;
ed8ece2e
DH
2546
2547 /*
2548 * The per-page waitqueue mechanism uses hashed waitqueues
2549 * per zone.
2550 */
02b694de
YG
2551 zone->wait_table_hash_nr_entries =
2552 wait_table_hash_nr_entries(zone_size_pages);
2553 zone->wait_table_bits =
2554 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
2555 alloc_size = zone->wait_table_hash_nr_entries
2556 * sizeof(wait_queue_head_t);
2557
2558 if (system_state == SYSTEM_BOOTING) {
2559 zone->wait_table = (wait_queue_head_t *)
2560 alloc_bootmem_node(pgdat, alloc_size);
2561 } else {
2562 /*
2563 * This case means that a zone whose size was 0 gets new memory
2564 * via memory hot-add.
2565 * But it may be the case that a new node was hot-added. In
2566 * this case vmalloc() will not be able to use this new node's
2567 * memory - this wait_table must be initialized to use this new
2568 * node itself as well.
2569 * To use this new node's memory, further consideration will be
2570 * necessary.
2571 */
8691f3a7 2572 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
2573 }
2574 if (!zone->wait_table)
2575 return -ENOMEM;
ed8ece2e 2576
02b694de 2577 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 2578 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
2579
2580 return 0;
ed8ece2e
DH
2581}
2582
c09b4240 2583static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e
DH
2584{
2585 int cpu;
2586 unsigned long batch = zone_batchsize(zone);
2587
2588 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2589#ifdef CONFIG_NUMA
2590 /* Early boot. Slab allocator not functional yet */
23316bc8 2591 zone_pcp(zone, cpu) = &boot_pageset[cpu];
ed8ece2e
DH
2592 setup_pageset(&boot_pageset[cpu],0);
2593#else
2594 setup_pageset(zone_pcp(zone,cpu), batch);
2595#endif
2596 }
f5335c0f
AB
2597 if (zone->present_pages)
2598 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2599 zone->name, zone->present_pages, batch);
ed8ece2e
DH
2600}
2601
718127cc
YG
2602__meminit int init_currently_empty_zone(struct zone *zone,
2603 unsigned long zone_start_pfn,
a2f3aa02
DH
2604 unsigned long size,
2605 enum memmap_context context)
ed8ece2e
DH
2606{
2607 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
2608 int ret;
2609 ret = zone_wait_table_init(zone, size);
2610 if (ret)
2611 return ret;
ed8ece2e
DH
2612 pgdat->nr_zones = zone_idx(zone) + 1;
2613
ed8ece2e
DH
2614 zone->zone_start_pfn = zone_start_pfn;
2615
2616 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2617
2618 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
718127cc
YG
2619
2620 return 0;
ed8ece2e
DH
2621}
2622
c713216d
MG
2623#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2624/*
2625 * Basic iterator support. Return the first range of PFNs for a node
2626 * Note: nid == MAX_NUMNODES returns first region regardless of node
2627 */
a3142c8e 2628static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
2629{
2630 int i;
2631
2632 for (i = 0; i < nr_nodemap_entries; i++)
2633 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2634 return i;
2635
2636 return -1;
2637}
2638
2639/*
2640 * Basic iterator support. Return the next active range of PFNs for a node
 2641 * Note: nid == MAX_NUMNODES returns next region regardless of node
2642 */
a3142c8e 2643static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
2644{
2645 for (index = index + 1; index < nr_nodemap_entries; index++)
2646 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2647 return index;
2648
2649 return -1;
2650}
2651
2652#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2653/*
2654 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2655 * Architectures may implement their own version but if add_active_range()
2656 * was used and there are no special requirements, this is a convenient
2657 * alternative
2658 */
6f076f5d 2659int __meminit early_pfn_to_nid(unsigned long pfn)
c713216d
MG
2660{
2661 int i;
2662
2663 for (i = 0; i < nr_nodemap_entries; i++) {
2664 unsigned long start_pfn = early_node_map[i].start_pfn;
2665 unsigned long end_pfn = early_node_map[i].end_pfn;
2666
2667 if (start_pfn <= pfn && pfn < end_pfn)
2668 return early_node_map[i].nid;
2669 }
2670
2671 return 0;
2672}
2673#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2674
2675/* Basic iterator support to walk early_node_map[] */
2676#define for_each_active_range_index_in_nid(i, nid) \
2677 for (i = first_active_region_index_in_nid(nid); i != -1; \
2678 i = next_active_region_index_in_nid(i, nid))
2679
2680/**
2681 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
2682 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2683 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
2684 *
2685 * If an architecture guarantees that all ranges registered with
2686 * add_active_ranges() contain no holes and may be freed, this
 2687 * function may be used instead of calling free_bootmem() manually.
2688 */
2689void __init free_bootmem_with_active_regions(int nid,
2690 unsigned long max_low_pfn)
2691{
2692 int i;
2693
2694 for_each_active_range_index_in_nid(i, nid) {
2695 unsigned long size_pages = 0;
2696 unsigned long end_pfn = early_node_map[i].end_pfn;
2697
2698 if (early_node_map[i].start_pfn >= max_low_pfn)
2699 continue;
2700
2701 if (end_pfn > max_low_pfn)
2702 end_pfn = max_low_pfn;
2703
2704 size_pages = end_pfn - early_node_map[i].start_pfn;
2705 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2706 PFN_PHYS(early_node_map[i].start_pfn),
2707 size_pages << PAGE_SHIFT);
2708 }
2709}
2710
2711/**
2712 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 2713 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
2714 *
2715 * If an architecture guarantees that all ranges registered with
2716 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 2717 * function may be used instead of calling memory_present() manually.
c713216d
MG
2718 */
2719void __init sparse_memory_present_with_active_regions(int nid)
2720{
2721 int i;
2722
2723 for_each_active_range_index_in_nid(i, nid)
2724 memory_present(early_node_map[i].nid,
2725 early_node_map[i].start_pfn,
2726 early_node_map[i].end_pfn);
2727}
2728
fb01439c
MG
2729/**
2730 * push_node_boundaries - Push node boundaries to at least the requested boundary
2731 * @nid: The nid of the node to push the boundary for
2732 * @start_pfn: The start pfn of the node
2733 * @end_pfn: The end pfn of the node
2734 *
2735 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
2736 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
2737 * be hotplugged even though no physical memory exists. This function allows
 2738 * an arch to push out the node boundaries so that a mem_map is allocated which can
2739 * be used later.
2740 */
2741#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2742void __init push_node_boundaries(unsigned int nid,
2743 unsigned long start_pfn, unsigned long end_pfn)
2744{
2745 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2746 nid, start_pfn, end_pfn);
2747
2748 /* Initialise the boundary for this node if necessary */
2749 if (node_boundary_end_pfn[nid] == 0)
2750 node_boundary_start_pfn[nid] = -1UL;
2751
2752 /* Update the boundaries */
2753 if (node_boundary_start_pfn[nid] > start_pfn)
2754 node_boundary_start_pfn[nid] = start_pfn;
2755 if (node_boundary_end_pfn[nid] < end_pfn)
2756 node_boundary_end_pfn[nid] = end_pfn;
2757}
2758
2759/* If necessary, push the node boundary out for reserve hotadd */
98011f56 2760static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
2761 unsigned long *start_pfn, unsigned long *end_pfn)
2762{
2763 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2764 nid, *start_pfn, *end_pfn);
2765
2766 /* Return if boundary information has not been provided */
2767 if (node_boundary_end_pfn[nid] == 0)
2768 return;
2769
2770 /* Check the boundaries and update if necessary */
2771 if (node_boundary_start_pfn[nid] < *start_pfn)
2772 *start_pfn = node_boundary_start_pfn[nid];
2773 if (node_boundary_end_pfn[nid] > *end_pfn)
2774 *end_pfn = node_boundary_end_pfn[nid];
2775}
2776#else
2777void __init push_node_boundaries(unsigned int nid,
2778 unsigned long start_pfn, unsigned long end_pfn) {}
2779
98011f56 2780static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
2781 unsigned long *start_pfn, unsigned long *end_pfn) {}
2782#endif
2783
2784
c713216d
MG
2785/**
2786 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
2787 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
2788 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
2789 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
2790 *
2791 * It returns the start and end page frame of a node based on information
2792 * provided by an arch calling add_active_range(). If called for a node
2793 * with no available memory, a warning is printed and the start and end
88ca3b94 2794 * PFNs will be 0.
c713216d 2795 */
a3142c8e 2796void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
2797 unsigned long *start_pfn, unsigned long *end_pfn)
2798{
2799 int i;
2800 *start_pfn = -1UL;
2801 *end_pfn = 0;
2802
2803 for_each_active_range_index_in_nid(i, nid) {
2804 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
2805 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
2806 }
2807
633c0666 2808 if (*start_pfn == -1UL)
c713216d 2809 *start_pfn = 0;
fb01439c
MG
2810
2811 /* Push the node boundaries out if requested */
2812 account_node_boundary(nid, start_pfn, end_pfn);
c713216d
MG
2813}
2814
2a1e274a
MG
2815/*
2816 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 2817 * assumption is made that zones within a node are ordered by monotonically
2818 * increasing memory addresses so that the "highest" populated zone is used
2819 */
2820void __init find_usable_zone_for_movable(void)
2821{
2822 int zone_index;
2823 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
2824 if (zone_index == ZONE_MOVABLE)
2825 continue;
2826
2827 if (arch_zone_highest_possible_pfn[zone_index] >
2828 arch_zone_lowest_possible_pfn[zone_index])
2829 break;
2830 }
2831
2832 VM_BUG_ON(zone_index == -1);
2833 movable_zone = zone_index;
2834}
2835
2836/*
2837 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 2838 * because it is sized independently of the architecture. Unlike the other zones,
2839 * the starting point for ZONE_MOVABLE is not fixed. It may be different
2840 * in each node depending on the size of each node and how evenly kernelcore
2841 * is distributed. This helper function adjusts the zone ranges
2842 * provided by the architecture for a given node by using the end of the
2843 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 2844 * zones within a node are ordered by monotonically increasing memory addresses
2845 */
2846void __meminit adjust_zone_range_for_zone_movable(int nid,
2847 unsigned long zone_type,
2848 unsigned long node_start_pfn,
2849 unsigned long node_end_pfn,
2850 unsigned long *zone_start_pfn,
2851 unsigned long *zone_end_pfn)
2852{
2853 /* Only adjust if ZONE_MOVABLE is on this node */
2854 if (zone_movable_pfn[nid]) {
2855 /* Size ZONE_MOVABLE */
2856 if (zone_type == ZONE_MOVABLE) {
2857 *zone_start_pfn = zone_movable_pfn[nid];
2858 *zone_end_pfn = min(node_end_pfn,
2859 arch_zone_highest_possible_pfn[movable_zone]);
2860
2861 /* Adjust for ZONE_MOVABLE starting within this range */
2862 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
2863 *zone_end_pfn > zone_movable_pfn[nid]) {
2864 *zone_end_pfn = zone_movable_pfn[nid];
2865
2866 /* Check if this whole range is within ZONE_MOVABLE */
2867 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
2868 *zone_start_pfn = *zone_end_pfn;
2869 }
2870}
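/*
 * Userspace sketch of the clipping above for an ordinary (non-MOVABLE) zone,
 * with hypothetical names. Everything at or above movable_start_pfn on this
 * node is carved out for ZONE_MOVABLE.
 */
void demo_clip_for_movable(unsigned long movable_start_pfn,
			   unsigned long *zone_start_pfn,
			   unsigned long *zone_end_pfn)
{
	if (!movable_start_pfn)
		return;				/* no ZONE_MOVABLE on this node */
	if (*zone_start_pfn < movable_start_pfn &&
	    *zone_end_pfn > movable_start_pfn)
		*zone_end_pfn = movable_start_pfn;	/* zone straddles the split */
	else if (*zone_start_pfn >= movable_start_pfn)
		*zone_start_pfn = *zone_end_pfn;	/* zone lies entirely above it */
}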
2871
c713216d
MG
2872/*
2873 * Return the number of pages a zone spans in a node, including holes
2874 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
2875 */
6ea6e688 2876static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
2877 unsigned long zone_type,
2878 unsigned long *ignored)
2879{
2880 unsigned long node_start_pfn, node_end_pfn;
2881 unsigned long zone_start_pfn, zone_end_pfn;
2882
2883 /* Get the start and end of the node and zone */
2884 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2885 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2886 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
2887 adjust_zone_range_for_zone_movable(nid, zone_type,
2888 node_start_pfn, node_end_pfn,
2889 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
2890
2891 /* Check that this node has pages within the zone's required range */
2892 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
2893 return 0;
2894
2895 /* Move the zone boundaries inside the node if necessary */
2896 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
2897 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
2898
2899 /* Return the spanned pages */
2900 return zone_end_pfn - zone_start_pfn;
2901}
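/*
 * The span calculation above reduces to clamping the architecture's zone PFN
 * limits to the node's PFN range. A minimal sketch with hypothetical names:
 */
unsigned long demo_spanned(unsigned long zone_start, unsigned long zone_end,
			   unsigned long node_start, unsigned long node_end)
{
	if (zone_end < node_start || zone_start > node_end)
		return 0;			/* node has no pages in this zone */
	if (zone_end > node_end)
		zone_end = node_end;
	if (zone_start < node_start)
		zone_start = node_start;
	return zone_end - zone_start;		/* spanned pages, holes included */
}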
2902
2903/*
2904 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 2905 * then all holes in the requested range will be accounted for.
c713216d 2906 */
a3142c8e 2907unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
2908 unsigned long range_start_pfn,
2909 unsigned long range_end_pfn)
2910{
2911 int i = 0;
2912 unsigned long prev_end_pfn = 0, hole_pages = 0;
2913 unsigned long start_pfn;
2914
2915 /* Find the end_pfn of the first active range of pfns in the node */
2916 i = first_active_region_index_in_nid(nid);
2917 if (i == -1)
2918 return 0;
2919
b5445f95
MG
2920 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
2921
9c7cd687
MG
2922 /* Account for ranges before physical memory on this node */
2923 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 2924 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
2925
2926 /* Find all holes for the zone within the node */
2927 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
2928
2929 /* No need to continue if prev_end_pfn is outside the zone */
2930 if (prev_end_pfn >= range_end_pfn)
2931 break;
2932
2933 /* Make sure the end of the zone is not within the hole */
2934 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
2935 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
2936
 2937 /* Update the hole size count and move on */
2938 if (start_pfn > range_start_pfn) {
2939 BUG_ON(prev_end_pfn > start_pfn);
2940 hole_pages += start_pfn - prev_end_pfn;
2941 }
2942 prev_end_pfn = early_node_map[i].end_pfn;
2943 }
2944
9c7cd687
MG
2945 /* Account for ranges past physical memory on this node */
2946 if (range_end_pfn > prev_end_pfn)
0c6cb974 2947 hole_pages += range_end_pfn -
9c7cd687
MG
2948 max(range_start_pfn, prev_end_pfn);
2949
c713216d
MG
2950 return hole_pages;
2951}
2952
2953/**
2954 * absent_pages_in_range - Return number of page frames in holes within a range
2955 * @start_pfn: The start PFN to start searching for holes
2956 * @end_pfn: The end PFN to stop searching for holes
2957 *
88ca3b94 2958 * It returns the number of page frames in memory holes within a range.
c713216d
MG
2959 */
2960unsigned long __init absent_pages_in_range(unsigned long start_pfn,
2961 unsigned long end_pfn)
2962{
2963 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
2964}
2965
2966/* Return the number of page frames in holes in a zone on a node */
6ea6e688 2967static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
2968 unsigned long zone_type,
2969 unsigned long *ignored)
2970{
9c7cd687
MG
2971 unsigned long node_start_pfn, node_end_pfn;
2972 unsigned long zone_start_pfn, zone_end_pfn;
2973
2974 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2975 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
2976 node_start_pfn);
2977 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
2978 node_end_pfn);
2979
2a1e274a
MG
2980 adjust_zone_range_for_zone_movable(nid, zone_type,
2981 node_start_pfn, node_end_pfn,
2982 &zone_start_pfn, &zone_end_pfn);
9c7cd687 2983 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 2984}
0e0b864e 2985
c713216d 2986#else
6ea6e688 2987static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
2988 unsigned long zone_type,
2989 unsigned long *zones_size)
2990{
2991 return zones_size[zone_type];
2992}
2993
6ea6e688 2994static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
2995 unsigned long zone_type,
2996 unsigned long *zholes_size)
2997{
2998 if (!zholes_size)
2999 return 0;
3000
3001 return zholes_size[zone_type];
3002}
0e0b864e 3003
c713216d
MG
3004#endif
3005
a3142c8e 3006static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
3007 unsigned long *zones_size, unsigned long *zholes_size)
3008{
3009 unsigned long realtotalpages, totalpages = 0;
3010 enum zone_type i;
3011
3012 for (i = 0; i < MAX_NR_ZONES; i++)
3013 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3014 zones_size);
3015 pgdat->node_spanned_pages = totalpages;
3016
3017 realtotalpages = totalpages;
3018 for (i = 0; i < MAX_NR_ZONES; i++)
3019 realtotalpages -=
3020 zone_absent_pages_in_node(pgdat->node_id, i,
3021 zholes_size);
3022 pgdat->node_present_pages = realtotalpages;
3023 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3024 realtotalpages);
3025}
3026
835c134e
MG
3027#ifndef CONFIG_SPARSEMEM
3028/*
3029 * Calculate the size of the zone->blockflags rounded to an unsigned long
 3030 * Start by making sure zonesize is a multiple of MAX_ORDER_NR_PAGES by rounding up
 3031 * Then figure NR_PAGEBLOCK_BITS worth of bits per MAX_ORDER-1 block, finally
3032 * round what is now in bits to nearest long in bits, then return it in
3033 * bytes.
3034 */
3035static unsigned long __init usemap_size(unsigned long zonesize)
3036{
3037 unsigned long usemapsize;
3038
3039 usemapsize = roundup(zonesize, MAX_ORDER_NR_PAGES);
3040 usemapsize = usemapsize >> (MAX_ORDER-1);
3041 usemapsize *= NR_PAGEBLOCK_BITS;
3042 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3043
3044 return usemapsize / 8;
3045}
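/*
 * Worked userspace sketch of the usemap sizing above, assuming MAX_ORDER = 11
 * (so MAX_ORDER_NR_PAGES = 1024), NR_PAGEBLOCK_BITS = 4 and a 64-bit build
 * where a long is 64 bits. A 1GB zone of 262144 4K pages has 256 blocks,
 * needing 256 * 4 = 1024 bits = 128 bytes.
 */
#include <stdio.h>

static unsigned long demo_usemap_size(unsigned long zonesize)
{
	unsigned long bits;

	zonesize = (zonesize + 1023) & ~1023UL;	/* round up to MAX_ORDER_NR_PAGES */
	bits = (zonesize >> 10) * 4;		/* 4 flag bits per MAX_ORDER-1 block */
	bits = (bits + 63) & ~63UL;		/* round to a whole unsigned long */
	return bits / 8;			/* return bytes */
}

int main(void)
{
	printf("%lu bytes\n", demo_usemap_size(262144UL));	/* prints 128 */
	return 0;
}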
3046
3047static void __init setup_usemap(struct pglist_data *pgdat,
3048 struct zone *zone, unsigned long zonesize)
3049{
3050 unsigned long usemapsize = usemap_size(zonesize);
3051 zone->pageblock_flags = NULL;
3052 if (usemapsize) {
3053 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3054 memset(zone->pageblock_flags, 0, usemapsize);
3055 }
3056}
3057#else
3058static void inline setup_usemap(struct pglist_data *pgdat,
3059 struct zone *zone, unsigned long zonesize) {}
3060#endif /* CONFIG_SPARSEMEM */
3061
1da177e4
LT
3062/*
3063 * Set up the zone data structures:
3064 * - mark all pages reserved
3065 * - mark all memory queues empty
3066 * - clear the memory bitmaps
3067 */
86356ab1 3068static void __meminit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
3069 unsigned long *zones_size, unsigned long *zholes_size)
3070{
2f1b6248 3071 enum zone_type j;
ed8ece2e 3072 int nid = pgdat->node_id;
1da177e4 3073 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 3074 int ret;
1da177e4 3075
208d54e5 3076 pgdat_resize_init(pgdat);
1da177e4
LT
3077 pgdat->nr_zones = 0;
3078 init_waitqueue_head(&pgdat->kswapd_wait);
3079 pgdat->kswapd_max_order = 0;
3080
3081 for (j = 0; j < MAX_NR_ZONES; j++) {
3082 struct zone *zone = pgdat->node_zones + j;
0e0b864e 3083 unsigned long size, realsize, memmap_pages;
1da177e4 3084
c713216d
MG
3085 size = zone_spanned_pages_in_node(nid, j, zones_size);
3086 realsize = size - zone_absent_pages_in_node(nid, j,
3087 zholes_size);
1da177e4 3088
0e0b864e
MG
3089 /*
3090 * Adjust realsize so that it accounts for how much memory
3091 * is used by this zone for memmap. This affects the watermark
3092 * and per-cpu initialisations
3093 */
3094 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3095 if (realsize >= memmap_pages) {
3096 realsize -= memmap_pages;
3097 printk(KERN_DEBUG
3098 " %s zone: %lu pages used for memmap\n",
3099 zone_names[j], memmap_pages);
3100 } else
3101 printk(KERN_WARNING
3102 " %s zone: %lu pages exceeds realsize %lu\n",
3103 zone_names[j], memmap_pages, realsize);
3104
6267276f
CL
3105 /* Account for reserved pages */
3106 if (j == 0 && realsize > dma_reserve) {
0e0b864e 3107 realsize -= dma_reserve;
6267276f
CL
3108 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3109 zone_names[0], dma_reserve);
0e0b864e
MG
3110 }
3111
98d2b0eb 3112 if (!is_highmem_idx(j))
1da177e4
LT
3113 nr_kernel_pages += realsize;
3114 nr_all_pages += realsize;
3115
3116 zone->spanned_pages = size;
3117 zone->present_pages = realsize;
9614634f 3118#ifdef CONFIG_NUMA
d5f541ed 3119 zone->node = nid;
8417bba4 3120 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 3121 / 100;
0ff38490 3122 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 3123#endif
1da177e4
LT
3124 zone->name = zone_names[j];
3125 spin_lock_init(&zone->lock);
3126 spin_lock_init(&zone->lru_lock);
bdc8cb98 3127 zone_seqlock_init(zone);
1da177e4 3128 zone->zone_pgdat = pgdat;
1da177e4 3129
3bb1a852 3130 zone->prev_priority = DEF_PRIORITY;
1da177e4 3131
ed8ece2e 3132 zone_pcp_init(zone);
1da177e4
LT
3133 INIT_LIST_HEAD(&zone->active_list);
3134 INIT_LIST_HEAD(&zone->inactive_list);
3135 zone->nr_scan_active = 0;
3136 zone->nr_scan_inactive = 0;
2244b95a 3137 zap_zone_vm_stats(zone);
53e9a615 3138 atomic_set(&zone->reclaim_in_progress, 0);
1da177e4
LT
3139 if (!size)
3140 continue;
3141
835c134e 3142 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
3143 ret = init_currently_empty_zone(zone, zone_start_pfn,
3144 size, MEMMAP_EARLY);
718127cc 3145 BUG_ON(ret);
1da177e4 3146 zone_start_pfn += size;
1da177e4
LT
3147 }
3148}
3149
577a32f6 3150static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 3151{
1da177e4
LT
3152 /* Skip empty nodes */
3153 if (!pgdat->node_spanned_pages)
3154 return;
3155
d41dee36 3156#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
3157 /* ia64 gets its own node_mem_map, before this, without bootmem */
3158 if (!pgdat->node_mem_map) {
e984bb43 3159 unsigned long size, start, end;
d41dee36
AW
3160 struct page *map;
3161
e984bb43
BP
3162 /*
3163 * The zone's endpoints aren't required to be MAX_ORDER
3164 * aligned but the node_mem_map endpoints must be in order
3165 * for the buddy allocator to function correctly.
3166 */
3167 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3168 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3169 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3170 size = (end - start) * sizeof(struct page);
6f167ec7
DH
3171 map = alloc_remap(pgdat->node_id, size);
3172 if (!map)
3173 map = alloc_bootmem_node(pgdat, size);
e984bb43 3174 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 3175 }
12d810c1 3176#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3177 /*
3178 * With no DISCONTIG, the global mem_map is just set as node 0's
3179 */
c713216d 3180 if (pgdat == NODE_DATA(0)) {
1da177e4 3181 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
3182#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3183 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3184 mem_map -= pgdat->node_start_pfn;
3185#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3186 }
1da177e4 3187#endif
d41dee36 3188#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
3189}
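/*
 * Sketch of the MAX_ORDER alignment above with made-up numbers: a node
 * starting at pfn 0x100ff spanning 0x8000 pages, MAX_ORDER_NR_PAGES = 1024
 * (0x400), and a 64-byte struct page (both of those are assumptions, not
 * fixed kernel values).
 */
#include <stdio.h>

int main(void)
{
	unsigned long node_start_pfn = 0x100ff, spanned = 0x8000;
	unsigned long start, end, size;

	start = node_start_pfn & ~(0x400UL - 1);	/* round the start pfn down */
	end = node_start_pfn + spanned;
	end = (end + 0x400UL - 1) & ~(0x400UL - 1);	/* round the end pfn up (ALIGN) */
	size = (end - start) * 64;			/* bytes of mem_map to allocate */

	printf("map covers pfns 0x%lx-0x%lx, %lu bytes\n", start, end, size);
	return 0;
}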
3190
86356ab1 3191void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
1da177e4
LT
3192 unsigned long *zones_size, unsigned long node_start_pfn,
3193 unsigned long *zholes_size)
3194{
3195 pgdat->node_id = nid;
3196 pgdat->node_start_pfn = node_start_pfn;
c713216d 3197 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
3198
3199 alloc_node_mem_map(pgdat);
3200
3201 free_area_init_core(pgdat, zones_size, zholes_size);
3202}
3203
c713216d 3204#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
3205
3206#if MAX_NUMNODES > 1
3207/*
3208 * Figure out the number of possible node ids.
3209 */
3210static void __init setup_nr_node_ids(void)
3211{
3212 unsigned int node;
3213 unsigned int highest = 0;
3214
3215 for_each_node_mask(node, node_possible_map)
3216 highest = node;
3217 nr_node_ids = highest + 1;
3218}
3219#else
3220static inline void setup_nr_node_ids(void)
3221{
3222}
3223#endif
3224
c713216d
MG
3225/**
3226 * add_active_range - Register a range of PFNs backed by physical memory
3227 * @nid: The node ID the range resides on
3228 * @start_pfn: The start PFN of the available physical memory
3229 * @end_pfn: The end PFN of the available physical memory
3230 *
3231 * These ranges are stored in an early_node_map[] and later used by
3232 * free_area_init_nodes() to calculate zone sizes and holes. If the
3233 * range spans a memory hole, it is up to the architecture to ensure
3234 * the memory is not freed by the bootmem allocator. If possible
3235 * the range being registered will be merged with existing ranges.
3236 */
3237void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3238 unsigned long end_pfn)
3239{
3240 int i;
3241
3242 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3243 "%d entries of %d used\n",
3244 nid, start_pfn, end_pfn,
3245 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3246
3247 /* Merge with existing active regions if possible */
3248 for (i = 0; i < nr_nodemap_entries; i++) {
3249 if (early_node_map[i].nid != nid)
3250 continue;
3251
3252 /* Skip if an existing region covers this new one */
3253 if (start_pfn >= early_node_map[i].start_pfn &&
3254 end_pfn <= early_node_map[i].end_pfn)
3255 return;
3256
3257 /* Merge forward if suitable */
3258 if (start_pfn <= early_node_map[i].end_pfn &&
3259 end_pfn > early_node_map[i].end_pfn) {
3260 early_node_map[i].end_pfn = end_pfn;
3261 return;
3262 }
3263
3264 /* Merge backward if suitable */
3265 if (start_pfn < early_node_map[i].end_pfn &&
3266 end_pfn >= early_node_map[i].start_pfn) {
3267 early_node_map[i].start_pfn = start_pfn;
3268 return;
3269 }
3270 }
3271
3272 /* Check that early_node_map is large enough */
3273 if (i >= MAX_ACTIVE_REGIONS) {
3274 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3275 MAX_ACTIVE_REGIONS);
3276 return;
3277 }
3278
3279 early_node_map[i].nid = nid;
3280 early_node_map[i].start_pfn = start_pfn;
3281 early_node_map[i].end_pfn = end_pfn;
3282 nr_nodemap_entries = i + 1;
3283}
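/*
 * Standalone sketch of the merge rules above on a toy range table
 * (hypothetical types and names; nid handling and the overflow check are
 * omitted for brevity).
 */
#include <stdio.h>

struct demo_range { unsigned long start, end; };

static int demo_add_range(struct demo_range *map, int n,
			  unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < n; i++) {
		if (start >= map[i].start && end <= map[i].end)
			return n;			/* already covered */
		if (start <= map[i].end && end > map[i].end) {
			map[i].end = end;		/* merge forward */
			return n;
		}
		if (start < map[i].end && end >= map[i].start) {
			map[i].start = start;		/* merge backward */
			return n;
		}
	}
	map[n].start = start;				/* no overlap: append */
	map[n].end = end;
	return n + 1;
}

int main(void)
{
	struct demo_range map[8];
	int n = 0;

	n = demo_add_range(map, n, 0, 1000);
	n = demo_add_range(map, n, 900, 2000);	/* merges forward with [0,1000) */
	printf("%d entry: %lu-%lu\n", n, map[0].start, map[0].end);
	return 0;
}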
3284
3285/**
3286 * shrink_active_range - Shrink an existing registered range of PFNs
3287 * @nid: The node id the range is on that should be shrunk
3288 * @old_end_pfn: The old end PFN of the range
 3289 * @new_end_pfn: The new end PFN of the range
3290 *
 3291 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 3292 * The map is kept at the end of the physical page range that has already been
3293 * registered with add_active_range(). This function allows an arch to shrink
3294 * an existing registered range.
3295 */
3296void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3297 unsigned long new_end_pfn)
3298{
3299 int i;
3300
3301 /* Find the old active region end and shrink */
3302 for_each_active_range_index_in_nid(i, nid)
3303 if (early_node_map[i].end_pfn == old_end_pfn) {
3304 early_node_map[i].end_pfn = new_end_pfn;
3305 break;
3306 }
3307}
3308
3309/**
3310 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 3311 *
c713216d
MG
3312 * During discovery, it may be found that a table like SRAT is invalid
3313 * and an alternative discovery method must be used. This function removes
3314 * all currently registered regions.
3315 */
88ca3b94 3316void __init remove_all_active_ranges(void)
c713216d
MG
3317{
3318 memset(early_node_map, 0, sizeof(early_node_map));
3319 nr_nodemap_entries = 0;
fb01439c
MG
3320#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3321 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3322 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3323#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
c713216d
MG
3324}
3325
3326/* Compare two active node_active_regions */
3327static int __init cmp_node_active_region(const void *a, const void *b)
3328{
3329 struct node_active_region *arange = (struct node_active_region *)a;
3330 struct node_active_region *brange = (struct node_active_region *)b;
3331
3332 /* Done this way to avoid overflows */
3333 if (arange->start_pfn > brange->start_pfn)
3334 return 1;
3335 if (arange->start_pfn < brange->start_pfn)
3336 return -1;
3337
3338 return 0;
3339}
3340
3341/* sort the node_map by start_pfn */
3342static void __init sort_node_map(void)
3343{
3344 sort(early_node_map, (size_t)nr_nodemap_entries,
3345 sizeof(struct node_active_region),
3346 cmp_node_active_region, NULL);
3347}
3348
a6af2bc3 3349/* Find the lowest pfn for a node */
c713216d
MG
3350unsigned long __init find_min_pfn_for_node(unsigned long nid)
3351{
3352 int i;
a6af2bc3 3353 unsigned long min_pfn = ULONG_MAX;
1abbfb41 3354
c713216d
MG
3355 /* Assuming a sorted map, the first range found has the starting pfn */
3356 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 3357 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 3358
a6af2bc3
MG
3359 if (min_pfn == ULONG_MAX) {
3360 printk(KERN_WARNING
3361 "Could not find start_pfn for node %lu\n", nid);
3362 return 0;
3363 }
3364
3365 return min_pfn;
c713216d
MG
3366}
3367
3368/**
3369 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3370 *
3371 * It returns the minimum PFN based on information provided via
88ca3b94 3372 * add_active_range().
c713216d
MG
3373 */
3374unsigned long __init find_min_pfn_with_active_regions(void)
3375{
3376 return find_min_pfn_for_node(MAX_NUMNODES);
3377}
3378
3379/**
3380 * find_max_pfn_with_active_regions - Find the maximum PFN registered
3381 *
3382 * It returns the maximum PFN based on information provided via
88ca3b94 3383 * add_active_range().
c713216d
MG
3384 */
3385unsigned long __init find_max_pfn_with_active_regions(void)
3386{
3387 int i;
3388 unsigned long max_pfn = 0;
3389
3390 for (i = 0; i < nr_nodemap_entries; i++)
3391 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3392
3393 return max_pfn;
3394}
3395
37b07e41
LS
3396/*
3397 * early_calculate_totalpages()
3398 * Sum pages across all active regions (used when sizing ZONE_MOVABLE).
3399 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3400 */
7e63efef
MG
3401unsigned long __init early_calculate_totalpages(void)
3402{
3403 int i;
3404 unsigned long totalpages = 0;
3405
37b07e41
LS
3406 for (i = 0; i < nr_nodemap_entries; i++) {
3407 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 3408 early_node_map[i].start_pfn;
37b07e41
LS
3409 totalpages += pages;
3410 if (pages)
3411 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3412 }
3413 return totalpages;
7e63efef
MG
3414}
3415
2a1e274a
MG
3416/*
3417 * Find the PFN the Movable zone begins in each node. Kernel memory
3418 * is spread evenly between nodes as long as the nodes have enough
3419 * memory. When they don't, some nodes will have more kernelcore than
3420 * others
3421 */
3422void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3423{
3424 int i, nid;
3425 unsigned long usable_startpfn;
3426 unsigned long kernelcore_node, kernelcore_remaining;
37b07e41
LS
3427 unsigned long totalpages = early_calculate_totalpages();
3428 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 3429
7e63efef
MG
3430 /*
3431 * If movablecore was specified, calculate the corresponding
3432 * amount of kernelcore so that memory usable for
3433 * any allocation type is evenly spread. If both kernelcore
3434 * and movablecore are specified, then the value of kernelcore
3435 * will be used for required_kernelcore if it's greater than
3436 * what movablecore would have allowed.
3437 */
3438 if (required_movablecore) {
7e63efef
MG
3439 unsigned long corepages;
3440
3441 /*
3442 * Round-up so that ZONE_MOVABLE is at least as large as what
3443 * was requested by the user
3444 */
3445 required_movablecore =
3446 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3447 corepages = totalpages - required_movablecore;
3448
3449 required_kernelcore = max(required_kernelcore, corepages);
3450 }
3451
2a1e274a
MG
3452 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3453 if (!required_kernelcore)
3454 return;
3455
3456 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3457 find_usable_zone_for_movable();
3458 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3459
3460restart:
3461 /* Spread kernelcore memory as evenly as possible throughout nodes */
3462 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 3463 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
3464 /*
3465 * Recalculate kernelcore_node if the division per node
3466 * now exceeds what is necessary to satisfy the requested
3467 * amount of memory for the kernel
3468 */
3469 if (required_kernelcore < kernelcore_node)
3470 kernelcore_node = required_kernelcore / usable_nodes;
3471
3472 /*
3473 * As the map is walked, we track how much memory is usable
3474 * by the kernel using kernelcore_remaining. When it is
3475 * 0, the rest of the node is usable by ZONE_MOVABLE
3476 */
3477 kernelcore_remaining = kernelcore_node;
3478
3479 /* Go through each range of PFNs within this node */
3480 for_each_active_range_index_in_nid(i, nid) {
3481 unsigned long start_pfn, end_pfn;
3482 unsigned long size_pages;
3483
3484 start_pfn = max(early_node_map[i].start_pfn,
3485 zone_movable_pfn[nid]);
3486 end_pfn = early_node_map[i].end_pfn;
3487 if (start_pfn >= end_pfn)
3488 continue;
3489
3490 /* Account for what is only usable for kernelcore */
3491 if (start_pfn < usable_startpfn) {
3492 unsigned long kernel_pages;
3493 kernel_pages = min(end_pfn, usable_startpfn)
3494 - start_pfn;
3495
3496 kernelcore_remaining -= min(kernel_pages,
3497 kernelcore_remaining);
3498 required_kernelcore -= min(kernel_pages,
3499 required_kernelcore);
3500
3501 /* Continue if range is now fully accounted */
3502 if (end_pfn <= usable_startpfn) {
3503
3504 /*
3505 * Push zone_movable_pfn to the end so
3506 * that if we have to rebalance
3507 * kernelcore across nodes, we will
3508 * not double account here
3509 */
3510 zone_movable_pfn[nid] = end_pfn;
3511 continue;
3512 }
3513 start_pfn = usable_startpfn;
3514 }
3515
3516 /*
3517 * The usable PFN range for ZONE_MOVABLE is from
3518 * start_pfn->end_pfn. Calculate size_pages as the
3519 * number of pages used as kernelcore
3520 */
3521 size_pages = end_pfn - start_pfn;
3522 if (size_pages > kernelcore_remaining)
3523 size_pages = kernelcore_remaining;
3524 zone_movable_pfn[nid] = start_pfn + size_pages;
3525
3526 /*
3527 * Some kernelcore has been met, update counts and
3528 * break if the kernelcore for this node has been
3529 * satisfied
3530 */
3531 required_kernelcore -= min(required_kernelcore,
3532 size_pages);
3533 kernelcore_remaining -= size_pages;
3534 if (!kernelcore_remaining)
3535 break;
3536 }
3537 }
3538
3539 /*
3540 * If there is still required_kernelcore, we do another pass with one
3541 * less node in the count. This will push zone_movable_pfn[nid] further
3542 * along on the nodes that still have memory until kernelcore is
3543 * satisfied
3544 */
3545 usable_nodes--;
3546 if (usable_nodes && required_kernelcore > usable_nodes)
3547 goto restart;
3548
3549 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3550 for (nid = 0; nid < MAX_NUMNODES; nid++)
3551 zone_movable_pfn[nid] =
3552 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3553}
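/*
 * Worked example with assumed numbers: two nodes of 4G each, booted with
 * kernelcore=2G, and all memory within the zone ZONE_MOVABLE may take
 * pages from.  usable_nodes is 2, so kernelcore_node starts at 1G worth
 * of pages per node; each node keeps its lowest 1G as kernelcore and
 * zone_movable_pfn[nid] lands 1G above that node's first usable PFN,
 * leaving roughly 3G per node to ZONE_MOVABLE (subject to the final
 * MAX_ORDER_NR_PAGES round-up).
 */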
3554
37b07e41
LS
3555 /* Any regular memory on that node? */
3556static void check_for_regular_memory(pg_data_t *pgdat)
3557{
3558#ifdef CONFIG_HIGHMEM
3559 enum zone_type zone_type;
3560
3561 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3562 struct zone *zone = &pgdat->node_zones[zone_type];
3563 if (zone->present_pages)
3564 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3565 }
3566#endif
3567}
3568
c713216d
MG
3569/**
3570 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 3571 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
3572 *
3573 * This will call free_area_init_node() for each active node in the system.
3574 * Using the page ranges provided by add_active_range(), the size of each
3575 * zone in each node and their holes is calculated. If the maximum PFN
3576 * between two adjacent zones match, it is assumed that the zone is empty.
3577 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3578 * that ZONE_DMA32 has no pages. It is also assumed that a zone
3579 * starts where the previous one ended. For example, ZONE_DMA32 starts
3580 * at arch_max_dma_pfn.
3581 */
3582void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3583{
3584 unsigned long nid;
3585 enum zone_type i;
3586
a6af2bc3
MG
3587 /* Sort early_node_map as initialisation assumes it is sorted */
3588 sort_node_map();
3589
c713216d
MG
3590 /* Record where the zone boundaries are */
3591 memset(arch_zone_lowest_possible_pfn, 0,
3592 sizeof(arch_zone_lowest_possible_pfn));
3593 memset(arch_zone_highest_possible_pfn, 0,
3594 sizeof(arch_zone_highest_possible_pfn));
3595 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3596 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3597 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
3598 if (i == ZONE_MOVABLE)
3599 continue;
c713216d
MG
3600 arch_zone_lowest_possible_pfn[i] =
3601 arch_zone_highest_possible_pfn[i-1];
3602 arch_zone_highest_possible_pfn[i] =
3603 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3604 }
2a1e274a
MG
3605 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3606 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3607
3608 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3609 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3610 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 3611
c713216d
MG
3612 /* Print out the zone ranges */
3613 printk("Zone PFN ranges:\n");
2a1e274a
MG
3614 for (i = 0; i < MAX_NR_ZONES; i++) {
3615 if (i == ZONE_MOVABLE)
3616 continue;
c713216d
MG
3617 printk(" %-8s %8lu -> %8lu\n",
3618 zone_names[i],
3619 arch_zone_lowest_possible_pfn[i],
3620 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
3621 }
3622
3623 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3624 printk("Movable zone start PFN for each node\n");
3625 for (i = 0; i < MAX_NUMNODES; i++) {
3626 if (zone_movable_pfn[i])
3627 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3628 }
c713216d
MG
3629
3630 /* Print out the early_node_map[] */
3631 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3632 for (i = 0; i < nr_nodemap_entries; i++)
3633 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3634 early_node_map[i].start_pfn,
3635 early_node_map[i].end_pfn);
3636
3637 /* Initialise every node */
8ef82866 3638 setup_nr_node_ids();
c713216d
MG
3639 for_each_online_node(nid) {
3640 pg_data_t *pgdat = NODE_DATA(nid);
3641 free_area_init_node(nid, pgdat, NULL,
3642 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
3643
3644 /* Any memory on that node */
3645 if (pgdat->node_present_pages)
3646 node_set_state(nid, N_HIGH_MEMORY);
3647 check_for_regular_memory(pgdat);
c713216d
MG
3648 }
3649}
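/*
 * Sketch only (architecture code, not part of this file): a typical caller
 * registers its ranges with add_active_range() and then passes the per-zone
 * maximum PFNs to free_area_init_nodes().  The zone limits below are
 * illustrative; real values come from the arch's memory detection.
 */
#if 0	/* example only */
void __init example_arch_paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = 16UL << (20 - PAGE_SHIFT);	/* first 16MB */
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;		/* end of lowmem */

	free_area_init_nodes(max_zone_pfns);
}
#endif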
2a1e274a 3650
7e63efef 3651static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
3652{
3653 unsigned long long coremem;
3654 if (!p)
3655 return -EINVAL;
3656
3657 coremem = memparse(p, &p);
7e63efef 3658 *core = coremem >> PAGE_SHIFT;
2a1e274a 3659
7e63efef 3660 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
3661 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3662
3663 return 0;
3664}
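/*
 * Worked example: "kernelcore=512M" makes memparse() return 536870912
 * bytes; with 4K pages (PAGE_SHIFT == 12) that is 536870912 >> 12 =
 * 131072 pages stored in required_kernelcore.
 */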
ed7ed365 3665
7e63efef
MG
3666/*
3667 * kernelcore=size sets the amount of memory for use for allocations that
3668 * cannot be reclaimed or migrated.
3669 */
3670static int __init cmdline_parse_kernelcore(char *p)
3671{
3672 return cmdline_parse_core(p, &required_kernelcore);
3673}
3674
3675/*
3676 * movablecore=size sets the amount of memory for use for allocations that
3677 * can be reclaimed or migrated.
3678 */
3679static int __init cmdline_parse_movablecore(char *p)
3680{
3681 return cmdline_parse_core(p, &required_movablecore);
3682}
3683
ed7ed365 3684early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 3685early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 3686
c713216d
MG
3687#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3688
0e0b864e 3689/**
88ca3b94
RD
3690 * set_dma_reserve - set the specified number of pages reserved in the first zone
3691 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
3692 *
3693 * The per-cpu batchsize and zone watermarks are determined by present_pages.
3694 * In the DMA zone, a significant percentage may be consumed by kernel image
3695 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
3696 * function may optionally be used to account for unfreeable pages in the
3697 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3698 * smaller per-cpu batchsize.
0e0b864e
MG
3699 */
3700void __init set_dma_reserve(unsigned long new_dma_reserve)
3701{
3702 dma_reserve = new_dma_reserve;
3703}
3704
93b7504e 3705#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3706static bootmem_data_t contig_bootmem_data;
3707struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3708
3709EXPORT_SYMBOL(contig_page_data);
93b7504e 3710#endif
1da177e4
LT
3711
3712void __init free_area_init(unsigned long *zones_size)
3713{
93b7504e 3714 free_area_init_node(0, NODE_DATA(0), zones_size,
1da177e4
LT
3715 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3716}
1da177e4 3717
1da177e4
LT
3718static int page_alloc_cpu_notify(struct notifier_block *self,
3719 unsigned long action, void *hcpu)
3720{
3721 int cpu = (unsigned long)hcpu;
1da177e4 3722
8bb78442 3723 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1da177e4
LT
3724 local_irq_disable();
3725 __drain_pages(cpu);
f8891e5e 3726 vm_events_fold_cpu(cpu);
1da177e4 3727 local_irq_enable();
2244b95a 3728 refresh_cpu_vm_stats(cpu);
1da177e4
LT
3729 }
3730 return NOTIFY_OK;
3731}
1da177e4
LT
3732
3733void __init page_alloc_init(void)
3734{
3735 hotcpu_notifier(page_alloc_cpu_notify, 0);
3736}
3737
cb45b0e9
HA
3738/*
3739 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
3740 * or min_free_kbytes changes.
3741 */
3742static void calculate_totalreserve_pages(void)
3743{
3744 struct pglist_data *pgdat;
3745 unsigned long reserve_pages = 0;
2f6726e5 3746 enum zone_type i, j;
cb45b0e9
HA
3747
3748 for_each_online_pgdat(pgdat) {
3749 for (i = 0; i < MAX_NR_ZONES; i++) {
3750 struct zone *zone = pgdat->node_zones + i;
3751 unsigned long max = 0;
3752
3753 /* Find valid and maximum lowmem_reserve in the zone */
3754 for (j = i; j < MAX_NR_ZONES; j++) {
3755 if (zone->lowmem_reserve[j] > max)
3756 max = zone->lowmem_reserve[j];
3757 }
3758
3759 /* we treat pages_high as reserved pages. */
3760 max += zone->pages_high;
3761
3762 if (max > zone->present_pages)
3763 max = zone->present_pages;
3764 reserve_pages += max;
3765 }
3766 }
3767 totalreserve_pages = reserve_pages;
3768}
3769
1da177e4
LT
3770/*
3771 * setup_per_zone_lowmem_reserve - called whenever
3772 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
3773 * has a correct lowmem_reserve value, so that an adequate number of
3774 * pages are left in the zone after a successful __alloc_pages().
3775 */
3776static void setup_per_zone_lowmem_reserve(void)
3777{
3778 struct pglist_data *pgdat;
2f6726e5 3779 enum zone_type j, idx;
1da177e4 3780
ec936fc5 3781 for_each_online_pgdat(pgdat) {
1da177e4
LT
3782 for (j = 0; j < MAX_NR_ZONES; j++) {
3783 struct zone *zone = pgdat->node_zones + j;
3784 unsigned long present_pages = zone->present_pages;
3785
3786 zone->lowmem_reserve[j] = 0;
3787
2f6726e5
CL
3788 idx = j;
3789 while (idx) {
1da177e4
LT
3790 struct zone *lower_zone;
3791
2f6726e5
CL
3792 idx--;
3793
1da177e4
LT
3794 if (sysctl_lowmem_reserve_ratio[idx] < 1)
3795 sysctl_lowmem_reserve_ratio[idx] = 1;
3796
3797 lower_zone = pgdat->node_zones + idx;
3798 lower_zone->lowmem_reserve[j] = present_pages /
3799 sysctl_lowmem_reserve_ratio[idx];
3800 present_pages += lower_zone->present_pages;
3801 }
3802 }
3803 }
cb45b0e9
HA
3804
3805 /* update totalreserve_pages */
3806 calculate_totalreserve_pages();
1da177e4
LT
3807}
3808
88ca3b94
RD
3809/**
3810 * setup_per_zone_pages_min - called when min_free_kbytes changes.
3811 *
3812 * Ensures that the pages_{min,low,high} values for each zone are set correctly
3813 * with respect to min_free_kbytes.
1da177e4 3814 */
3947be19 3815void setup_per_zone_pages_min(void)
1da177e4
LT
3816{
3817 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
3818 unsigned long lowmem_pages = 0;
3819 struct zone *zone;
3820 unsigned long flags;
3821
3822 /* Calculate total number of !ZONE_HIGHMEM pages */
3823 for_each_zone(zone) {
3824 if (!is_highmem(zone))
3825 lowmem_pages += zone->present_pages;
3826 }
3827
3828 for_each_zone(zone) {
ac924c60
AM
3829 u64 tmp;
3830
1da177e4 3831 spin_lock_irqsave(&zone->lru_lock, flags);
ac924c60
AM
3832 tmp = (u64)pages_min * zone->present_pages;
3833 do_div(tmp, lowmem_pages);
1da177e4
LT
3834 if (is_highmem(zone)) {
3835 /*
669ed175
NP
3836 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
3837 * need highmem pages, so cap pages_min to a small
3838 * value here.
3839 *
3840 * The (pages_high-pages_low) and (pages_low-pages_min)
3841 * deltas control async page reclaim, and so should
3842 * not be capped for highmem.
1da177e4
LT
3843 */
3844 int min_pages;
3845
3846 min_pages = zone->present_pages / 1024;
3847 if (min_pages < SWAP_CLUSTER_MAX)
3848 min_pages = SWAP_CLUSTER_MAX;
3849 if (min_pages > 128)
3850 min_pages = 128;
3851 zone->pages_min = min_pages;
3852 } else {
669ed175
NP
3853 /*
3854 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
3855 * proportionate to the zone's size.
3856 */
669ed175 3857 zone->pages_min = tmp;
1da177e4
LT
3858 }
3859
ac924c60
AM
3860 zone->pages_low = zone->pages_min + (tmp >> 2);
3861 zone->pages_high = zone->pages_min + (tmp >> 1);
1da177e4
LT
3862 spin_unlock_irqrestore(&zone->lru_lock, flags);
3863 }
cb45b0e9
HA
3864
3865 /* update totalreserve_pages */
3866 calculate_totalreserve_pages();
1da177e4
LT
3867}
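/*
 * Worked example with assumed sizes: min_free_kbytes=4096 and 4K pages
 * give pages_min = 4096 >> 2 = 1024 pages.  A lowmem zone holding half of
 * all lowmem gets tmp = 1024 / 2 = 512, so for that zone pages_min = 512,
 * pages_low = 512 + 128 = 640 and pages_high = 512 + 256 = 768.
 */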
3868
3869/*
3870 * Initialise min_free_kbytes.
3871 *
3872 * For small machines we want it small (128k min). For large machines
3873 * we want it large (64MB max). But it is not linear, because network
3874 * bandwidth does not increase linearly with machine size. We use
3875 *
3876 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
3877 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
3878 *
3879 * which yields
3880 *
3881 * 16MB: 512k
3882 * 32MB: 724k
3883 * 64MB: 1024k
3884 * 128MB: 1448k
3885 * 256MB: 2048k
3886 * 512MB: 2896k
3887 * 1024MB: 4096k
3888 * 2048MB: 5792k
3889 * 4096MB: 8192k
3890 * 8192MB: 11584k
3891 * 16384MB: 16384k
3892 */
3893static int __init init_per_zone_pages_min(void)
3894{
3895 unsigned long lowmem_kbytes;
3896
3897 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
3898
3899 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
3900 if (min_free_kbytes < 128)
3901 min_free_kbytes = 128;
3902 if (min_free_kbytes > 65536)
3903 min_free_kbytes = 65536;
3904 setup_per_zone_pages_min();
3905 setup_per_zone_lowmem_reserve();
3906 return 0;
3907}
3908module_init(init_per_zone_pages_min)
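/*
 * Quick check of the table above: 16MB of lowmem gives lowmem_kbytes =
 * 16384, so min_free_kbytes = int_sqrt(16384 * 16) = int_sqrt(262144) =
 * 512, matching the "16MB: 512k" row (comfortably above the 128k floor).
 */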
3909
3910/*
3911 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
3912 * that we can recalculate the per-zone watermarks whenever min_free_kbytes
3913 * changes.
3914 */
3915int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
3916 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3917{
3918 proc_dointvec(table, write, file, buffer, length, ppos);
3b1d92c5
MG
3919 if (write)
3920 setup_per_zone_pages_min();
1da177e4
LT
3921 return 0;
3922}
3923
9614634f
CL
3924#ifdef CONFIG_NUMA
3925int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
3926 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3927{
3928 struct zone *zone;
3929 int rc;
3930
3931 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3932 if (rc)
3933 return rc;
3934
3935 for_each_zone(zone)
8417bba4 3936 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
3937 sysctl_min_unmapped_ratio) / 100;
3938 return 0;
3939}
0ff38490
CL
3940
3941int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
3942 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3943{
3944 struct zone *zone;
3945 int rc;
3946
3947 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3948 if (rc)
3949 return rc;
3950
3951 for_each_zone(zone)
3952 zone->min_slab_pages = (zone->present_pages *
3953 sysctl_min_slab_ratio) / 100;
3954 return 0;
3955}
9614634f
CL
3956#endif
3957
1da177e4
LT
3958/*
3959 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
3960 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
3961 * whenever sysctl_lowmem_reserve_ratio changes.
3962 *
3963 * The reserve ratio obviously has absolutely no relation with the
3964 * pages_min watermarks. The lowmem reserve ratio can only be
3965 * interpreted as a function of the boot-time zone sizes.
3966 */
3967int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
3968 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3969{
3970 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3971 setup_per_zone_lowmem_reserve();
3972 return 0;
3973}
3974
8ad4b1fb
RS
3975/*
3976 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
3977 * cpu. It is the fraction of a zone's total pages that a hot per-cpu pagelist
3978 * can hold before it gets flushed back to the buddy allocator.
3979 */
3980
3981int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
3982 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3983{
3984 struct zone *zone;
3985 unsigned int cpu;
3986 int ret;
3987
3988 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3989 if (!write || (ret == -EINVAL))
3990 return ret;
3991 for_each_zone(zone) {
3992 for_each_online_cpu(cpu) {
3993 unsigned long high;
3994 high = zone->present_pages / percpu_pagelist_fraction;
3995 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
3996 }
3997 }
3998 return 0;
3999}
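/*
 * Worked example: writing 8 to /proc/sys/vm/percpu_pagelist_fraction on a
 * zone with 262144 present pages sets each online CPU's pcp->high to
 * 262144 / 8 = 32768 pages; setup_pagelist_highmark() derives the batch
 * size from that high watermark.
 */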
4000
f034b5d4 4001int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
4002
4003#ifdef CONFIG_NUMA
4004static int __init set_hashdist(char *str)
4005{
4006 if (!str)
4007 return 0;
4008 hashdist = simple_strtoul(str, &str, 0);
4009 return 1;
4010}
4011__setup("hashdist=", set_hashdist);
4012#endif
4013
4014/*
4015 * allocate a large system hash table from bootmem
4016 * - it is assumed that the hash table must contain an exact power-of-2
4017 * quantity of entries
4018 * - limit is the number of hash buckets, not the total allocation size
4019 */
4020void *__init alloc_large_system_hash(const char *tablename,
4021 unsigned long bucketsize,
4022 unsigned long numentries,
4023 int scale,
4024 int flags,
4025 unsigned int *_hash_shift,
4026 unsigned int *_hash_mask,
4027 unsigned long limit)
4028{
4029 unsigned long long max = limit;
4030 unsigned long log2qty, size;
4031 void *table = NULL;
4032
4033 /* allow the kernel cmdline to have a say */
4034 if (!numentries) {
4035 /* round applicable memory size up to nearest megabyte */
04903664 4036 numentries = nr_kernel_pages;
1da177e4
LT
4037 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4038 numentries >>= 20 - PAGE_SHIFT;
4039 numentries <<= 20 - PAGE_SHIFT;
4040
4041 /* limit to 1 bucket per 2^scale bytes of low memory */
4042 if (scale > PAGE_SHIFT)
4043 numentries >>= (scale - PAGE_SHIFT);
4044 else
4045 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
4046
4047 /* Make sure we've got at least a 0-order allocation.. */
4048 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4049 numentries = PAGE_SIZE / bucketsize;
1da177e4 4050 }
6e692ed3 4051 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
4052
4053 /* limit allocation size to 1/16 total memory by default */
4054 if (max == 0) {
4055 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4056 do_div(max, bucketsize);
4057 }
4058
4059 if (numentries > max)
4060 numentries = max;
4061
f0d1b0b3 4062 log2qty = ilog2(numentries);
1da177e4
LT
4063
4064 do {
4065 size = bucketsize << log2qty;
4066 if (flags & HASH_EARLY)
4067 table = alloc_bootmem(size);
4068 else if (hashdist)
4069 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4070 else {
4071 unsigned long order;
4072 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
4073 ;
4074 table = (void*) __get_free_pages(GFP_ATOMIC, order);
1037b83b
ED
4075 /*
4076 * If bucketsize is not a power-of-two, we may free
4077 * some pages at the end of hash table.
4078 */
4079 if (table) {
4080 unsigned long alloc_end = (unsigned long)table +
4081 (PAGE_SIZE << order);
4082 unsigned long used = (unsigned long)table +
4083 PAGE_ALIGN(size);
4084 split_page(virt_to_page(table), order);
4085 while (used < alloc_end) {
4086 free_page(used);
4087 used += PAGE_SIZE;
4088 }
4089 }
1da177e4
LT
4090 }
4091 } while (!table && size > PAGE_SIZE && --log2qty);
4092
4093 if (!table)
4094 panic("Failed to allocate %s hash table\n", tablename);
4095
b49ad484 4096 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
4097 tablename,
4098 (1U << log2qty),
f0d1b0b3 4099 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
4100 size);
4101
4102 if (_hash_shift)
4103 *_hash_shift = log2qty;
4104 if (_hash_mask)
4105 *_hash_mask = (1 << log2qty) - 1;
4106
4107 return table;
4108}
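/*
 * Sketch only: how a subsystem might size a boot-time hash table with the
 * helper above.  The table name, shift/mask variables and the one-bucket-
 * per-16KB scale are made-up values for illustration.
 */
#if 0	/* example only */
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;
static struct hlist_head *example_hash_table;

static void __init example_hash_init(void)
{
	example_hash_table = alloc_large_system_hash("Example-cache",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					14,		/* 1 bucket per 16KB */
					HASH_EARLY,	/* use bootmem      */
					&example_hash_shift,
					&example_hash_mask,
					0);		/* default limit    */
}
#endif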
a117e66e
KH
4109
4110#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
a117e66e
KH
4111struct page *pfn_to_page(unsigned long pfn)
4112{
67de6482 4113 return __pfn_to_page(pfn);
a117e66e
KH
4114}
4115unsigned long page_to_pfn(struct page *page)
4116{
67de6482 4117 return __page_to_pfn(page);
a117e66e 4118}
a117e66e
KH
4119EXPORT_SYMBOL(pfn_to_page);
4120EXPORT_SYMBOL(page_to_pfn);
4121#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
6220ec78 4122
835c134e
MG
4123/* Return a pointer to the bitmap storing bits affecting a block of pages */
4124static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4125 unsigned long pfn)
4126{
4127#ifdef CONFIG_SPARSEMEM
4128 return __pfn_to_section(pfn)->pageblock_flags;
4129#else
4130 return zone->pageblock_flags;
4131#endif /* CONFIG_SPARSEMEM */
4132}
4133
4134static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4135{
4136#ifdef CONFIG_SPARSEMEM
4137 pfn &= (PAGES_PER_SECTION-1);
4138 return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
4139#else
4140 pfn = pfn - zone->zone_start_pfn;
4141 return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
4142#endif /* CONFIG_SPARSEMEM */
4143}
4144
4145/**
4146 * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages
4147 * @page: The page within the block of interest
4148 * @start_bitidx: The first bit of interest to retrieve
4149 * @end_bitidx: The last bit of interest
4150 * returns pageblock_bits flags
4151 */
4152unsigned long get_pageblock_flags_group(struct page *page,
4153 int start_bitidx, int end_bitidx)
4154{
4155 struct zone *zone;
4156 unsigned long *bitmap;
4157 unsigned long pfn, bitidx;
4158 unsigned long flags = 0;
4159 unsigned long value = 1;
4160
4161 zone = page_zone(page);
4162 pfn = page_to_pfn(page);
4163 bitmap = get_pageblock_bitmap(zone, pfn);
4164 bitidx = pfn_to_bitidx(zone, pfn);
4165
4166 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4167 if (test_bit(bitidx + start_bitidx, bitmap))
4168 flags |= value;
6220ec78 4169
835c134e
MG
4170 return flags;
4171}
4172
4173/**
4174 * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages
4175 * @page: The page within the block of interest
4176 * @start_bitidx: The first bit of interest
4177 * @end_bitidx: The last bit of interest
4178 * @flags: The flags to set
4179 */
4180void set_pageblock_flags_group(struct page *page, unsigned long flags,
4181 int start_bitidx, int end_bitidx)
4182{
4183 struct zone *zone;
4184 unsigned long *bitmap;
4185 unsigned long pfn, bitidx;
4186 unsigned long value = 1;
4187
4188 zone = page_zone(page);
4189 pfn = page_to_pfn(page);
4190 bitmap = get_pageblock_bitmap(zone, pfn);
4191 bitidx = pfn_to_bitidx(zone, pfn);
4192
4193 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4194 if (flags & value)
4195 __set_bit(bitidx + start_bitidx, bitmap);
4196 else
4197 __clear_bit(bitidx + start_bitidx, bitmap);
4198}
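/*
 * Sketch only: the intended users of the two accessors above are small
 * per-pageblock migratetype helpers along these lines (the PB_migrate bit
 * range lives in the pageblock flags header; the wrapper names here are
 * made up).
 */
#if 0	/* example only */
static inline int example_get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

static inline void example_set_pageblock_migratetype(struct page *page,
						     int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
				  PB_migrate, PB_migrate_end);
}
#endif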