Choose pages from the per-cpu list based on migration type
[deliverable/linux.git] / mm / page_alloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
22#include <linux/bootmem.h>
23#include <linux/compiler.h>
9f158333 24#include <linux/kernel.h>
1da177e4
LT
25#include <linux/module.h>
26#include <linux/suspend.h>
27#include <linux/pagevec.h>
28#include <linux/blkdev.h>
29#include <linux/slab.h>
30#include <linux/notifier.h>
31#include <linux/topology.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
bdc8cb98 35#include <linux/memory_hotplug.h>
1da177e4
LT
36#include <linux/nodemask.h>
37#include <linux/vmalloc.h>
4be38e35 38#include <linux/mempolicy.h>
6811378e 39#include <linux/stop_machine.h>
c713216d
MG
40#include <linux/sort.h>
41#include <linux/pfn.h>
3fcfab16 42#include <linux/backing-dev.h>
933e312e 43#include <linux/fault-inject.h>
1da177e4
LT
44
45#include <asm/tlbflush.h>
ac924c60 46#include <asm/div64.h>
1da177e4
LT
47#include "internal.h"
48
49/*
13808910 50 * Array of node states.
1da177e4 51 */
13808910
CL
52nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
53 [N_POSSIBLE] = NODE_MASK_ALL,
54 [N_ONLINE] = { { [0] = 1UL } },
55#ifndef CONFIG_NUMA
56 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
57#ifdef CONFIG_HIGHMEM
58 [N_HIGH_MEMORY] = { { [0] = 1UL } },
59#endif
60 [N_CPU] = { { [0] = 1UL } },
61#endif /* NUMA */
62};
63EXPORT_SYMBOL(node_states);
64
6c231b7b 65unsigned long totalram_pages __read_mostly;
cb45b0e9 66unsigned long totalreserve_pages __read_mostly;
1da177e4 67long nr_swap_pages;
8ad4b1fb 68int percpu_pagelist_fraction;
1da177e4 69
d98c7a09 70static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 71
1da177e4
LT
72/*
73 * results with 256, 32 in the lowmem_reserve sysctl:
74 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
75 * 1G machine -> (16M dma, 784M normal, 224M high)
76 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
77 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
78 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
79 *
80 * TBD: should special case ZONE_DMA32 machines here - in those we normally
81 * don't need any ZONE_NORMAL reservation
1da177e4 82 */
2f1b6248 83int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 84#ifdef CONFIG_ZONE_DMA
2f1b6248 85 256,
4b51d669 86#endif
fb0e7942 87#ifdef CONFIG_ZONE_DMA32
2f1b6248 88 256,
fb0e7942 89#endif
e53ef38d 90#ifdef CONFIG_HIGHMEM
2a1e274a 91 32,
e53ef38d 92#endif
2a1e274a 93 32,
2f1b6248 94};
1da177e4
LT
95
96EXPORT_SYMBOL(totalram_pages);
1da177e4 97
15ad7cdc 98static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 99#ifdef CONFIG_ZONE_DMA
2f1b6248 100 "DMA",
4b51d669 101#endif
fb0e7942 102#ifdef CONFIG_ZONE_DMA32
2f1b6248 103 "DMA32",
fb0e7942 104#endif
2f1b6248 105 "Normal",
e53ef38d 106#ifdef CONFIG_HIGHMEM
2a1e274a 107 "HighMem",
e53ef38d 108#endif
2a1e274a 109 "Movable",
2f1b6248
CL
110};
111
1da177e4
LT
112int min_free_kbytes = 1024;
113
86356ab1
YG
114unsigned long __meminitdata nr_kernel_pages;
115unsigned long __meminitdata nr_all_pages;
a3142c8e 116static unsigned long __meminitdata dma_reserve;
1da177e4 117
c713216d
MG
118#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
119 /*
120 * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
121 * ranges of memory (RAM) that may be registered with add_active_range().
122 * Ranges passed to add_active_range() will be merged if possible
123 * so the number of times add_active_range() can be called is
124 * related to the number of nodes and the number of holes
125 */
126 #ifdef CONFIG_MAX_ACTIVE_REGIONS
127 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
128 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
129 #else
130 #if MAX_NUMNODES >= 32
131 /* If there can be many nodes, allow up to 50 holes per node */
132 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
133 #else
134 /* By default, allow up to 256 distinct regions */
135 #define MAX_ACTIVE_REGIONS 256
136 #endif
137 #endif
138
98011f56
JB
139 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
140 static int __meminitdata nr_nodemap_entries;
141 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
142 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
fb01439c 143#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
98011f56
JB
144 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
145 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
fb01439c 146#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
2a1e274a 147 unsigned long __initdata required_kernelcore;
7e63efef 148 unsigned long __initdata required_movablecore;
e228929b 149 unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
150
151 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
152 int movable_zone;
153 EXPORT_SYMBOL(movable_zone);
c713216d
MG
154#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
155
418508c1
MS
156#if MAX_NUMNODES > 1
157int nr_node_ids __read_mostly = MAX_NUMNODES;
158EXPORT_SYMBOL(nr_node_ids);
159#endif
160
b2a0ac88
MG
161static inline int get_pageblock_migratetype(struct page *page)
162{
163 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
164}
165
166static void set_pageblock_migratetype(struct page *page, int migratetype)
167{
168 set_pageblock_flags_group(page, (unsigned long)migratetype,
169 PB_migrate, PB_migrate_end);
170}
171
172static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
173{
174 return ((gfp_flags & __GFP_MOVABLE) != 0);
175}
176
13e7444b 177#ifdef CONFIG_DEBUG_VM
c6a57e19 178static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 179{
bdc8cb98
DH
180 int ret = 0;
181 unsigned seq;
182 unsigned long pfn = page_to_pfn(page);
c6a57e19 183
bdc8cb98
DH
184 do {
185 seq = zone_span_seqbegin(zone);
186 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
187 ret = 1;
188 else if (pfn < zone->zone_start_pfn)
189 ret = 1;
190 } while (zone_span_seqretry(zone, seq));
191
192 return ret;
c6a57e19
DH
193}
194
195static int page_is_consistent(struct zone *zone, struct page *page)
196{
14e07298 197 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 198 return 0;
1da177e4 199 if (zone != page_zone(page))
c6a57e19
DH
200 return 0;
201
202 return 1;
203}
204/*
205 * Temporary debugging check for pages not lying within a given zone.
206 */
207static int bad_range(struct zone *zone, struct page *page)
208{
209 if (page_outside_zone_boundaries(zone, page))
1da177e4 210 return 1;
c6a57e19
DH
211 if (!page_is_consistent(zone, page))
212 return 1;
213
1da177e4
LT
214 return 0;
215}
13e7444b
NP
216#else
217static inline int bad_range(struct zone *zone, struct page *page)
218{
219 return 0;
220}
221#endif
222
224abf92 223static void bad_page(struct page *page)
1da177e4 224{
224abf92 225 printk(KERN_EMERG "Bad page state in process '%s'\n"
7365f3d1
HD
226 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
227 KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
228 KERN_EMERG "Backtrace:\n",
224abf92
NP
229 current->comm, page, (int)(2*sizeof(unsigned long)),
230 (unsigned long)page->flags, page->mapping,
231 page_mapcount(page), page_count(page));
1da177e4 232 dump_stack();
334795ec
HD
233 page->flags &= ~(1 << PG_lru |
234 1 << PG_private |
1da177e4 235 1 << PG_locked |
1da177e4
LT
236 1 << PG_active |
237 1 << PG_dirty |
334795ec
HD
238 1 << PG_reclaim |
239 1 << PG_slab |
1da177e4 240 1 << PG_swapcache |
676165a8
NP
241 1 << PG_writeback |
242 1 << PG_buddy );
1da177e4
LT
243 set_page_count(page, 0);
244 reset_page_mapcount(page);
245 page->mapping = NULL;
9f158333 246 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
247}
248
1da177e4
LT
249/*
250 * Higher-order pages are called "compound pages". They are structured thusly:
251 *
252 * The first PAGE_SIZE page is called the "head page".
253 *
254 * The remaining PAGE_SIZE pages are called "tail pages".
255 *
256 * All pages have PG_compound set. All pages have their ->private pointing at
257 * the head page (even the head page has this).
258 *
41d78ba5
HD
259 * The first tail page's ->lru.next holds the address of the compound page's
260 * put_page() function. Its ->lru.prev holds the order of allocation.
261 * This usage means that zero-order pages may not be compound.
1da177e4 262 */
d98c7a09
HD
263
264static void free_compound_page(struct page *page)
265{
d85f3385 266 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
267}
268
1da177e4
LT
269static void prep_compound_page(struct page *page, unsigned long order)
270{
271 int i;
272 int nr_pages = 1 << order;
273
33f2ef89 274 set_compound_page_dtor(page, free_compound_page);
d85f3385 275 set_compound_order(page, order);
6d777953 276 __SetPageHead(page);
d85f3385 277 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
278 struct page *p = page + i;
279
d85f3385 280 __SetPageTail(p);
d85f3385 281 p->first_page = page;
1da177e4
LT
282 }
283}
284
285static void destroy_compound_page(struct page *page, unsigned long order)
286{
287 int i;
288 int nr_pages = 1 << order;
289
d85f3385 290 if (unlikely(compound_order(page) != order))
224abf92 291 bad_page(page);
1da177e4 292
6d777953 293 if (unlikely(!PageHead(page)))
d85f3385 294 bad_page(page);
6d777953 295 __ClearPageHead(page);
d85f3385 296 for (i = 1; i < nr_pages; i++) {
1da177e4
LT
297 struct page *p = page + i;
298
6d777953 299 if (unlikely(!PageTail(p) |
d85f3385 300 (p->first_page != page)))
224abf92 301 bad_page(page);
d85f3385 302 __ClearPageTail(p);
1da177e4
LT
303 }
304}
1da177e4 305
17cf4406
NP
306static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
307{
308 int i;
309
725d704e 310 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
6626c5d5
AM
311 /*
312 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
313 * and __GFP_HIGHMEM from hard or soft interrupt context.
314 */
725d704e 315 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
316 for (i = 0; i < (1 << order); i++)
317 clear_highpage(page + i);
318}
319
1da177e4
LT
320/*
321 * function for dealing with page's order in buddy system.
322 * zone->lock is already acquired when we use these.
323 * So, we don't need atomic page->flags operations here.
324 */
6aa3001b
AM
325static inline unsigned long page_order(struct page *page)
326{
4c21e2f2 327 return page_private(page);
1da177e4
LT
328}
329
6aa3001b
AM
330static inline void set_page_order(struct page *page, int order)
331{
4c21e2f2 332 set_page_private(page, order);
676165a8 333 __SetPageBuddy(page);
1da177e4
LT
334}
335
336static inline void rmv_page_order(struct page *page)
337{
676165a8 338 __ClearPageBuddy(page);
4c21e2f2 339 set_page_private(page, 0);
1da177e4
LT
340}
341
342/*
343 * Locate the struct page for both the matching buddy in our
344 * pair (buddy1) and the combined O(n+1) page they form (page).
345 *
346 * 1) Any buddy B1 will have an order O twin B2 which satisfies
347 * the following equation:
348 * B2 = B1 ^ (1 << O)
349 * For example, if the starting buddy (buddy2) is #8 its order
350 * 1 buddy is #10:
351 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
352 *
353 * 2) Any buddy B will have an order O+1 parent P which
354 * satisfies the following equation:
355 * P = B & ~(1 << O)
356 *
d6e05edc 357 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
1da177e4
LT
358 */
359static inline struct page *
360__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
361{
362 unsigned long buddy_idx = page_idx ^ (1 << order);
363
364 return page + (buddy_idx - page_idx);
365}
366
367static inline unsigned long
368__find_combined_index(unsigned long page_idx, unsigned int order)
369{
370 return (page_idx & ~(1 << order));
371}
372
373/*
374 * This function checks whether a page is free && is the buddy
375 * we can do coalesce a page and its buddy if
13e7444b 376 * (a) the buddy is not in a hole &&
676165a8 377 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
378 * (c) a page and its buddy have the same order &&
379 * (d) a page and its buddy are in the same zone.
676165a8
NP
380 *
381 * For recording whether a page is in the buddy system, we use PG_buddy.
382 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
1da177e4 383 *
676165a8 384 * For recording page's order, we use page_private(page).
1da177e4 385 */
cb2b95e1
AW
386static inline int page_is_buddy(struct page *page, struct page *buddy,
387 int order)
1da177e4 388{
14e07298 389 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 390 return 0;
13e7444b 391
cb2b95e1
AW
392 if (page_zone_id(page) != page_zone_id(buddy))
393 return 0;
394
395 if (PageBuddy(buddy) && page_order(buddy) == order) {
396 BUG_ON(page_count(buddy) != 0);
6aa3001b 397 return 1;
676165a8 398 }
6aa3001b 399 return 0;
1da177e4
LT
400}
401
402/*
403 * Freeing function for a buddy system allocator.
404 *
405 * The concept of a buddy system is to maintain direct-mapped table
406 * (containing bit values) for memory blocks of various "orders".
407 * The bottom level table contains the map for the smallest allocatable
408 * units of memory (here, pages), and each level above it describes
409 * pairs of units from the levels below, hence, "buddies".
410 * At a high level, all that happens here is marking the table entry
411 * at the bottom level available, and propagating the changes upward
412 * as necessary, plus some accounting needed to play nicely with other
413 * parts of the VM system.
414 * At each level, we keep a list of pages, which are heads of continuous
676165a8 415 * free pages of length of (1 << order) and marked with PG_buddy. Page's
4c21e2f2 416 * order is recorded in page_private(page) field.
1da177e4
LT
417 * So when we are allocating or freeing one, we can derive the state of the
418 * other. That is, if we allocate a small block, and both were
419 * free, the remainder of the region must be split into blocks.
420 * If a block is freed, and its buddy is also free, then this
421 * triggers coalescing into a block of larger size.
422 *
423 * -- wli
424 */
425
48db57f8 426static inline void __free_one_page(struct page *page,
1da177e4
LT
427 struct zone *zone, unsigned int order)
428{
429 unsigned long page_idx;
430 int order_size = 1 << order;
b2a0ac88 431 int migratetype = get_pageblock_migratetype(page);
1da177e4 432
224abf92 433 if (unlikely(PageCompound(page)))
1da177e4
LT
434 destroy_compound_page(page, order);
435
436 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
437
725d704e
NP
438 VM_BUG_ON(page_idx & (order_size - 1));
439 VM_BUG_ON(bad_range(zone, page));
1da177e4 440
d23ad423 441 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
1da177e4
LT
442 while (order < MAX_ORDER-1) {
443 unsigned long combined_idx;
1da177e4
LT
444 struct page *buddy;
445
1da177e4 446 buddy = __page_find_buddy(page, page_idx, order);
cb2b95e1 447 if (!page_is_buddy(page, buddy, order))
1da177e4 448 break; /* Move the buddy up one level. */
13e7444b 449
1da177e4 450 list_del(&buddy->lru);
b2a0ac88 451 zone->free_area[order].nr_free--;
1da177e4 452 rmv_page_order(buddy);
13e7444b 453 combined_idx = __find_combined_index(page_idx, order);
1da177e4
LT
454 page = page + (combined_idx - page_idx);
455 page_idx = combined_idx;
456 order++;
457 }
458 set_page_order(page, order);
b2a0ac88
MG
459 list_add(&page->lru,
460 &zone->free_area[order].free_list[migratetype]);
1da177e4
LT
461 zone->free_area[order].nr_free++;
462}
463
224abf92 464static inline int free_pages_check(struct page *page)
1da177e4 465{
92be2e33
NP
466 if (unlikely(page_mapcount(page) |
467 (page->mapping != NULL) |
468 (page_count(page) != 0) |
1da177e4
LT
469 (page->flags & (
470 1 << PG_lru |
471 1 << PG_private |
472 1 << PG_locked |
473 1 << PG_active |
1da177e4
LT
474 1 << PG_slab |
475 1 << PG_swapcache |
b5810039 476 1 << PG_writeback |
676165a8
NP
477 1 << PG_reserved |
478 1 << PG_buddy ))))
224abf92 479 bad_page(page);
1da177e4 480 if (PageDirty(page))
242e5468 481 __ClearPageDirty(page);
689bcebf
HD
482 /*
483 * For now, we report if PG_reserved was found set, but do not
484 * clear it, and do not free the page. But we shall soon need
485 * to do more, for when the ZERO_PAGE count wraps negative.
486 */
487 return PageReserved(page);
1da177e4
LT
488}
489
490/*
491 * Frees a list of pages.
492 * Assumes all pages on list are in same zone, and of same order.
207f36ee 493 * count is the number of pages to free.
1da177e4
LT
494 *
495 * If the zone was previously in an "all pages pinned" state then look to
496 * see if this freeing clears that state.
497 *
498 * And clear the zone's pages_scanned counter, to hold off the "all pages are
499 * pinned" detection logic.
500 */
48db57f8
NP
501static void free_pages_bulk(struct zone *zone, int count,
502 struct list_head *list, int order)
1da177e4 503{
c54ad30c 504 spin_lock(&zone->lock);
1da177e4
LT
505 zone->all_unreclaimable = 0;
506 zone->pages_scanned = 0;
48db57f8
NP
507 while (count--) {
508 struct page *page;
509
725d704e 510 VM_BUG_ON(list_empty(list));
1da177e4 511 page = list_entry(list->prev, struct page, lru);
48db57f8 512 /* have to delete it as __free_one_page list manipulates */
1da177e4 513 list_del(&page->lru);
48db57f8 514 __free_one_page(page, zone, order);
1da177e4 515 }
c54ad30c 516 spin_unlock(&zone->lock);
1da177e4
LT
517}
518
48db57f8 519static void free_one_page(struct zone *zone, struct page *page, int order)
1da177e4 520{
006d22d9
CL
521 spin_lock(&zone->lock);
522 zone->all_unreclaimable = 0;
523 zone->pages_scanned = 0;
0798e519 524 __free_one_page(page, zone, order);
006d22d9 525 spin_unlock(&zone->lock);
48db57f8
NP
526}
527
528static void __free_pages_ok(struct page *page, unsigned int order)
529{
530 unsigned long flags;
1da177e4 531 int i;
689bcebf 532 int reserved = 0;
1da177e4 533
1da177e4 534 for (i = 0 ; i < (1 << order) ; ++i)
224abf92 535 reserved += free_pages_check(page + i);
689bcebf
HD
536 if (reserved)
537 return;
538
9858db50
NP
539 if (!PageHighMem(page))
540 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
dafb1367 541 arch_free_page(page, order);
48db57f8 542 kernel_map_pages(page, 1 << order, 0);
dafb1367 543
c54ad30c 544 local_irq_save(flags);
f8891e5e 545 __count_vm_events(PGFREE, 1 << order);
48db57f8 546 free_one_page(page_zone(page), page, order);
c54ad30c 547 local_irq_restore(flags);
1da177e4
LT
548}
549
a226f6c8
DH
550/*
551 * permit the bootmem allocator to evade page validation on high-order frees
552 */
553void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
554{
555 if (order == 0) {
556 __ClearPageReserved(page);
557 set_page_count(page, 0);
7835e98b 558 set_page_refcounted(page);
545b1ea9 559 __free_page(page);
a226f6c8 560 } else {
a226f6c8
DH
561 int loop;
562
545b1ea9 563 prefetchw(page);
a226f6c8
DH
564 for (loop = 0; loop < BITS_PER_LONG; loop++) {
565 struct page *p = &page[loop];
566
545b1ea9
NP
567 if (loop + 1 < BITS_PER_LONG)
568 prefetchw(p + 1);
a226f6c8
DH
569 __ClearPageReserved(p);
570 set_page_count(p, 0);
571 }
572
7835e98b 573 set_page_refcounted(page);
545b1ea9 574 __free_pages(page, order);
a226f6c8
DH
575 }
576}
577
1da177e4
LT
578
579/*
580 * The order of subdivision here is critical for the IO subsystem.
581 * Please do not alter this order without good reasons and regression
582 * testing. Specifically, as large blocks of memory are subdivided,
583 * the order in which smaller blocks are delivered depends on the order
584 * they're subdivided in this function. This is the primary factor
585 * influencing the order in which pages are delivered to the IO
586 * subsystem according to empirical testing, and this is also justified
587 * by considering the behavior of a buddy system containing a single
588 * large block of memory acted on by a series of small allocations.
589 * This behavior is a critical factor in sglist merging's success.
590 *
591 * -- wli
592 */
085cc7d5 593static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
594 int low, int high, struct free_area *area,
595 int migratetype)
1da177e4
LT
596{
597 unsigned long size = 1 << high;
598
599 while (high > low) {
600 area--;
601 high--;
602 size >>= 1;
725d704e 603 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 604 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
605 area->nr_free++;
606 set_page_order(&page[size], high);
607 }
1da177e4
LT
608}
609
1da177e4
LT
610/*
611 * This page is about to be returned from the page allocator
612 */
17cf4406 613static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
1da177e4 614{
92be2e33
NP
615 if (unlikely(page_mapcount(page) |
616 (page->mapping != NULL) |
617 (page_count(page) != 0) |
334795ec
HD
618 (page->flags & (
619 1 << PG_lru |
1da177e4
LT
620 1 << PG_private |
621 1 << PG_locked |
1da177e4
LT
622 1 << PG_active |
623 1 << PG_dirty |
334795ec 624 1 << PG_slab |
1da177e4 625 1 << PG_swapcache |
b5810039 626 1 << PG_writeback |
676165a8
NP
627 1 << PG_reserved |
628 1 << PG_buddy ))))
224abf92 629 bad_page(page);
1da177e4 630
689bcebf
HD
631 /*
632 * For now, we report if PG_reserved was found set, but do not
633 * clear it, and do not allocate the page: as a safety net.
634 */
635 if (PageReserved(page))
636 return 1;
637
d77c2d7c 638 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
1da177e4 639 1 << PG_referenced | 1 << PG_arch_1 |
5409bae0 640 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
4c21e2f2 641 set_page_private(page, 0);
7835e98b 642 set_page_refcounted(page);
cc102509
NP
643
644 arch_alloc_page(page, order);
1da177e4 645 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
646
647 if (gfp_flags & __GFP_ZERO)
648 prep_zero_page(page, order, gfp_flags);
649
650 if (order && (gfp_flags & __GFP_COMP))
651 prep_compound_page(page, order);
652
689bcebf 653 return 0;
1da177e4
LT
654}
655
b2a0ac88
MG
656/*
657 * This array describes the order lists are fallen back to when
658 * the free lists for the desirable migrate type are depleted
659 */
660static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
661 [MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
662 [MIGRATE_MOVABLE] = { MIGRATE_UNMOVABLE },
663};
664
665/* Remove an element from the buddy allocator from the fallback list */
666static struct page *__rmqueue_fallback(struct zone *zone, int order,
667 int start_migratetype)
668{
669 struct free_area * area;
670 int current_order;
671 struct page *page;
672 int migratetype, i;
673
674 /* Find the largest possible block of pages in the other list */
675 for (current_order = MAX_ORDER-1; current_order >= order;
676 --current_order) {
677 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
678 migratetype = fallbacks[start_migratetype][i];
679
680 area = &(zone->free_area[current_order]);
681 if (list_empty(&area->free_list[migratetype]))
682 continue;
683
684 page = list_entry(area->free_list[migratetype].next,
685 struct page, lru);
686 area->nr_free--;
687
688 /*
689 * If breaking a large block of pages, place the buddies
690 * on the preferred allocation list
691 */
692 if (unlikely(current_order >= MAX_ORDER / 2))
693 migratetype = start_migratetype;
694
695 /* Remove the page from the freelists */
696 list_del(&page->lru);
697 rmv_page_order(page);
698 __mod_zone_page_state(zone, NR_FREE_PAGES,
699 -(1UL << order));
700
701 if (current_order == MAX_ORDER - 1)
702 set_pageblock_migratetype(page,
703 start_migratetype);
704
705 expand(zone, page, order, current_order, area, migratetype);
706 return page;
707 }
708 }
709
710 return NULL;
711}
712
1da177e4
LT
713/*
714 * Do the hard work of removing an element from the buddy allocator.
715 * Call me with the zone->lock already held.
716 */
b2a0ac88
MG
717static struct page *__rmqueue(struct zone *zone, unsigned int order,
718 int migratetype)
1da177e4
LT
719{
720 struct free_area * area;
721 unsigned int current_order;
722 struct page *page;
723
b2a0ac88 724 /* Find a page of the appropriate size in the preferred list */
1da177e4 725 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
b2a0ac88
MG
726 area = &(zone->free_area[current_order]);
727 if (list_empty(&area->free_list[migratetype]))
1da177e4
LT
728 continue;
729
b2a0ac88
MG
730 page = list_entry(area->free_list[migratetype].next,
731 struct page, lru);
1da177e4
LT
732 list_del(&page->lru);
733 rmv_page_order(page);
734 area->nr_free--;
d23ad423 735 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
b2a0ac88
MG
736 expand(zone, page, order, current_order, area, migratetype);
737 goto got_page;
1da177e4
LT
738 }
739
b2a0ac88
MG
740 page = __rmqueue_fallback(zone, order, migratetype);
741
742got_page:
743
744 return page;
1da177e4
LT
745}
746
747/*
748 * Obtain a specified number of elements from the buddy allocator, all under
749 * a single hold of the lock, for efficiency. Add them to the supplied list.
750 * Returns the number of new pages which were placed at *list.
751 */
752static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88
MG
753 unsigned long count, struct list_head *list,
754 int migratetype)
1da177e4 755{
1da177e4 756 int i;
1da177e4 757
c54ad30c 758 spin_lock(&zone->lock);
1da177e4 759 for (i = 0; i < count; ++i) {
b2a0ac88 760 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 761 if (unlikely(page == NULL))
1da177e4 762 break;
535131e6
MG
763 list_add(&page->lru, list);
764 set_page_private(page, migratetype);
1da177e4 765 }
c54ad30c 766 spin_unlock(&zone->lock);
085cc7d5 767 return i;
1da177e4
LT
768}
769
4ae7c039 770#ifdef CONFIG_NUMA
8fce4d8e 771/*
4037d452
CL
772 * Called from the vmstat counter updater to drain pagesets of this
773 * currently executing processor on remote nodes after they have
774 * expired.
775 *
879336c3
CL
776 * Note that this function must be called with the thread pinned to
777 * a single processor.
8fce4d8e 778 */
4037d452 779void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 780{
4ae7c039 781 unsigned long flags;
4037d452 782 int to_drain;
4ae7c039 783
4037d452
CL
784 local_irq_save(flags);
785 if (pcp->count >= pcp->batch)
786 to_drain = pcp->batch;
787 else
788 to_drain = pcp->count;
789 free_pages_bulk(zone, to_drain, &pcp->list, 0);
790 pcp->count -= to_drain;
791 local_irq_restore(flags);
4ae7c039
CL
792}
793#endif
794
1da177e4
LT
795static void __drain_pages(unsigned int cpu)
796{
c54ad30c 797 unsigned long flags;
1da177e4
LT
798 struct zone *zone;
799 int i;
800
801 for_each_zone(zone) {
802 struct per_cpu_pageset *pset;
803
f2e12bb2
CL
804 if (!populated_zone(zone))
805 continue;
806
e7c8d5c9 807 pset = zone_pcp(zone, cpu);
1da177e4
LT
808 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
809 struct per_cpu_pages *pcp;
810
811 pcp = &pset->pcp[i];
c54ad30c 812 local_irq_save(flags);
48db57f8
NP
813 free_pages_bulk(zone, pcp->count, &pcp->list, 0);
814 pcp->count = 0;
c54ad30c 815 local_irq_restore(flags);
1da177e4
LT
816 }
817 }
818}
1da177e4 819
296699de 820#ifdef CONFIG_HIBERNATION
1da177e4
LT
821
822void mark_free_pages(struct zone *zone)
823{
f623f0db
RW
824 unsigned long pfn, max_zone_pfn;
825 unsigned long flags;
b2a0ac88 826 int order, t;
1da177e4
LT
827 struct list_head *curr;
828
829 if (!zone->spanned_pages)
830 return;
831
832 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
833
834 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
835 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
836 if (pfn_valid(pfn)) {
837 struct page *page = pfn_to_page(pfn);
838
7be98234
RW
839 if (!swsusp_page_is_forbidden(page))
840 swsusp_unset_page_free(page);
f623f0db 841 }
1da177e4 842
b2a0ac88
MG
843 for_each_migratetype_order(order, t) {
844 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 845 unsigned long i;
1da177e4 846
f623f0db
RW
847 pfn = page_to_pfn(list_entry(curr, struct page, lru));
848 for (i = 0; i < (1UL << order); i++)
7be98234 849 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 850 }
b2a0ac88 851 }
1da177e4
LT
852 spin_unlock_irqrestore(&zone->lock, flags);
853}
854
855/*
856 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
857 */
858void drain_local_pages(void)
859{
860 unsigned long flags;
861
862 local_irq_save(flags);
863 __drain_pages(smp_processor_id());
864 local_irq_restore(flags);
865}
296699de 866#endif /* CONFIG_HIBERNATION */
1da177e4 867
1da177e4
LT
868/*
869 * Free a 0-order page
870 */
1da177e4
LT
871static void fastcall free_hot_cold_page(struct page *page, int cold)
872{
873 struct zone *zone = page_zone(page);
874 struct per_cpu_pages *pcp;
875 unsigned long flags;
876
1da177e4
LT
877 if (PageAnon(page))
878 page->mapping = NULL;
224abf92 879 if (free_pages_check(page))
689bcebf
HD
880 return;
881
9858db50
NP
882 if (!PageHighMem(page))
883 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
dafb1367 884 arch_free_page(page, 0);
689bcebf
HD
885 kernel_map_pages(page, 1, 0);
886
e7c8d5c9 887 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
1da177e4 888 local_irq_save(flags);
f8891e5e 889 __count_vm_event(PGFREE);
1da177e4 890 list_add(&page->lru, &pcp->list);
535131e6 891 set_page_private(page, get_pageblock_migratetype(page));
1da177e4 892 pcp->count++;
48db57f8
NP
893 if (pcp->count >= pcp->high) {
894 free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
895 pcp->count -= pcp->batch;
896 }
1da177e4
LT
897 local_irq_restore(flags);
898 put_cpu();
899}
900
901void fastcall free_hot_page(struct page *page)
902{
903 free_hot_cold_page(page, 0);
904}
905
906void fastcall free_cold_page(struct page *page)
907{
908 free_hot_cold_page(page, 1);
909}
910
8dfcc9ba
NP
911/*
912 * split_page takes a non-compound higher-order page, and splits it into
913 * n (1<<order) sub-pages: page[0..n]
914 * Each sub-page must be freed individually.
915 *
916 * Note: this is probably too low level an operation for use in drivers.
917 * Please consult with lkml before using this in your driver.
918 */
919void split_page(struct page *page, unsigned int order)
920{
921 int i;
922
725d704e
NP
923 VM_BUG_ON(PageCompound(page));
924 VM_BUG_ON(!page_count(page));
7835e98b
NP
925 for (i = 1; i < (1 << order); i++)
926 set_page_refcounted(page + i);
8dfcc9ba 927}
8dfcc9ba 928
1da177e4
LT
929/*
930 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
931 * we cheat by calling it from here, in the order > 0 path. Saves a branch
932 * or two.
933 */
a74609fa
NP
934static struct page *buffered_rmqueue(struct zonelist *zonelist,
935 struct zone *zone, int order, gfp_t gfp_flags)
1da177e4
LT
936{
937 unsigned long flags;
689bcebf 938 struct page *page;
1da177e4 939 int cold = !!(gfp_flags & __GFP_COLD);
a74609fa 940 int cpu;
b2a0ac88 941 int migratetype = gfpflags_to_migratetype(gfp_flags);
1da177e4 942
689bcebf 943again:
a74609fa 944 cpu = get_cpu();
48db57f8 945 if (likely(order == 0)) {
1da177e4
LT
946 struct per_cpu_pages *pcp;
947
a74609fa 948 pcp = &zone_pcp(zone, cpu)->pcp[cold];
1da177e4 949 local_irq_save(flags);
a74609fa 950 if (!pcp->count) {
941c7105 951 pcp->count = rmqueue_bulk(zone, 0,
b2a0ac88 952 pcp->batch, &pcp->list, migratetype);
a74609fa
NP
953 if (unlikely(!pcp->count))
954 goto failed;
1da177e4 955 }
535131e6
MG
956 /* Find a page of the appropriate migrate type */
957 list_for_each_entry(page, &pcp->list, lru) {
958 if (page_private(page) == migratetype) {
959 list_del(&page->lru);
960 pcp->count--;
961 break;
962 }
963 }
964
965 /*
966 * Check if a page of the appropriate migrate type
967 * was found. If not, allocate more to the pcp list
968 */
969 if (&page->lru == &pcp->list) {
970 pcp->count += rmqueue_bulk(zone, 0,
971 pcp->batch, &pcp->list, migratetype);
972 page = list_entry(pcp->list.next, struct page, lru);
973 VM_BUG_ON(page_private(page) != migratetype);
974 list_del(&page->lru);
975 pcp->count--;
976 }
7fb1d9fc 977 } else {
1da177e4 978 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 979 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
980 spin_unlock(&zone->lock);
981 if (!page)
982 goto failed;
1da177e4
LT
983 }
984
f8891e5e 985 __count_zone_vm_events(PGALLOC, zone, 1 << order);
ca889e6c 986 zone_statistics(zonelist, zone);
a74609fa
NP
987 local_irq_restore(flags);
988 put_cpu();
1da177e4 989
725d704e 990 VM_BUG_ON(bad_range(zone, page));
17cf4406 991 if (prep_new_page(page, order, gfp_flags))
a74609fa 992 goto again;
1da177e4 993 return page;
a74609fa
NP
994
995failed:
996 local_irq_restore(flags);
997 put_cpu();
998 return NULL;
1da177e4
LT
999}
1000
7fb1d9fc 1001#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
3148890b
NP
1002#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
1003#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
1004#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
1005#define ALLOC_HARDER 0x10 /* try to alloc harder */
1006#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1007#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
7fb1d9fc 1008
933e312e
AM
1009#ifdef CONFIG_FAIL_PAGE_ALLOC
1010
1011static struct fail_page_alloc_attr {
1012 struct fault_attr attr;
1013
1014 u32 ignore_gfp_highmem;
1015 u32 ignore_gfp_wait;
54114994 1016 u32 min_order;
933e312e
AM
1017
1018#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1019
1020 struct dentry *ignore_gfp_highmem_file;
1021 struct dentry *ignore_gfp_wait_file;
54114994 1022 struct dentry *min_order_file;
933e312e
AM
1023
1024#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1025
1026} fail_page_alloc = {
1027 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1028 .ignore_gfp_wait = 1,
1029 .ignore_gfp_highmem = 1,
54114994 1030 .min_order = 1,
933e312e
AM
1031};
1032
1033static int __init setup_fail_page_alloc(char *str)
1034{
1035 return setup_fault_attr(&fail_page_alloc.attr, str);
1036}
1037__setup("fail_page_alloc=", setup_fail_page_alloc);
1038
1039static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1040{
54114994
AM
1041 if (order < fail_page_alloc.min_order)
1042 return 0;
933e312e
AM
1043 if (gfp_mask & __GFP_NOFAIL)
1044 return 0;
1045 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1046 return 0;
1047 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1048 return 0;
1049
1050 return should_fail(&fail_page_alloc.attr, 1 << order);
1051}
1052
1053#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1054
1055static int __init fail_page_alloc_debugfs(void)
1056{
1057 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1058 struct dentry *dir;
1059 int err;
1060
1061 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1062 "fail_page_alloc");
1063 if (err)
1064 return err;
1065 dir = fail_page_alloc.attr.dentries.dir;
1066
1067 fail_page_alloc.ignore_gfp_wait_file =
1068 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1069 &fail_page_alloc.ignore_gfp_wait);
1070
1071 fail_page_alloc.ignore_gfp_highmem_file =
1072 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1073 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1074 fail_page_alloc.min_order_file =
1075 debugfs_create_u32("min-order", mode, dir,
1076 &fail_page_alloc.min_order);
933e312e
AM
1077
1078 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1079 !fail_page_alloc.ignore_gfp_highmem_file ||
1080 !fail_page_alloc.min_order_file) {
933e312e
AM
1081 err = -ENOMEM;
1082 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1083 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1084 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1085 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1086 }
1087
1088 return err;
1089}
1090
1091late_initcall(fail_page_alloc_debugfs);
1092
1093#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1094
1095#else /* CONFIG_FAIL_PAGE_ALLOC */
1096
1097static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1098{
1099 return 0;
1100}
1101
1102#endif /* CONFIG_FAIL_PAGE_ALLOC */
1103
1da177e4
LT
1104/*
1105 * Return 1 if free pages are above 'mark'. This takes into account the order
1106 * of the allocation.
1107 */
1108int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
7fb1d9fc 1109 int classzone_idx, int alloc_flags)
1da177e4
LT
1110{
1111 /* free_pages my go negative - that's OK */
d23ad423
CL
1112 long min = mark;
1113 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1da177e4
LT
1114 int o;
1115
7fb1d9fc 1116 if (alloc_flags & ALLOC_HIGH)
1da177e4 1117 min -= min / 2;
7fb1d9fc 1118 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1119 min -= min / 4;
1120
1121 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1122 return 0;
1123 for (o = 0; o < order; o++) {
1124 /* At the next order, this order's pages become unavailable */
1125 free_pages -= z->free_area[o].nr_free << o;
1126
1127 /* Require fewer higher order pages to be free */
1128 min >>= 1;
1129
1130 if (free_pages <= min)
1131 return 0;
1132 }
1133 return 1;
1134}
1135
9276b1bc
PJ
1136#ifdef CONFIG_NUMA
1137/*
1138 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1139 * skip over zones that are not allowed by the cpuset, or that have
1140 * been recently (in last second) found to be nearly full. See further
1141 * comments in mmzone.h. Reduces cache footprint of zonelist scans
1142 * that have to skip over alot of full or unallowed zones.
1143 *
1144 * If the zonelist cache is present in the passed in zonelist, then
1145 * returns a pointer to the allowed node mask (either the current
37b07e41 1146 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1147 *
1148 * If the zonelist cache is not available for this zonelist, does
1149 * nothing and returns NULL.
1150 *
1151 * If the fullzones BITMAP in the zonelist cache is stale (more than
1152 * a second since last zap'd) then we zap it out (clear its bits.)
1153 *
1154 * We hold off even calling zlc_setup, until after we've checked the
1155 * first zone in the zonelist, on the theory that most allocations will
1156 * be satisfied from that first zone, so best to examine that zone as
1157 * quickly as we can.
1158 */
1159static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1160{
1161 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1162 nodemask_t *allowednodes; /* zonelist_cache approximation */
1163
1164 zlc = zonelist->zlcache_ptr;
1165 if (!zlc)
1166 return NULL;
1167
1168 if (jiffies - zlc->last_full_zap > 1 * HZ) {
1169 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1170 zlc->last_full_zap = jiffies;
1171 }
1172
1173 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1174 &cpuset_current_mems_allowed :
37b07e41 1175 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1176 return allowednodes;
1177}
1178
1179/*
1180 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1181 * if it is worth looking at further for free memory:
1182 * 1) Check that the zone isn't thought to be full (doesn't have its
1183 * bit set in the zonelist_cache fullzones BITMAP).
1184 * 2) Check that the zones node (obtained from the zonelist_cache
1185 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1186 * Return true (non-zero) if zone is worth looking at further, or
1187 * else return false (zero) if it is not.
1188 *
1189 * This check -ignores- the distinction between various watermarks,
1190 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1191 * found to be full for any variation of these watermarks, it will
1192 * be considered full for up to one second by all requests, unless
1193 * we are so low on memory on all allowed nodes that we are forced
1194 * into the second scan of the zonelist.
1195 *
1196 * In the second scan we ignore this zonelist cache and exactly
1197 * apply the watermarks to all zones, even it is slower to do so.
1198 * We are low on memory in the second scan, and should leave no stone
1199 * unturned looking for a free page.
1200 */
1201static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1202 nodemask_t *allowednodes)
1203{
1204 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1205 int i; /* index of *z in zonelist zones */
1206 int n; /* node that zone *z is on */
1207
1208 zlc = zonelist->zlcache_ptr;
1209 if (!zlc)
1210 return 1;
1211
1212 i = z - zonelist->zones;
1213 n = zlc->z_to_n[i];
1214
1215 /* This zone is worth trying if it is allowed but not full */
1216 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1217}
1218
1219/*
1220 * Given 'z' scanning a zonelist, set the corresponding bit in
1221 * zlc->fullzones, so that subsequent attempts to allocate a page
1222 * from that zone don't waste time re-examining it.
1223 */
1224static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1225{
1226 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1227 int i; /* index of *z in zonelist zones */
1228
1229 zlc = zonelist->zlcache_ptr;
1230 if (!zlc)
1231 return;
1232
1233 i = z - zonelist->zones;
1234
1235 set_bit(i, zlc->fullzones);
1236}
1237
1238#else /* CONFIG_NUMA */
1239
1240static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1241{
1242 return NULL;
1243}
1244
1245static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1246 nodemask_t *allowednodes)
1247{
1248 return 1;
1249}
1250
1251static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1252{
1253}
1254#endif /* CONFIG_NUMA */
1255
7fb1d9fc 1256/*
0798e519 1257 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1258 * a page.
1259 */
1260static struct page *
1261get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
1262 struct zonelist *zonelist, int alloc_flags)
753ee728 1263{
9276b1bc 1264 struct zone **z;
7fb1d9fc 1265 struct page *page = NULL;
9276b1bc 1266 int classzone_idx = zone_idx(zonelist->zones[0]);
1192d526 1267 struct zone *zone;
9276b1bc
PJ
1268 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1269 int zlc_active = 0; /* set if using zonelist_cache */
1270 int did_zlc_setup = 0; /* just call zlc_setup() one time */
b377fd39 1271 enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
7fb1d9fc 1272
9276b1bc 1273zonelist_scan:
7fb1d9fc 1274 /*
9276b1bc 1275 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1276 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1277 */
9276b1bc
PJ
1278 z = zonelist->zones;
1279
7fb1d9fc 1280 do {
b377fd39
MG
1281 /*
1282 * In NUMA, this could be a policy zonelist which contains
1283 * zones that may not be allowed by the current gfp_mask.
1284 * Check the zone is allowed by the current flags
1285 */
1286 if (unlikely(alloc_should_filter_zonelist(zonelist))) {
1287 if (highest_zoneidx == -1)
1288 highest_zoneidx = gfp_zone(gfp_mask);
1289 if (zone_idx(*z) > highest_zoneidx)
1290 continue;
1291 }
1292
9276b1bc
PJ
1293 if (NUMA_BUILD && zlc_active &&
1294 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1295 continue;
1192d526 1296 zone = *z;
7fb1d9fc 1297 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1298 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1299 goto try_next_zone;
7fb1d9fc
RS
1300
1301 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b
NP
1302 unsigned long mark;
1303 if (alloc_flags & ALLOC_WMARK_MIN)
1192d526 1304 mark = zone->pages_min;
3148890b 1305 else if (alloc_flags & ALLOC_WMARK_LOW)
1192d526 1306 mark = zone->pages_low;
3148890b 1307 else
1192d526 1308 mark = zone->pages_high;
0798e519
PJ
1309 if (!zone_watermark_ok(zone, order, mark,
1310 classzone_idx, alloc_flags)) {
9eeff239 1311 if (!zone_reclaim_mode ||
1192d526 1312 !zone_reclaim(zone, gfp_mask, order))
9276b1bc 1313 goto this_zone_full;
0798e519 1314 }
7fb1d9fc
RS
1315 }
1316
1192d526 1317 page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
0798e519 1318 if (page)
7fb1d9fc 1319 break;
9276b1bc
PJ
1320this_zone_full:
1321 if (NUMA_BUILD)
1322 zlc_mark_zone_full(zonelist, z);
1323try_next_zone:
1324 if (NUMA_BUILD && !did_zlc_setup) {
1325 /* we do zlc_setup after the first zone is tried */
1326 allowednodes = zlc_setup(zonelist, alloc_flags);
1327 zlc_active = 1;
1328 did_zlc_setup = 1;
1329 }
7fb1d9fc 1330 } while (*(++z) != NULL);
9276b1bc
PJ
1331
1332 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1333 /* Disable zlc cache for second zonelist scan */
1334 zlc_active = 0;
1335 goto zonelist_scan;
1336 }
7fb1d9fc 1337 return page;
753ee728
MH
1338}
1339
1da177e4
LT
1340/*
1341 * This is the 'heart' of the zoned buddy allocator.
1342 */
1343struct page * fastcall
dd0fc66f 1344__alloc_pages(gfp_t gfp_mask, unsigned int order,
1da177e4
LT
1345 struct zonelist *zonelist)
1346{
260b2367 1347 const gfp_t wait = gfp_mask & __GFP_WAIT;
7fb1d9fc 1348 struct zone **z;
1da177e4
LT
1349 struct page *page;
1350 struct reclaim_state reclaim_state;
1351 struct task_struct *p = current;
1da177e4 1352 int do_retry;
7fb1d9fc 1353 int alloc_flags;
1da177e4
LT
1354 int did_some_progress;
1355
1356 might_sleep_if(wait);
1357
933e312e
AM
1358 if (should_fail_alloc_page(gfp_mask, order))
1359 return NULL;
1360
6b1de916 1361restart:
7fb1d9fc 1362 z = zonelist->zones; /* the list of zones suitable for gfp_mask */
1da177e4 1363
7fb1d9fc 1364 if (unlikely(*z == NULL)) {
523b9458
CL
1365 /*
1366 * Happens if we have an empty zonelist as a result of
1367 * GFP_THISNODE being used on a memoryless node
1368 */
1da177e4
LT
1369 return NULL;
1370 }
6b1de916 1371
7fb1d9fc 1372 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1373 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
7fb1d9fc
RS
1374 if (page)
1375 goto got_pg;
1da177e4 1376
952f3b51
CL
1377 /*
1378 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1379 * __GFP_NOWARN set) should not cause reclaim since the subsystem
1380 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1381 * using a larger set of nodes after it has established that the
1382 * allowed per node queues are empty and that nodes are
1383 * over allocated.
1384 */
1385 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1386 goto nopage;
1387
0798e519 1388 for (z = zonelist->zones; *z; z++)
43b0bc00 1389 wakeup_kswapd(*z, order);
1da177e4 1390
9bf2229f 1391 /*
7fb1d9fc
RS
1392 * OK, we're below the kswapd watermark and have kicked background
1393 * reclaim. Now things get more complex, so set up alloc_flags according
1394 * to how we want to proceed.
1395 *
1396 * The caller may dip into page reserves a bit more if the caller
1397 * cannot run direct reclaim, or if the caller has realtime scheduling
4eac915d
PJ
1398 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1399 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
9bf2229f 1400 */
3148890b 1401 alloc_flags = ALLOC_WMARK_MIN;
7fb1d9fc
RS
1402 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1403 alloc_flags |= ALLOC_HARDER;
1404 if (gfp_mask & __GFP_HIGH)
1405 alloc_flags |= ALLOC_HIGH;
bdd804f4
PJ
1406 if (wait)
1407 alloc_flags |= ALLOC_CPUSET;
1da177e4
LT
1408
1409 /*
1410 * Go through the zonelist again. Let __GFP_HIGH and allocations
7fb1d9fc 1411 * coming from realtime tasks go deeper into reserves.
1da177e4
LT
1412 *
1413 * This is the last chance, in general, before the goto nopage.
1414 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
9bf2229f 1415 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1da177e4 1416 */
7fb1d9fc
RS
1417 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
1418 if (page)
1419 goto got_pg;
1da177e4
LT
1420
1421 /* This allocation should allow future memory freeing. */
b84a35be 1422
b43a57bb 1423rebalance:
b84a35be
NP
1424 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1425 && !in_interrupt()) {
1426 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
885036d3 1427nofail_alloc:
b84a35be 1428 /* go through the zonelist yet again, ignoring mins */
7fb1d9fc 1429 page = get_page_from_freelist(gfp_mask, order,
47f3a867 1430 zonelist, ALLOC_NO_WATERMARKS);
7fb1d9fc
RS
1431 if (page)
1432 goto got_pg;
885036d3 1433 if (gfp_mask & __GFP_NOFAIL) {
3fcfab16 1434 congestion_wait(WRITE, HZ/50);
885036d3
KK
1435 goto nofail_alloc;
1436 }
1da177e4
LT
1437 }
1438 goto nopage;
1439 }
1440
1441 /* Atomic allocations - we can't balance anything */
1442 if (!wait)
1443 goto nopage;
1444
1da177e4
LT
1445 cond_resched();
1446
1447 /* We now go into synchronous reclaim */
3e0d98b9 1448 cpuset_memory_pressure_bump();
1da177e4
LT
1449 p->flags |= PF_MEMALLOC;
1450 reclaim_state.reclaimed_slab = 0;
1451 p->reclaim_state = &reclaim_state;
1452
5ad333eb 1453 did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
1da177e4
LT
1454
1455 p->reclaim_state = NULL;
1456 p->flags &= ~PF_MEMALLOC;
1457
1458 cond_resched();
1459
1460 if (likely(did_some_progress)) {
7fb1d9fc
RS
1461 page = get_page_from_freelist(gfp_mask, order,
1462 zonelist, alloc_flags);
1463 if (page)
1464 goto got_pg;
1da177e4
LT
1465 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1466 /*
1467 * Go through the zonelist yet one more time, keep
1468 * very high watermark here, this is only to catch
1469 * a parallel oom killing, we must fail if we're still
1470 * under heavy pressure.
1471 */
7fb1d9fc 1472 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
3148890b 1473 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
7fb1d9fc
RS
1474 if (page)
1475 goto got_pg;
1da177e4 1476
a8bbf72a
MG
1477 /* The OOM killer will not help higher order allocs so fail */
1478 if (order > PAGE_ALLOC_COSTLY_ORDER)
1479 goto nopage;
1480
9b0f8b04 1481 out_of_memory(zonelist, gfp_mask, order);
1da177e4
LT
1482 goto restart;
1483 }
1484
1485 /*
1486 * Don't let big-order allocations loop unless the caller explicitly
1487 * requests that. Wait for some write requests to complete then retry.
1488 *
1489 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1490 * <= 3, but that may not be true in other implementations.
1491 */
1492 do_retry = 0;
1493 if (!(gfp_mask & __GFP_NORETRY)) {
5ad333eb
AW
1494 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1495 (gfp_mask & __GFP_REPEAT))
1da177e4
LT
1496 do_retry = 1;
1497 if (gfp_mask & __GFP_NOFAIL)
1498 do_retry = 1;
1499 }
1500 if (do_retry) {
3fcfab16 1501 congestion_wait(WRITE, HZ/50);
1da177e4
LT
1502 goto rebalance;
1503 }
1504
1505nopage:
1506 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1507 printk(KERN_WARNING "%s: page allocation failure."
1508 " order:%d, mode:0x%x\n",
1509 p->comm, order, gfp_mask);
1510 dump_stack();
578c2fd6 1511 show_mem();
1da177e4 1512 }
1da177e4 1513got_pg:
1da177e4
LT
1514 return page;
1515}
1516
1517EXPORT_SYMBOL(__alloc_pages);
1518
1519/*
1520 * Common helper functions.
1521 */
dd0fc66f 1522fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4
LT
1523{
1524 struct page * page;
1525 page = alloc_pages(gfp_mask, order);
1526 if (!page)
1527 return 0;
1528 return (unsigned long) page_address(page);
1529}
1530
1531EXPORT_SYMBOL(__get_free_pages);
1532
dd0fc66f 1533fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4
LT
1534{
1535 struct page * page;
1536
1537 /*
1538 * get_zeroed_page() returns a 32-bit address, which cannot represent
1539 * a highmem page
1540 */
725d704e 1541 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1da177e4
LT
1542
1543 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1544 if (page)
1545 return (unsigned long) page_address(page);
1546 return 0;
1547}
1548
1549EXPORT_SYMBOL(get_zeroed_page);
1550
1551void __pagevec_free(struct pagevec *pvec)
1552{
1553 int i = pagevec_count(pvec);
1554
1555 while (--i >= 0)
1556 free_hot_cold_page(pvec->pages[i], pvec->cold);
1557}
1558
1559fastcall void __free_pages(struct page *page, unsigned int order)
1560{
b5810039 1561 if (put_page_testzero(page)) {
1da177e4
LT
1562 if (order == 0)
1563 free_hot_page(page);
1564 else
1565 __free_pages_ok(page, order);
1566 }
1567}
1568
1569EXPORT_SYMBOL(__free_pages);
1570
1571fastcall void free_pages(unsigned long addr, unsigned int order)
1572{
1573 if (addr != 0) {
725d704e 1574 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
1575 __free_pages(virt_to_page((void *)addr), order);
1576 }
1577}
1578
1579EXPORT_SYMBOL(free_pages);
1580
1da177e4
LT
1581static unsigned int nr_free_zone_pages(int offset)
1582{
e310fd43
MB
1583 /* Just pick one node, since fallback list is circular */
1584 pg_data_t *pgdat = NODE_DATA(numa_node_id());
1da177e4
LT
1585 unsigned int sum = 0;
1586
e310fd43
MB
1587 struct zonelist *zonelist = pgdat->node_zonelists + offset;
1588 struct zone **zonep = zonelist->zones;
1589 struct zone *zone;
1da177e4 1590
e310fd43
MB
1591 for (zone = *zonep++; zone; zone = *zonep++) {
1592 unsigned long size = zone->present_pages;
1593 unsigned long high = zone->pages_high;
1594 if (size > high)
1595 sum += size - high;
1da177e4
LT
1596 }
1597
1598 return sum;
1599}
1600
1601/*
1602 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1603 */
1604unsigned int nr_free_buffer_pages(void)
1605{
af4ca457 1606 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 1607}
c2f1a551 1608EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
1609
1610/*
1611 * Amount of free RAM allocatable within all zones
1612 */
1613unsigned int nr_free_pagecache_pages(void)
1614{
2a1e274a 1615 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 1616}
08e0f6a9
CL
1617
1618static inline void show_node(struct zone *zone)
1da177e4 1619{
08e0f6a9 1620 if (NUMA_BUILD)
25ba77c1 1621 printk("Node %d ", zone_to_nid(zone));
1da177e4 1622}
1da177e4 1623
1da177e4
LT
1624void si_meminfo(struct sysinfo *val)
1625{
1626 val->totalram = totalram_pages;
1627 val->sharedram = 0;
d23ad423 1628 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 1629 val->bufferram = nr_blockdev_pages();
1da177e4
LT
1630 val->totalhigh = totalhigh_pages;
1631 val->freehigh = nr_free_highpages();
1da177e4
LT
1632 val->mem_unit = PAGE_SIZE;
1633}
1634
1635EXPORT_SYMBOL(si_meminfo);
1636
1637#ifdef CONFIG_NUMA
1638void si_meminfo_node(struct sysinfo *val, int nid)
1639{
1640 pg_data_t *pgdat = NODE_DATA(nid);
1641
1642 val->totalram = pgdat->node_present_pages;
d23ad423 1643 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 1644#ifdef CONFIG_HIGHMEM
1da177e4 1645 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
1646 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1647 NR_FREE_PAGES);
98d2b0eb
CL
1648#else
1649 val->totalhigh = 0;
1650 val->freehigh = 0;
1651#endif
1da177e4
LT
1652 val->mem_unit = PAGE_SIZE;
1653}
1654#endif
1655
1656#define K(x) ((x) << (PAGE_SHIFT-10))
1657
1658/*
1659 * Show free area list (used inside shift_scroll-lock stuff)
1660 * We also calculate the percentage fragmentation. We do this by counting the
1661 * memory on each free list with the exception of the first item on the list.
1662 */
1663void show_free_areas(void)
1664{
c7241913 1665 int cpu;
1da177e4
LT
1666 struct zone *zone;
1667
1668 for_each_zone(zone) {
c7241913 1669 if (!populated_zone(zone))
1da177e4 1670 continue;
c7241913
JS
1671
1672 show_node(zone);
1673 printk("%s per-cpu:\n", zone->name);
1da177e4 1674
6b482c67 1675 for_each_online_cpu(cpu) {
1da177e4
LT
1676 struct per_cpu_pageset *pageset;
1677
e7c8d5c9 1678 pageset = zone_pcp(zone, cpu);
1da177e4 1679
c7241913
JS
1680 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
1681 "Cold: hi:%5d, btch:%4d usd:%4d\n",
1682 cpu, pageset->pcp[0].high,
1683 pageset->pcp[0].batch, pageset->pcp[0].count,
1684 pageset->pcp[1].high, pageset->pcp[1].batch,
1685 pageset->pcp[1].count);
1da177e4
LT
1686 }
1687 }
1688
a25700a5 1689 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
d23ad423 1690 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
65e458d4
CL
1691 global_page_state(NR_ACTIVE),
1692 global_page_state(NR_INACTIVE),
b1e7a8fd 1693 global_page_state(NR_FILE_DIRTY),
ce866b34 1694 global_page_state(NR_WRITEBACK),
fd39fc85 1695 global_page_state(NR_UNSTABLE_NFS),
d23ad423 1696 global_page_state(NR_FREE_PAGES),
972d1a7b
CL
1697 global_page_state(NR_SLAB_RECLAIMABLE) +
1698 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 1699 global_page_state(NR_FILE_MAPPED),
a25700a5
AM
1700 global_page_state(NR_PAGETABLE),
1701 global_page_state(NR_BOUNCE));
1da177e4
LT
1702
1703 for_each_zone(zone) {
1704 int i;
1705
c7241913
JS
1706 if (!populated_zone(zone))
1707 continue;
1708
1da177e4
LT
1709 show_node(zone);
1710 printk("%s"
1711 " free:%lukB"
1712 " min:%lukB"
1713 " low:%lukB"
1714 " high:%lukB"
1715 " active:%lukB"
1716 " inactive:%lukB"
1717 " present:%lukB"
1718 " pages_scanned:%lu"
1719 " all_unreclaimable? %s"
1720 "\n",
1721 zone->name,
d23ad423 1722 K(zone_page_state(zone, NR_FREE_PAGES)),
1da177e4
LT
1723 K(zone->pages_min),
1724 K(zone->pages_low),
1725 K(zone->pages_high),
c8785385
CL
1726 K(zone_page_state(zone, NR_ACTIVE)),
1727 K(zone_page_state(zone, NR_INACTIVE)),
1da177e4
LT
1728 K(zone->present_pages),
1729 zone->pages_scanned,
1730 (zone->all_unreclaimable ? "yes" : "no")
1731 );
1732 printk("lowmem_reserve[]:");
1733 for (i = 0; i < MAX_NR_ZONES; i++)
1734 printk(" %lu", zone->lowmem_reserve[i]);
1735 printk("\n");
1736 }
1737
1738 for_each_zone(zone) {
8f9de51a 1739 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4 1740
c7241913
JS
1741 if (!populated_zone(zone))
1742 continue;
1743
1da177e4
LT
1744 show_node(zone);
1745 printk("%s: ", zone->name);
1da177e4
LT
1746
1747 spin_lock_irqsave(&zone->lock, flags);
1748 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
1749 nr[order] = zone->free_area[order].nr_free;
1750 total += nr[order] << order;
1da177e4
LT
1751 }
1752 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
1753 for (order = 0; order < MAX_ORDER; order++)
1754 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
1755 printk("= %lukB\n", K(total));
1756 }
1757
1758 show_swap_cache_info();
1759}
1760
1761/*
1762 * Builds allocation fallback zone lists.
1a93205b
CL
1763 *
1764 * Add all populated zones of a node to the zonelist.
1da177e4 1765 */
f0c0b2b8
KH
1766static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1767 int nr_zones, enum zone_type zone_type)
1da177e4 1768{
1a93205b
CL
1769 struct zone *zone;
1770
98d2b0eb 1771 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 1772 zone_type++;
02a68a5e
CL
1773
1774 do {
2f6726e5 1775 zone_type--;
070f8032 1776 zone = pgdat->node_zones + zone_type;
1a93205b 1777 if (populated_zone(zone)) {
070f8032
CL
1778 zonelist->zones[nr_zones++] = zone;
1779 check_highest_zone(zone_type);
1da177e4 1780 }
02a68a5e 1781
2f6726e5 1782 } while (zone_type);
070f8032 1783 return nr_zones;
1da177e4
LT
1784}
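/*
 * Note that the do/while above walks zone_type downwards from the
 * requested zone to zone 0, so higher zones appear before lower ones
 * (e.g. DMA last) in the resulting fallback list.
 */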
1785
f0c0b2b8
KH
1786
1787/*
1788 * zonelist_order:
1789 * 0 = automatic detection of better ordering.
1790 * 1 = order by ([node] distance, -zonetype)
1791 * 2 = order by (-zonetype, [node] distance)
1792 *
1793 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1794 * the same zonelist. So only NUMA can configure this param.
1795 */
1796#define ZONELIST_ORDER_DEFAULT 0
1797#define ZONELIST_ORDER_NODE 1
1798#define ZONELIST_ORDER_ZONE 2
1799
1800/* zonelist order in the kernel.
1801 * set_zonelist_order() will set this to NODE or ZONE.
1802 */
1803static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1804static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1805
1806
1da177e4 1807#ifdef CONFIG_NUMA
f0c0b2b8
KH
1808/* The value the user specified, via boot option or sysctl */
1809static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1810/* string for sysctl */
1811#define NUMA_ZONELIST_ORDER_LEN 16
1812char numa_zonelist_order[16] = "default";
1813
1814/*
 1815 * interface for configuring zonelist ordering.
 1816 * command line option "numa_zonelist_order"
 1817 * = "[dD]efault" - default, automatic configuration.
 1818 * = "[nN]ode" - order by node locality, then by zone within node
 1819 * = "[zZ]one" - order by zone, then by locality within zone
1820 */
1821
1822static int __parse_numa_zonelist_order(char *s)
1823{
1824 if (*s == 'd' || *s == 'D') {
1825 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1826 } else if (*s == 'n' || *s == 'N') {
1827 user_zonelist_order = ZONELIST_ORDER_NODE;
1828 } else if (*s == 'z' || *s == 'Z') {
1829 user_zonelist_order = ZONELIST_ORDER_ZONE;
1830 } else {
1831 printk(KERN_WARNING
1832 "Ignoring invalid numa_zonelist_order value: "
1833 "%s\n", s);
1834 return -EINVAL;
1835 }
1836 return 0;
1837}
1838
1839static __init int setup_numa_zonelist_order(char *s)
1840{
1841 if (s)
1842 return __parse_numa_zonelist_order(s);
1843 return 0;
1844}
1845early_param("numa_zonelist_order", setup_numa_zonelist_order);
1846
1847/*
1848 * sysctl handler for numa_zonelist_order
1849 */
1850int numa_zonelist_order_handler(ctl_table *table, int write,
1851 struct file *file, void __user *buffer, size_t *length,
1852 loff_t *ppos)
1853{
1854 char saved_string[NUMA_ZONELIST_ORDER_LEN];
1855 int ret;
1856
1857 if (write)
1858 strncpy(saved_string, (char*)table->data,
1859 NUMA_ZONELIST_ORDER_LEN);
1860 ret = proc_dostring(table, write, file, buffer, length, ppos);
1861 if (ret)
1862 return ret;
1863 if (write) {
1864 int oldval = user_zonelist_order;
1865 if (__parse_numa_zonelist_order((char*)table->data)) {
1866 /*
1867 * bogus value. restore saved string
1868 */
1869 strncpy((char*)table->data, saved_string,
1870 NUMA_ZONELIST_ORDER_LEN);
1871 user_zonelist_order = oldval;
1872 } else if (oldval != user_zonelist_order)
1873 build_all_zonelists();
1874 }
1875 return 0;
1876}
1877
1878
1da177e4 1879#define MAX_NODE_LOAD (num_online_nodes())
f0c0b2b8
KH
1880static int node_load[MAX_NUMNODES];
1881
1da177e4 1882/**
4dc3b16b 1883 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
1884 * @node: node whose fallback list we're appending
1885 * @used_node_mask: nodemask_t of already used nodes
1886 *
1887 * We use a number of factors to determine which is the next node that should
1888 * appear on a given node's fallback list. The node should not have appeared
1889 * already in @node's fallback list, and it should be the next closest node
1890 * according to the distance array (which contains arbitrary distance values
1891 * from each node to each node in the system), and should also prefer nodes
1892 * with no CPUs, since presumably they'll have very little allocation pressure
1893 * on them otherwise.
1894 * It returns -1 if no node is found.
1895 */
f0c0b2b8 1896static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 1897{
4cf808eb 1898 int n, val;
1da177e4
LT
1899 int min_val = INT_MAX;
1900 int best_node = -1;
1901
4cf808eb
LT
1902 /* Use the local node if we haven't already */
1903 if (!node_isset(node, *used_node_mask)) {
1904 node_set(node, *used_node_mask);
1905 return node;
1906 }
1da177e4 1907
37b07e41 1908 for_each_node_state(n, N_HIGH_MEMORY) {
4cf808eb 1909 cpumask_t tmp;
1da177e4
LT
1910
1911 /* Don't want a node to appear more than once */
1912 if (node_isset(n, *used_node_mask))
1913 continue;
1914
1da177e4
LT
1915 /* Use the distance array to find the distance */
1916 val = node_distance(node, n);
1917
4cf808eb
LT
1918 /* Penalize nodes under us ("prefer the next node") */
1919 val += (n < node);
1920
1da177e4
LT
1921 /* Give preference to headless and unused nodes */
1922 tmp = node_to_cpumask(n);
1923 if (!cpus_empty(tmp))
1924 val += PENALTY_FOR_NODE_WITH_CPUS;
1925
1926 /* Slight preference for less loaded node */
1927 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1928 val += node_load[n];
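		/*
		 * Scaling the distance-based score by MAX_NODE_LOAD *
		 * MAX_NUMNODES before adding node_load[n] keeps the load
		 * as a tie-breaker only: it can reorder nodes whose
		 * distance-based score is equal, but never outweigh it.
		 */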
1929
1930 if (val < min_val) {
1931 min_val = val;
1932 best_node = n;
1933 }
1934 }
1935
1936 if (best_node >= 0)
1937 node_set(best_node, *used_node_mask);
1938
1939 return best_node;
1940}
1941
f0c0b2b8
KH
1942
1943/*
1944 * Build zonelists ordered by node and zones within node.
1945 * This results in maximum locality--normal zone overflows into local
1946 * DMA zone, if any--but risks exhausting DMA zone.
1947 */
1948static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 1949{
19655d34 1950 enum zone_type i;
f0c0b2b8 1951 int j;
1da177e4 1952 struct zonelist *zonelist;
f0c0b2b8
KH
1953
1954 for (i = 0; i < MAX_NR_ZONES; i++) {
1955 zonelist = pgdat->node_zonelists + i;
1956 for (j = 0; zonelist->zones[j] != NULL; j++)
1957 ;
1958 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1959 zonelist->zones[j] = NULL;
1960 }
1961}
1962
523b9458
CL
1963/*
1964 * Build gfp_thisnode zonelists
1965 */
1966static void build_thisnode_zonelists(pg_data_t *pgdat)
1967{
1968 enum zone_type i;
1969 int j;
1970 struct zonelist *zonelist;
1971
1972 for (i = 0; i < MAX_NR_ZONES; i++) {
1973 zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
1974 j = build_zonelists_node(pgdat, zonelist, 0, i);
1975 zonelist->zones[j] = NULL;
1976 }
1977}
1978
f0c0b2b8
KH
1979/*
1980 * Build zonelists ordered by zone and nodes within zones.
1981 * This results in conserving DMA zone[s] until all Normal memory is
1982 * exhausted, but results in overflowing to remote node while memory
1983 * may still exist in local DMA zone.
1984 */
1985static int node_order[MAX_NUMNODES];
1986
1987static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
1988{
1989 enum zone_type i;
1990 int pos, j, node;
1991 int zone_type; /* needs to be signed */
1992 struct zone *z;
1993 struct zonelist *zonelist;
1994
1995 for (i = 0; i < MAX_NR_ZONES; i++) {
1996 zonelist = pgdat->node_zonelists + i;
1997 pos = 0;
1998 for (zone_type = i; zone_type >= 0; zone_type--) {
1999 for (j = 0; j < nr_nodes; j++) {
2000 node = node_order[j];
2001 z = &NODE_DATA(node)->node_zones[zone_type];
2002 if (populated_zone(z)) {
2003 zonelist->zones[pos++] = z;
2004 check_highest_zone(zone_type);
2005 }
2006 }
2007 }
2008 zonelist->zones[pos] = NULL;
2009 }
2010}
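/*
 * The loops above emit all zones of a given type across every node in
 * node_order[] before moving on to the next lower type, which is what
 * keeps the DMA/DMA32 zones at the very end of each zonelist.
 */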
2011
2012static int default_zonelist_order(void)
2013{
2014 int nid, zone_type;
2015 unsigned long low_kmem_size,total_size;
2016 struct zone *z;
2017 int average_size;
2018 /*
 2019 * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2020 * If they are really small and used heavily, the system can fall
2021 * into OOM very easily.
 2022 * This function detects ZONE_DMA/DMA32 size and configures zone order.
2023 */
 2024 /* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
2025 low_kmem_size = 0;
2026 total_size = 0;
2027 for_each_online_node(nid) {
2028 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2029 z = &NODE_DATA(nid)->node_zones[zone_type];
2030 if (populated_zone(z)) {
2031 if (zone_type < ZONE_NORMAL)
2032 low_kmem_size += z->present_pages;
2033 total_size += z->present_pages;
2034 }
2035 }
2036 }
 2037 if (!low_kmem_size || /* there is no DMA area. */
2038 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2039 return ZONELIST_ORDER_NODE;
2040 /*
2041 * look into each node's config.
 2042 * If there is a node whose DMA/DMA32 memory covers a large part of
 2043 * its local memory, NODE_ORDER may be suitable.
2044 */
37b07e41
LS
2045 average_size = total_size /
2046 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2047 for_each_online_node(nid) {
2048 low_kmem_size = 0;
2049 total_size = 0;
2050 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2051 z = &NODE_DATA(nid)->node_zones[zone_type];
2052 if (populated_zone(z)) {
2053 if (zone_type < ZONE_NORMAL)
2054 low_kmem_size += z->present_pages;
2055 total_size += z->present_pages;
2056 }
2057 }
2058 if (low_kmem_size &&
2059 total_size > average_size && /* ignore small node */
2060 low_kmem_size > total_size * 70/100)
2061 return ZONELIST_ORDER_NODE;
2062 }
2063 return ZONELIST_ORDER_ZONE;
2064}
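/*
 * Summary of the heuristic above: NODE order is chosen when there is no
 * DMA/DMA32 memory at all, when DMA/DMA32 makes up more than half of all
 * memory, or when any not-too-small node has more than 70% of its memory
 * in DMA/DMA32; otherwise ZONE order is used.
 */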
2065
2066static void set_zonelist_order(void)
2067{
2068 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2069 current_zonelist_order = default_zonelist_order();
2070 else
2071 current_zonelist_order = user_zonelist_order;
2072}
2073
2074static void build_zonelists(pg_data_t *pgdat)
2075{
2076 int j, node, load;
2077 enum zone_type i;
1da177e4 2078 nodemask_t used_mask;
f0c0b2b8
KH
2079 int local_node, prev_node;
2080 struct zonelist *zonelist;
2081 int order = current_zonelist_order;
1da177e4
LT
2082
2083 /* initialize zonelists */
523b9458 2084 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4
LT
2085 zonelist = pgdat->node_zonelists + i;
2086 zonelist->zones[0] = NULL;
2087 }
2088
2089 /* NUMA-aware ordering of nodes */
2090 local_node = pgdat->node_id;
2091 load = num_online_nodes();
2092 prev_node = local_node;
2093 nodes_clear(used_mask);
f0c0b2b8
KH
2094
2095 memset(node_load, 0, sizeof(node_load));
2096 memset(node_order, 0, sizeof(node_order));
2097 j = 0;
2098
1da177e4 2099 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2100 int distance = node_distance(local_node, node);
2101
2102 /*
2103 * If another node is sufficiently far away then it is better
2104 * to reclaim pages in a zone before going off node.
2105 */
2106 if (distance > RECLAIM_DISTANCE)
2107 zone_reclaim_mode = 1;
2108
1da177e4
LT
2109 /*
2110 * We don't want to pressure a particular node.
2111 * So adding penalty to the first node in same
2112 * distance group to make it round-robin.
2113 */
9eeff239 2114 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2115 node_load[node] = load;
2116
1da177e4
LT
2117 prev_node = node;
2118 load--;
f0c0b2b8
KH
2119 if (order == ZONELIST_ORDER_NODE)
2120 build_zonelists_in_node_order(pgdat, node);
2121 else
2122 node_order[j++] = node; /* remember order */
2123 }
1da177e4 2124
f0c0b2b8
KH
2125 if (order == ZONELIST_ORDER_ZONE) {
2126 /* calculate node order -- i.e., DMA last! */
2127 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2128 }
523b9458
CL
2129
2130 build_thisnode_zonelists(pgdat);
1da177e4
LT
2131}
2132
9276b1bc 2133/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2134static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc
PJ
2135{
2136 int i;
2137
2138 for (i = 0; i < MAX_NR_ZONES; i++) {
2139 struct zonelist *zonelist;
2140 struct zonelist_cache *zlc;
2141 struct zone **z;
2142
2143 zonelist = pgdat->node_zonelists + i;
2144 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2145 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2146 for (z = zonelist->zones; *z; z++)
2147 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
2148 }
2149}
2150
f0c0b2b8 2151
1da177e4
LT
2152#else /* CONFIG_NUMA */
2153
f0c0b2b8
KH
2154static void set_zonelist_order(void)
2155{
2156 current_zonelist_order = ZONELIST_ORDER_ZONE;
2157}
2158
2159static void build_zonelists(pg_data_t *pgdat)
1da177e4 2160{
19655d34
CL
2161 int node, local_node;
2162 enum zone_type i,j;
1da177e4
LT
2163
2164 local_node = pgdat->node_id;
19655d34 2165 for (i = 0; i < MAX_NR_ZONES; i++) {
1da177e4
LT
2166 struct zonelist *zonelist;
2167
2168 zonelist = pgdat->node_zonelists + i;
2169
19655d34 2170 j = build_zonelists_node(pgdat, zonelist, 0, i);
1da177e4
LT
2171 /*
2172 * Now we build the zonelist so that it contains the zones
2173 * of all the other nodes.
2174 * We don't want to pressure a particular node, so when
2175 * building the zones for node N, we make sure that the
2176 * zones coming right after the local ones are those from
2177 * node N+1 (modulo N)
2178 */
2179 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2180 if (!node_online(node))
2181 continue;
19655d34 2182 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
2183 }
2184 for (node = 0; node < local_node; node++) {
2185 if (!node_online(node))
2186 continue;
19655d34 2187 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
1da177e4
LT
2188 }
2189
2190 zonelist->zones[j] = NULL;
2191 }
2192}
2193
9276b1bc 2194/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2195static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc
PJ
2196{
2197 int i;
2198
2199 for (i = 0; i < MAX_NR_ZONES; i++)
2200 pgdat->node_zonelists[i].zlcache_ptr = NULL;
2201}
2202
1da177e4
LT
2203#endif /* CONFIG_NUMA */
2204
6811378e 2205/* return value is int just to match stop_machine_run()'s callback type */
f0c0b2b8 2206static int __build_all_zonelists(void *dummy)
1da177e4 2207{
6811378e 2208 int nid;
9276b1bc
PJ
2209
2210 for_each_online_node(nid) {
7ea1530a
CL
2211 pg_data_t *pgdat = NODE_DATA(nid);
2212
2213 build_zonelists(pgdat);
2214 build_zonelist_cache(pgdat);
9276b1bc 2215 }
6811378e
YG
2216 return 0;
2217}
2218
f0c0b2b8 2219void build_all_zonelists(void)
6811378e 2220{
f0c0b2b8
KH
2221 set_zonelist_order();
2222
6811378e 2223 if (system_state == SYSTEM_BOOTING) {
423b41d7 2224 __build_all_zonelists(NULL);
6811378e
YG
2225 cpuset_init_current_mems_allowed();
2226 } else {
 2227 /* we have to stop all cpus to guarantee there is no user
2228 of zonelist */
2229 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2230 /* cpuset refresh routine should be here */
2231 }
bd1e22b8 2232 vm_total_pages = nr_free_pagecache_pages();
f0c0b2b8
KH
2233 printk("Built %i zonelists in %s order. Total pages: %ld\n",
2234 num_online_nodes(),
2235 zonelist_order_name[current_zonelist_order],
2236 vm_total_pages);
2237#ifdef CONFIG_NUMA
2238 printk("Policy zone: %s\n", zone_names[policy_zone]);
2239#endif
1da177e4
LT
2240}
2241
2242/*
2243 * Helper functions to size the waitqueue hash table.
2244 * Essentially these want to choose hash table sizes sufficiently
2245 * large so that collisions trying to wait on pages are rare.
2246 * But in fact, the number of active page waitqueues on typical
2247 * systems is ridiculously low, less than 200. So this is even
2248 * conservative, even though it seems large.
2249 *
2250 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2251 * waitqueues, i.e. the size of the waitq table given the number of pages.
2252 */
2253#define PAGES_PER_WAITQUEUE 256
2254
cca448fe 2255#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 2256static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
2257{
2258 unsigned long size = 1;
2259
2260 pages /= PAGES_PER_WAITQUEUE;
2261
2262 while (size < pages)
2263 size <<= 1;
2264
2265 /*
2266 * Once we have dozens or even hundreds of threads sleeping
2267 * on IO we've got bigger problems than wait queue collision.
2268 * Limit the size of the wait table to a reasonable size.
2269 */
2270 size = min(size, 4096UL);
2271
2272 return max(size, 4UL);
2273}
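/*
 * Example (assuming 4KB pages): a 1GB zone has 262144 pages, which
 * divided by PAGES_PER_WAITQUEUE gives 1024, so the hash table gets
 * 1024 wait queue heads, well under the 4096 cap.
 */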
cca448fe
YG
2274#else
2275/*
2276 * A zone's size might be changed by hot-add, so it is not possible to determine
2277 * a suitable size for its wait_table. So we use the maximum size now.
2278 *
2279 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
2280 *
2281 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
2282 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2283 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
2284 *
 2285 * The maximum number of entries is prepared when a zone's memory is (512K + 256) pages
 2286 * or more, computed the traditional way (see above). It equals:
2287 *
2288 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
2289 * ia64(16K page size) : = ( 8G + 4M)byte.
2290 * powerpc (64K page size) : = (32G +16M)byte.
2291 */
2292static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2293{
2294 return 4096UL;
2295}
2296#endif
1da177e4
LT
2297
2298/*
2299 * This is an integer logarithm so that shifts can be used later
2300 * to extract the more random high bits from the multiplicative
2301 * hash function before the remainder is taken.
2302 */
2303static inline unsigned long wait_table_bits(unsigned long size)
2304{
2305 return ffz(~size);
2306}
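/*
 * Since the table size is always a power of two, ffz(~size) is simply
 * log2(size): e.g. wait_table_bits(1024) == 10.
 */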
2307
2308#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2309
1da177e4
LT
2310/*
2311 * Initially all pages are reserved - free ones are freed
2312 * up by free_all_bootmem() once the early boot process is
2313 * done. Non-atomic initialization, single-pass.
2314 */
c09b4240 2315void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 2316 unsigned long start_pfn, enum memmap_context context)
1da177e4 2317{
1da177e4 2318 struct page *page;
29751f69
AW
2319 unsigned long end_pfn = start_pfn + size;
2320 unsigned long pfn;
1da177e4 2321
cbe8dd4a 2322 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
2323 /*
2324 * There can be holes in boot-time mem_map[]s
2325 * handed to this function. They do not
2326 * exist on hotplugged memory.
2327 */
2328 if (context == MEMMAP_EARLY) {
2329 if (!early_pfn_valid(pfn))
2330 continue;
2331 if (!early_pfn_in_nid(pfn, nid))
2332 continue;
2333 }
d41dee36
AW
2334 page = pfn_to_page(pfn);
2335 set_page_links(page, zone, nid, pfn);
7835e98b 2336 init_page_count(page);
1da177e4
LT
2337 reset_page_mapcount(page);
2338 SetPageReserved(page);
b2a0ac88
MG
2339
2340 /*
2341 * Mark the block movable so that blocks are reserved for
2342 * movable at startup. This will force kernel allocations
2343 * to reserve their blocks rather than leaking throughout
2344 * the address space during boot when many long-lived
2345 * kernel allocations are made
2346 */
2347 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2348
1da177e4
LT
2349 INIT_LIST_HEAD(&page->lru);
2350#ifdef WANT_PAGE_VIRTUAL
2351 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2352 if (!is_highmem_idx(zone))
3212c6be 2353 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 2354#endif
1da177e4
LT
2355 }
2356}
2357
6ea6e688
PM
2358static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
2359 struct zone *zone, unsigned long size)
1da177e4 2360{
b2a0ac88
MG
2361 int order, t;
2362 for_each_migratetype_order(order, t) {
2363 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
2364 zone->free_area[order].nr_free = 0;
2365 }
2366}
2367
2368#ifndef __HAVE_ARCH_MEMMAP_INIT
2369#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 2370 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
2371#endif
2372
d09c6b80 2373static int __devinit zone_batchsize(struct zone *zone)
e7c8d5c9
CL
2374{
2375 int batch;
2376
2377 /*
 2378 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 2379 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
2380 *
2381 * OK, so we don't know how big the cache is. So guess.
2382 */
2383 batch = zone->present_pages / 1024;
ba56e91c
SR
2384 if (batch * PAGE_SIZE > 512 * 1024)
2385 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
2386 batch /= 4; /* We effectively *= 4 below */
2387 if (batch < 1)
2388 batch = 1;
2389
2390 /*
0ceaacc9
NP
2391 * Clamp the batch to a 2^n - 1 value. Having a power
2392 * of 2 value was found to be more likely to have
2393 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 2394 *
0ceaacc9
NP
2395 * For example if 2 tasks are alternately allocating
2396 * batches of pages, one task can end up with a lot
2397 * of pages of one half of the possible page colors
2398 * and the other with pages of the other colors.
e7c8d5c9 2399 */
0ceaacc9 2400 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 2401
e7c8d5c9
CL
2402 return batch;
2403}
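/*
 * Worked example (assuming 4KB pages): a 1GB zone has 262144 pages, so
 * batch starts at 256, is capped to 128 by the 512KB limit, becomes 32
 * after the /= 4, and the clamp to a 2^n - 1 value finally yields 31.
 */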
2404
2caaad41
CL
2405inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2406{
2407 struct per_cpu_pages *pcp;
2408
1c6fe946
MD
2409 memset(p, 0, sizeof(*p));
2410
2caaad41
CL
2411 pcp = &p->pcp[0]; /* hot */
2412 pcp->count = 0;
2caaad41
CL
2413 pcp->high = 6 * batch;
2414 pcp->batch = max(1UL, 1 * batch);
2415 INIT_LIST_HEAD(&pcp->list);
2416
2417 pcp = &p->pcp[1]; /* cold*/
2418 pcp->count = 0;
2caaad41 2419 pcp->high = 2 * batch;
e46a5e28 2420 pcp->batch = max(1UL, batch/2);
2caaad41
CL
2421 INIT_LIST_HEAD(&pcp->list);
2422}
2423
8ad4b1fb
RS
2424/*
2425 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
2426 * to the value high for the pageset p.
2427 */
2428
2429static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2430 unsigned long high)
2431{
2432 struct per_cpu_pages *pcp;
2433
2434 pcp = &p->pcp[0]; /* hot list */
2435 pcp->high = high;
2436 pcp->batch = max(1UL, high/4);
2437 if ((high/4) > (PAGE_SHIFT * 8))
2438 pcp->batch = PAGE_SHIFT * 8;
2439}
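/*
 * The cap above limits the batch to PAGE_SHIFT * 8 pages, e.g. 96 pages
 * per refill with 4KB pages, no matter how large "high" is.
 */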
2440
2441
e7c8d5c9
CL
2442#ifdef CONFIG_NUMA
2443/*
2caaad41
CL
2444 * Boot pageset table. One per cpu which is going to be used for all
2445 * zones and all nodes. The parameters will be set in such a way
2446 * that an item put on a list will immediately be handed over to
2447 * the buddy list. This is safe since pageset manipulation is done
2448 * with interrupts disabled.
2449 *
2450 * Some NUMA counter updates may also be caught by the boot pagesets.
b7c84c6a
CL
2451 *
2452 * The boot_pagesets must be kept even after bootup is complete for
2453 * unused processors and/or zones. They do play a role for bootstrapping
2454 * hotplugged processors.
2455 *
2456 * zoneinfo_show() and maybe other functions do
2457 * not check if the processor is online before following the pageset pointer.
2458 * Other parts of the kernel may not check if the zone is available.
2caaad41 2459 */
88a2a4ac 2460static struct per_cpu_pageset boot_pageset[NR_CPUS];
2caaad41
CL
2461
2462/*
2463 * Dynamically allocate memory for the
e7c8d5c9
CL
2464 * per cpu pageset array in struct zone.
2465 */
6292d9aa 2466static int __cpuinit process_zones(int cpu)
e7c8d5c9
CL
2467{
2468 struct zone *zone, *dzone;
37c0708d
CL
2469 int node = cpu_to_node(cpu);
2470
2471 node_set_state(node, N_CPU); /* this node has a cpu */
e7c8d5c9
CL
2472
2473 for_each_zone(zone) {
e7c8d5c9 2474
66a55030
CL
2475 if (!populated_zone(zone))
2476 continue;
2477
23316bc8 2478 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
37c0708d 2479 GFP_KERNEL, node);
23316bc8 2480 if (!zone_pcp(zone, cpu))
e7c8d5c9 2481 goto bad;
e7c8d5c9 2482
23316bc8 2483 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
8ad4b1fb
RS
2484
2485 if (percpu_pagelist_fraction)
2486 setup_pagelist_highmark(zone_pcp(zone, cpu),
2487 (zone->present_pages / percpu_pagelist_fraction));
e7c8d5c9
CL
2488 }
2489
2490 return 0;
2491bad:
2492 for_each_zone(dzone) {
64191688
AM
2493 if (!populated_zone(dzone))
2494 continue;
e7c8d5c9
CL
2495 if (dzone == zone)
2496 break;
23316bc8
NP
2497 kfree(zone_pcp(dzone, cpu));
2498 zone_pcp(dzone, cpu) = NULL;
e7c8d5c9
CL
2499 }
2500 return -ENOMEM;
2501}
2502
2503static inline void free_zone_pagesets(int cpu)
2504{
e7c8d5c9
CL
2505 struct zone *zone;
2506
2507 for_each_zone(zone) {
2508 struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2509
f3ef9ead
DR
2510 /* Free per_cpu_pageset if it is slab allocated */
2511 if (pset != &boot_pageset[cpu])
2512 kfree(pset);
e7c8d5c9 2513 zone_pcp(zone, cpu) = NULL;
e7c8d5c9 2514 }
e7c8d5c9
CL
2515}
2516
9c7b216d 2517static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
e7c8d5c9
CL
2518 unsigned long action,
2519 void *hcpu)
2520{
2521 int cpu = (long)hcpu;
2522 int ret = NOTIFY_OK;
2523
2524 switch (action) {
ce421c79 2525 case CPU_UP_PREPARE:
8bb78442 2526 case CPU_UP_PREPARE_FROZEN:
ce421c79
AW
2527 if (process_zones(cpu))
2528 ret = NOTIFY_BAD;
2529 break;
2530 case CPU_UP_CANCELED:
8bb78442 2531 case CPU_UP_CANCELED_FROZEN:
ce421c79 2532 case CPU_DEAD:
8bb78442 2533 case CPU_DEAD_FROZEN:
ce421c79
AW
2534 free_zone_pagesets(cpu);
2535 break;
2536 default:
2537 break;
e7c8d5c9
CL
2538 }
2539 return ret;
2540}
2541
74b85f37 2542static struct notifier_block __cpuinitdata pageset_notifier =
e7c8d5c9
CL
2543 { &pageset_cpuup_callback, NULL, 0 };
2544
78d9955b 2545void __init setup_per_cpu_pageset(void)
e7c8d5c9
CL
2546{
2547 int err;
2548
2549 /* Initialize per_cpu_pageset for cpu 0.
2550 * A cpuup callback will do this for every cpu
2551 * as it comes online
2552 */
2553 err = process_zones(smp_processor_id());
2554 BUG_ON(err);
2555 register_cpu_notifier(&pageset_notifier);
2556}
2557
2558#endif
2559
577a32f6 2560static noinline __init_refok
cca448fe 2561int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
2562{
2563 int i;
2564 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 2565 size_t alloc_size;
ed8ece2e
DH
2566
2567 /*
2568 * The per-page waitqueue mechanism uses hashed waitqueues
2569 * per zone.
2570 */
02b694de
YG
2571 zone->wait_table_hash_nr_entries =
2572 wait_table_hash_nr_entries(zone_size_pages);
2573 zone->wait_table_bits =
2574 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
2575 alloc_size = zone->wait_table_hash_nr_entries
2576 * sizeof(wait_queue_head_t);
2577
2578 if (system_state == SYSTEM_BOOTING) {
2579 zone->wait_table = (wait_queue_head_t *)
2580 alloc_bootmem_node(pgdat, alloc_size);
2581 } else {
2582 /*
2583 * This case means that a zone whose size was 0 gets new memory
2584 * via memory hot-add.
2585 * But it may be the case that a new node was hot-added. In
2586 * this case vmalloc() will not be able to use this new node's
2587 * memory - this wait_table must be initialized to use this new
2588 * node itself as well.
2589 * To use this new node's memory, further consideration will be
2590 * necessary.
2591 */
8691f3a7 2592 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
2593 }
2594 if (!zone->wait_table)
2595 return -ENOMEM;
ed8ece2e 2596
02b694de 2597 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 2598 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
2599
2600 return 0;
ed8ece2e
DH
2601}
2602
c09b4240 2603static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e
DH
2604{
2605 int cpu;
2606 unsigned long batch = zone_batchsize(zone);
2607
2608 for (cpu = 0; cpu < NR_CPUS; cpu++) {
2609#ifdef CONFIG_NUMA
2610 /* Early boot. Slab allocator not functional yet */
23316bc8 2611 zone_pcp(zone, cpu) = &boot_pageset[cpu];
ed8ece2e
DH
2612 setup_pageset(&boot_pageset[cpu],0);
2613#else
2614 setup_pageset(zone_pcp(zone,cpu), batch);
2615#endif
2616 }
f5335c0f
AB
2617 if (zone->present_pages)
2618 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
2619 zone->name, zone->present_pages, batch);
ed8ece2e
DH
2620}
2621
718127cc
YG
2622__meminit int init_currently_empty_zone(struct zone *zone,
2623 unsigned long zone_start_pfn,
a2f3aa02
DH
2624 unsigned long size,
2625 enum memmap_context context)
ed8ece2e
DH
2626{
2627 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
2628 int ret;
2629 ret = zone_wait_table_init(zone, size);
2630 if (ret)
2631 return ret;
ed8ece2e
DH
2632 pgdat->nr_zones = zone_idx(zone) + 1;
2633
ed8ece2e
DH
2634 zone->zone_start_pfn = zone_start_pfn;
2635
2636 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2637
2638 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
718127cc
YG
2639
2640 return 0;
ed8ece2e
DH
2641}
2642
c713216d
MG
2643#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2644/*
2645 * Basic iterator support. Return the first range of PFNs for a node
2646 * Note: nid == MAX_NUMNODES returns first region regardless of node
2647 */
a3142c8e 2648static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
2649{
2650 int i;
2651
2652 for (i = 0; i < nr_nodemap_entries; i++)
2653 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2654 return i;
2655
2656 return -1;
2657}
2658
2659/*
2660 * Basic iterator support. Return the next active range of PFNs for a node
 2661 * Note: nid == MAX_NUMNODES returns next region regardless of node
2662 */
a3142c8e 2663static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
2664{
2665 for (index = index + 1; index < nr_nodemap_entries; index++)
2666 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2667 return index;
2668
2669 return -1;
2670}
2671
2672#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2673/*
2674 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2675 * Architectures may implement their own version but if add_active_range()
2676 * was used and there are no special requirements, this is a convenient
2677 * alternative
2678 */
6f076f5d 2679int __meminit early_pfn_to_nid(unsigned long pfn)
c713216d
MG
2680{
2681 int i;
2682
2683 for (i = 0; i < nr_nodemap_entries; i++) {
2684 unsigned long start_pfn = early_node_map[i].start_pfn;
2685 unsigned long end_pfn = early_node_map[i].end_pfn;
2686
2687 if (start_pfn <= pfn && pfn < end_pfn)
2688 return early_node_map[i].nid;
2689 }
2690
2691 return 0;
2692}
2693#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2694
2695/* Basic iterator support to walk early_node_map[] */
2696#define for_each_active_range_index_in_nid(i, nid) \
2697 for (i = first_active_region_index_in_nid(nid); i != -1; \
2698 i = next_active_region_index_in_nid(i, nid))
2699
2700/**
2701 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
2702 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2703 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
2704 *
2705 * If an architecture guarantees that all ranges registered with
2706 * add_active_ranges() contain no holes and may be freed, this
 2707 * function may be used instead of calling free_bootmem() manually.
2708 */
2709void __init free_bootmem_with_active_regions(int nid,
2710 unsigned long max_low_pfn)
2711{
2712 int i;
2713
2714 for_each_active_range_index_in_nid(i, nid) {
2715 unsigned long size_pages = 0;
2716 unsigned long end_pfn = early_node_map[i].end_pfn;
2717
2718 if (early_node_map[i].start_pfn >= max_low_pfn)
2719 continue;
2720
2721 if (end_pfn > max_low_pfn)
2722 end_pfn = max_low_pfn;
2723
2724 size_pages = end_pfn - early_node_map[i].start_pfn;
2725 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2726 PFN_PHYS(early_node_map[i].start_pfn),
2727 size_pages << PAGE_SHIFT);
2728 }
2729}
2730
2731/**
2732 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 2733 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
2734 *
2735 * If an architecture guarantees that all ranges registered with
2736 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 2737 * function may be used instead of calling memory_present() manually.
c713216d
MG
2738 */
2739void __init sparse_memory_present_with_active_regions(int nid)
2740{
2741 int i;
2742
2743 for_each_active_range_index_in_nid(i, nid)
2744 memory_present(early_node_map[i].nid,
2745 early_node_map[i].start_pfn,
2746 early_node_map[i].end_pfn);
2747}
2748
fb01439c
MG
2749/**
2750 * push_node_boundaries - Push node boundaries to at least the requested boundary
2751 * @nid: The nid of the node to push the boundary for
2752 * @start_pfn: The start pfn of the node
2753 * @end_pfn: The end pfn of the node
2754 *
 2755 * In reserve-based hot-add, a mem_map is allocated that remains unused until hotadd
2756 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
2757 * be hotplugged even though no physical memory exists. This function allows
2758 * an arch to push out the node boundaries so mem_map is allocated that can
2759 * be used later.
2760 */
2761#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2762void __init push_node_boundaries(unsigned int nid,
2763 unsigned long start_pfn, unsigned long end_pfn)
2764{
2765 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2766 nid, start_pfn, end_pfn);
2767
2768 /* Initialise the boundary for this node if necessary */
2769 if (node_boundary_end_pfn[nid] == 0)
2770 node_boundary_start_pfn[nid] = -1UL;
2771
2772 /* Update the boundaries */
2773 if (node_boundary_start_pfn[nid] > start_pfn)
2774 node_boundary_start_pfn[nid] = start_pfn;
2775 if (node_boundary_end_pfn[nid] < end_pfn)
2776 node_boundary_end_pfn[nid] = end_pfn;
2777}
2778
2779/* If necessary, push the node boundary out for reserve hotadd */
98011f56 2780static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
2781 unsigned long *start_pfn, unsigned long *end_pfn)
2782{
2783 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2784 nid, *start_pfn, *end_pfn);
2785
2786 /* Return if boundary information has not been provided */
2787 if (node_boundary_end_pfn[nid] == 0)
2788 return;
2789
2790 /* Check the boundaries and update if necessary */
2791 if (node_boundary_start_pfn[nid] < *start_pfn)
2792 *start_pfn = node_boundary_start_pfn[nid];
2793 if (node_boundary_end_pfn[nid] > *end_pfn)
2794 *end_pfn = node_boundary_end_pfn[nid];
2795}
2796#else
2797void __init push_node_boundaries(unsigned int nid,
2798 unsigned long start_pfn, unsigned long end_pfn) {}
2799
98011f56 2800static void __meminit account_node_boundary(unsigned int nid,
fb01439c
MG
2801 unsigned long *start_pfn, unsigned long *end_pfn) {}
2802#endif
2803
2804
c713216d
MG
2805/**
2806 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
2807 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
2808 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
2809 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
2810 *
2811 * It returns the start and end page frame of a node based on information
2812 * provided by an arch calling add_active_range(). If called for a node
2813 * with no available memory, a warning is printed and the start and end
88ca3b94 2814 * PFNs will be 0.
c713216d 2815 */
a3142c8e 2816void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
2817 unsigned long *start_pfn, unsigned long *end_pfn)
2818{
2819 int i;
2820 *start_pfn = -1UL;
2821 *end_pfn = 0;
2822
2823 for_each_active_range_index_in_nid(i, nid) {
2824 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
2825 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
2826 }
2827
633c0666 2828 if (*start_pfn == -1UL)
c713216d 2829 *start_pfn = 0;
fb01439c
MG
2830
2831 /* Push the node boundaries out if requested */
2832 account_node_boundary(nid, start_pfn, end_pfn);
c713216d
MG
2833}
2834
2a1e274a
MG
2835/*
2836 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 2837 * assumption is made that zones within a node are ordered in monotonically
 2838 * increasing memory addresses so that the "highest" populated zone is used
2839 */
2840void __init find_usable_zone_for_movable(void)
2841{
2842 int zone_index;
2843 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
2844 if (zone_index == ZONE_MOVABLE)
2845 continue;
2846
2847 if (arch_zone_highest_possible_pfn[zone_index] >
2848 arch_zone_lowest_possible_pfn[zone_index])
2849 break;
2850 }
2851
2852 VM_BUG_ON(zone_index == -1);
2853 movable_zone = zone_index;
2854}
2855
2856/*
2857 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 2858 * because it is sized independent of architecture. Unlike the other zones,
2859 * the starting point for ZONE_MOVABLE is not fixed. It may be different
2860 * in each node depending on the size of each node and how evenly kernelcore
2861 * is distributed. This helper function adjusts the zone ranges
2862 * provided by the architecture for a given node by using the end of the
2863 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 2864 * zones within a node are in order of monotonically increasing memory addresses
2865 */
2866void __meminit adjust_zone_range_for_zone_movable(int nid,
2867 unsigned long zone_type,
2868 unsigned long node_start_pfn,
2869 unsigned long node_end_pfn,
2870 unsigned long *zone_start_pfn,
2871 unsigned long *zone_end_pfn)
2872{
2873 /* Only adjust if ZONE_MOVABLE is on this node */
2874 if (zone_movable_pfn[nid]) {
2875 /* Size ZONE_MOVABLE */
2876 if (zone_type == ZONE_MOVABLE) {
2877 *zone_start_pfn = zone_movable_pfn[nid];
2878 *zone_end_pfn = min(node_end_pfn,
2879 arch_zone_highest_possible_pfn[movable_zone]);
2880
2881 /* Adjust for ZONE_MOVABLE starting within this range */
2882 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
2883 *zone_end_pfn > zone_movable_pfn[nid]) {
2884 *zone_end_pfn = zone_movable_pfn[nid];
2885
2886 /* Check if this whole range is within ZONE_MOVABLE */
2887 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
2888 *zone_start_pfn = *zone_end_pfn;
2889 }
2890}
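/*
 * Example: if zone_movable_pfn[nid] falls inside this node's highest
 * populated zone, that zone is truncated at zone_movable_pfn[nid] and
 * ZONE_MOVABLE spans from there to the end of the node, limited by the
 * highest PFN the architecture allows for the movable_zone.
 */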
2891
c713216d
MG
2892/*
2893 * Return the number of pages a zone spans in a node, including holes
2894 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
2895 */
6ea6e688 2896static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
2897 unsigned long zone_type,
2898 unsigned long *ignored)
2899{
2900 unsigned long node_start_pfn, node_end_pfn;
2901 unsigned long zone_start_pfn, zone_end_pfn;
2902
2903 /* Get the start and end of the node and zone */
2904 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2905 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2906 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
2907 adjust_zone_range_for_zone_movable(nid, zone_type,
2908 node_start_pfn, node_end_pfn,
2909 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
2910
2911 /* Check that this node has pages within the zone's required range */
2912 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
2913 return 0;
2914
2915 /* Move the zone boundaries inside the node if necessary */
2916 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
2917 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
2918
2919 /* Return the spanned pages */
2920 return zone_end_pfn - zone_start_pfn;
2921}
2922
2923/*
2924 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 2925 * then all holes in the requested range will be accounted for.
c713216d 2926 */
a3142c8e 2927unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
2928 unsigned long range_start_pfn,
2929 unsigned long range_end_pfn)
2930{
2931 int i = 0;
2932 unsigned long prev_end_pfn = 0, hole_pages = 0;
2933 unsigned long start_pfn;
2934
2935 /* Find the end_pfn of the first active range of pfns in the node */
2936 i = first_active_region_index_in_nid(nid);
2937 if (i == -1)
2938 return 0;
2939
b5445f95
MG
2940 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
2941
9c7cd687
MG
2942 /* Account for ranges before physical memory on this node */
2943 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 2944 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
2945
2946 /* Find all holes for the zone within the node */
2947 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
2948
2949 /* No need to continue if prev_end_pfn is outside the zone */
2950 if (prev_end_pfn >= range_end_pfn)
2951 break;
2952
2953 /* Make sure the end of the zone is not within the hole */
2954 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
2955 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
2956
 2957 /* Update the hole size count and move on */
2958 if (start_pfn > range_start_pfn) {
2959 BUG_ON(prev_end_pfn > start_pfn);
2960 hole_pages += start_pfn - prev_end_pfn;
2961 }
2962 prev_end_pfn = early_node_map[i].end_pfn;
2963 }
2964
9c7cd687
MG
2965 /* Account for ranges past physical memory on this node */
2966 if (range_end_pfn > prev_end_pfn)
0c6cb974 2967 hole_pages += range_end_pfn -
9c7cd687
MG
2968 max(range_start_pfn, prev_end_pfn);
2969
c713216d
MG
2970 return hole_pages;
2971}
2972
2973/**
2974 * absent_pages_in_range - Return number of page frames in holes within a range
2975 * @start_pfn: The start PFN to start searching for holes
2976 * @end_pfn: The end PFN to stop searching for holes
2977 *
88ca3b94 2978 * It returns the number of page frames in memory holes within a range.
c713216d
MG
2979 */
2980unsigned long __init absent_pages_in_range(unsigned long start_pfn,
2981 unsigned long end_pfn)
2982{
2983 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
2984}
2985
2986/* Return the number of page frames in holes in a zone on a node */
6ea6e688 2987static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
2988 unsigned long zone_type,
2989 unsigned long *ignored)
2990{
9c7cd687
MG
2991 unsigned long node_start_pfn, node_end_pfn;
2992 unsigned long zone_start_pfn, zone_end_pfn;
2993
2994 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2995 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
2996 node_start_pfn);
2997 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
2998 node_end_pfn);
2999
2a1e274a
MG
3000 adjust_zone_range_for_zone_movable(nid, zone_type,
3001 node_start_pfn, node_end_pfn,
3002 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3003 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3004}
0e0b864e 3005
c713216d 3006#else
6ea6e688 3007static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3008 unsigned long zone_type,
3009 unsigned long *zones_size)
3010{
3011 return zones_size[zone_type];
3012}
3013
6ea6e688 3014static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3015 unsigned long zone_type,
3016 unsigned long *zholes_size)
3017{
3018 if (!zholes_size)
3019 return 0;
3020
3021 return zholes_size[zone_type];
3022}
0e0b864e 3023
c713216d
MG
3024#endif
3025
a3142c8e 3026static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
3027 unsigned long *zones_size, unsigned long *zholes_size)
3028{
3029 unsigned long realtotalpages, totalpages = 0;
3030 enum zone_type i;
3031
3032 for (i = 0; i < MAX_NR_ZONES; i++)
3033 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3034 zones_size);
3035 pgdat->node_spanned_pages = totalpages;
3036
3037 realtotalpages = totalpages;
3038 for (i = 0; i < MAX_NR_ZONES; i++)
3039 realtotalpages -=
3040 zone_absent_pages_in_node(pgdat->node_id, i,
3041 zholes_size);
3042 pgdat->node_present_pages = realtotalpages;
3043 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3044 realtotalpages);
3045}
3046
835c134e
MG
3047#ifndef CONFIG_SPARSEMEM
3048/*
 3049 * Calculate the size of the zone->pageblock_flags bitmap, rounded to an unsigned long
 3050 * Start by making sure zonesize is a multiple of MAX_ORDER_NR_PAGES by rounding up
 3051 * Then figure NR_PAGEBLOCK_BITS worth of bits per MAX_ORDER_NR_PAGES block, finally
 3052 * round what is now in bits to the nearest long in bits, then return it in
3053 * bytes.
3054 */
3055static unsigned long __init usemap_size(unsigned long zonesize)
3056{
3057 unsigned long usemapsize;
3058
3059 usemapsize = roundup(zonesize, MAX_ORDER_NR_PAGES);
3060 usemapsize = usemapsize >> (MAX_ORDER-1);
3061 usemapsize *= NR_PAGEBLOCK_BITS;
3062 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3063
3064 return usemapsize / 8;
3065}
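/*
 * Example (assuming MAX_ORDER == 11 and 4KB pages): each MAX_ORDER-1
 * block covers 1024 pages (4MB), so a 1GB zone has 256 such blocks and
 * the usemap needs 256 * NR_PAGEBLOCK_BITS bits, rounded up to whole
 * longs and returned in bytes.
 */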
3066
3067static void __init setup_usemap(struct pglist_data *pgdat,
3068 struct zone *zone, unsigned long zonesize)
3069{
3070 unsigned long usemapsize = usemap_size(zonesize);
3071 zone->pageblock_flags = NULL;
3072 if (usemapsize) {
3073 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3074 memset(zone->pageblock_flags, 0, usemapsize);
3075 }
3076}
3077#else
3078static void inline setup_usemap(struct pglist_data *pgdat,
3079 struct zone *zone, unsigned long zonesize) {}
3080#endif /* CONFIG_SPARSEMEM */
3081
1da177e4
LT
3082/*
3083 * Set up the zone data structures:
3084 * - mark all pages reserved
3085 * - mark all memory queues empty
3086 * - clear the memory bitmaps
3087 */
86356ab1 3088static void __meminit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
3089 unsigned long *zones_size, unsigned long *zholes_size)
3090{
2f1b6248 3091 enum zone_type j;
ed8ece2e 3092 int nid = pgdat->node_id;
1da177e4 3093 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 3094 int ret;
1da177e4 3095
208d54e5 3096 pgdat_resize_init(pgdat);
1da177e4
LT
3097 pgdat->nr_zones = 0;
3098 init_waitqueue_head(&pgdat->kswapd_wait);
3099 pgdat->kswapd_max_order = 0;
3100
3101 for (j = 0; j < MAX_NR_ZONES; j++) {
3102 struct zone *zone = pgdat->node_zones + j;
0e0b864e 3103 unsigned long size, realsize, memmap_pages;
1da177e4 3104
c713216d
MG
3105 size = zone_spanned_pages_in_node(nid, j, zones_size);
3106 realsize = size - zone_absent_pages_in_node(nid, j,
3107 zholes_size);
1da177e4 3108
0e0b864e
MG
3109 /*
3110 * Adjust realsize so that it accounts for how much memory
3111 * is used by this zone for memmap. This affects the watermark
3112 * and per-cpu initialisations
3113 */
3114 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3115 if (realsize >= memmap_pages) {
3116 realsize -= memmap_pages;
3117 printk(KERN_DEBUG
3118 " %s zone: %lu pages used for memmap\n",
3119 zone_names[j], memmap_pages);
3120 } else
3121 printk(KERN_WARNING
3122 " %s zone: %lu pages exceeds realsize %lu\n",
3123 zone_names[j], memmap_pages, realsize);
3124
6267276f
CL
3125 /* Account for reserved pages */
3126 if (j == 0 && realsize > dma_reserve) {
0e0b864e 3127 realsize -= dma_reserve;
6267276f
CL
3128 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
3129 zone_names[0], dma_reserve);
0e0b864e
MG
3130 }
3131
98d2b0eb 3132 if (!is_highmem_idx(j))
1da177e4
LT
3133 nr_kernel_pages += realsize;
3134 nr_all_pages += realsize;
3135
3136 zone->spanned_pages = size;
3137 zone->present_pages = realsize;
9614634f 3138#ifdef CONFIG_NUMA
d5f541ed 3139 zone->node = nid;
8417bba4 3140 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 3141 / 100;
0ff38490 3142 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 3143#endif
1da177e4
LT
3144 zone->name = zone_names[j];
3145 spin_lock_init(&zone->lock);
3146 spin_lock_init(&zone->lru_lock);
bdc8cb98 3147 zone_seqlock_init(zone);
1da177e4 3148 zone->zone_pgdat = pgdat;
1da177e4 3149
3bb1a852 3150 zone->prev_priority = DEF_PRIORITY;
1da177e4 3151
ed8ece2e 3152 zone_pcp_init(zone);
1da177e4
LT
3153 INIT_LIST_HEAD(&zone->active_list);
3154 INIT_LIST_HEAD(&zone->inactive_list);
3155 zone->nr_scan_active = 0;
3156 zone->nr_scan_inactive = 0;
2244b95a 3157 zap_zone_vm_stats(zone);
53e9a615 3158 atomic_set(&zone->reclaim_in_progress, 0);
1da177e4
LT
3159 if (!size)
3160 continue;
3161
835c134e 3162 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
3163 ret = init_currently_empty_zone(zone, zone_start_pfn,
3164 size, MEMMAP_EARLY);
718127cc 3165 BUG_ON(ret);
1da177e4 3166 zone_start_pfn += size;
1da177e4
LT
3167 }
3168}
3169
577a32f6 3170static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 3171{
1da177e4
LT
3172 /* Skip empty nodes */
3173 if (!pgdat->node_spanned_pages)
3174 return;
3175
d41dee36 3176#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
3177 /* ia64 gets its own node_mem_map, before this, without bootmem */
3178 if (!pgdat->node_mem_map) {
e984bb43 3179 unsigned long size, start, end;
d41dee36
AW
3180 struct page *map;
3181
e984bb43
BP
3182 /*
3183 * The zone's endpoints aren't required to be MAX_ORDER
3184 * aligned but the node_mem_map endpoints must be in order
3185 * for the buddy allocator to function correctly.
3186 */
3187 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3188 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3189 end = ALIGN(end, MAX_ORDER_NR_PAGES);
3190 size = (end - start) * sizeof(struct page);
6f167ec7
DH
3191 map = alloc_remap(pgdat->node_id, size);
3192 if (!map)
3193 map = alloc_bootmem_node(pgdat, size);
e984bb43 3194 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 3195 }
12d810c1 3196#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3197 /*
3198 * With no DISCONTIG, the global mem_map is just set as node 0's
3199 */
c713216d 3200 if (pgdat == NODE_DATA(0)) {
1da177e4 3201 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
3202#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3203 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3204 mem_map -= pgdat->node_start_pfn;
3205#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3206 }
1da177e4 3207#endif
d41dee36 3208#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
3209}
3210
86356ab1 3211void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
1da177e4
LT
3212 unsigned long *zones_size, unsigned long node_start_pfn,
3213 unsigned long *zholes_size)
3214{
3215 pgdat->node_id = nid;
3216 pgdat->node_start_pfn = node_start_pfn;
c713216d 3217 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
3218
3219 alloc_node_mem_map(pgdat);
3220
3221 free_area_init_core(pgdat, zones_size, zholes_size);
3222}
3223
c713216d 3224#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
3225
3226#if MAX_NUMNODES > 1
3227/*
3228 * Figure out the number of possible node ids.
3229 */
3230static void __init setup_nr_node_ids(void)
3231{
3232 unsigned int node;
3233 unsigned int highest = 0;
3234
3235 for_each_node_mask(node, node_possible_map)
3236 highest = node;
3237 nr_node_ids = highest + 1;
3238}
3239#else
3240static inline void setup_nr_node_ids(void)
3241{
3242}
3243#endif
3244
c713216d
MG
3245/**
3246 * add_active_range - Register a range of PFNs backed by physical memory
3247 * @nid: The node ID the range resides on
3248 * @start_pfn: The start PFN of the available physical memory
3249 * @end_pfn: The end PFN of the available physical memory
3250 *
3251 * These ranges are stored in an early_node_map[] and later used by
3252 * free_area_init_nodes() to calculate zone sizes and holes. If the
3253 * range spans a memory hole, it is up to the architecture to ensure
3254 * the memory is not freed by the bootmem allocator. If possible
3255 * the range being registered will be merged with existing ranges.
3256 */
3257void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3258 unsigned long end_pfn)
3259{
3260 int i;
3261
3262 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3263 "%d entries of %d used\n",
3264 nid, start_pfn, end_pfn,
3265 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3266
3267 /* Merge with existing active regions if possible */
3268 for (i = 0; i < nr_nodemap_entries; i++) {
3269 if (early_node_map[i].nid != nid)
3270 continue;
3271
3272 /* Skip if an existing region covers this new one */
3273 if (start_pfn >= early_node_map[i].start_pfn &&
3274 end_pfn <= early_node_map[i].end_pfn)
3275 return;
3276
3277 /* Merge forward if suitable */
3278 if (start_pfn <= early_node_map[i].end_pfn &&
3279 end_pfn > early_node_map[i].end_pfn) {
3280 early_node_map[i].end_pfn = end_pfn;
3281 return;
3282 }
3283
3284 /* Merge backward if suitable */
3285 if (start_pfn < early_node_map[i].end_pfn &&
3286 end_pfn >= early_node_map[i].start_pfn) {
3287 early_node_map[i].start_pfn = start_pfn;
3288 return;
3289 }
3290 }
3291
3292 /* Check that early_node_map is large enough */
3293 if (i >= MAX_ACTIVE_REGIONS) {
3294 printk(KERN_CRIT "More than %d memory regions, truncating\n",
3295 MAX_ACTIVE_REGIONS);
3296 return;
3297 }
3298
3299 early_node_map[i].nid = nid;
3300 early_node_map[i].start_pfn = start_pfn;
3301 early_node_map[i].end_pfn = end_pfn;
3302 nr_nodemap_entries = i + 1;
3303}
3304
3305/**
3306 * shrink_active_range - Shrink an existing registered range of PFNs
3307 * @nid: The node id the range is on that should be shrunk
3308 * @old_end_pfn: The old end PFN of the range
3309 * @new_end_pfn: The new PFN of the range
3310 *
3311 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
3312 * The map is kept at the end physical page range that has already been
3313 * registered with add_active_range(). This function allows an arch to shrink
3314 * an existing registered range.
3315 */
3316void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3317 unsigned long new_end_pfn)
3318{
3319 int i;
3320
3321 /* Find the old active region end and shrink */
3322 for_each_active_range_index_in_nid(i, nid)
3323 if (early_node_map[i].end_pfn == old_end_pfn) {
3324 early_node_map[i].end_pfn = new_end_pfn;
3325 break;
3326 }
3327}
3328
3329/**
3330 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 3331 *
c713216d
MG
3332 * During discovery, it may be found that a table like SRAT is invalid
3333 * and an alternative discovery method must be used. This function removes
3334 * all currently registered regions.
3335 */
88ca3b94 3336void __init remove_all_active_ranges(void)
c713216d
MG
3337{
3338 memset(early_node_map, 0, sizeof(early_node_map));
3339 nr_nodemap_entries = 0;
fb01439c
MG
3340#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3341 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3342 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3343#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
c713216d
MG
3344}
3345
3346/* Compare two active node_active_regions */
3347static int __init cmp_node_active_region(const void *a, const void *b)
3348{
3349 struct node_active_region *arange = (struct node_active_region *)a;
3350 struct node_active_region *brange = (struct node_active_region *)b;
3351
3352 /* Done this way to avoid overflows */
3353 if (arange->start_pfn > brange->start_pfn)
3354 return 1;
3355 if (arange->start_pfn < brange->start_pfn)
3356 return -1;
3357
3358 return 0;
3359}
3360
3361/* sort the node_map by start_pfn */
3362static void __init sort_node_map(void)
3363{
3364 sort(early_node_map, (size_t)nr_nodemap_entries,
3365 sizeof(struct node_active_region),
3366 cmp_node_active_region, NULL);
3367}
3368
a6af2bc3 3369/* Find the lowest pfn for a node */
c713216d
MG
3370unsigned long __init find_min_pfn_for_node(unsigned long nid)
3371{
3372 int i;
a6af2bc3 3373 unsigned long min_pfn = ULONG_MAX;
1abbfb41 3374
c713216d
MG
3375 /* Assuming a sorted map, the first range found has the starting pfn */
3376 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 3377 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 3378
a6af2bc3
MG
3379 if (min_pfn == ULONG_MAX) {
3380 printk(KERN_WARNING
3381 "Could not find start_pfn for node %lu\n", nid);
3382 return 0;
3383 }
3384
3385 return min_pfn;
c713216d
MG
3386}
3387
3388/**
3389 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3390 *
3391 * It returns the minimum PFN based on information provided via
88ca3b94 3392 * add_active_range().
c713216d
MG
3393 */
3394unsigned long __init find_min_pfn_with_active_regions(void)
3395{
3396 return find_min_pfn_for_node(MAX_NUMNODES);
3397}
3398
3399/**
3400 * find_max_pfn_with_active_regions - Find the maximum PFN registered
3401 *
3402 * It returns the maximum PFN based on information provided via
88ca3b94 3403 * add_active_range().
c713216d
MG
3404 */
3405unsigned long __init find_max_pfn_with_active_regions(void)
3406{
3407 int i;
3408 unsigned long max_pfn = 0;
3409
3410 for (i = 0; i < nr_nodemap_entries; i++)
3411 max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3412
3413 return max_pfn;
3414}
3415
37b07e41
LS
3416/*
3417 * early_calculate_totalpages()
3418  * Sum the pages spanned by all active regions; used when sizing ZONE_MOVABLE.
3419  * Also populate N_HIGH_MEMORY so usable_nodes can be calculated.
3420 */
7e63efef
MG
3421unsigned long __init early_calculate_totalpages(void)
3422{
3423 int i;
3424 unsigned long totalpages = 0;
3425
37b07e41
LS
3426 for (i = 0; i < nr_nodemap_entries; i++) {
3427 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 3428 early_node_map[i].start_pfn;
37b07e41
LS
3429 totalpages += pages;
3430 if (pages)
3431 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3432 }
3433 return totalpages;
7e63efef
MG
3434}
3435
2a1e274a
MG
3436/*
3437  * Find the PFN at which ZONE_MOVABLE begins in each node. Kernel memory
3438  * is spread evenly between nodes as long as the nodes have enough
3439  * memory. When they don't, some nodes will have more kernelcore than
3440  * others.
3441 */
3442void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3443{
3444 int i, nid;
3445 unsigned long usable_startpfn;
3446 unsigned long kernelcore_node, kernelcore_remaining;
37b07e41
LS
3447 unsigned long totalpages = early_calculate_totalpages();
3448 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 3449
7e63efef
MG
3450 /*
3451 	 * If movablecore was specified, calculate the corresponding amount of
3452 	 * kernelcore so that memory usable for any allocation type is evenly
3453 	 * spread. If both kernelcore and movablecore are specified, then the
3454 	 * value of kernelcore will be used for required_kernelcore if it is
3455 	 * greater than what movablecore would have allowed.
3456 	 *
3457 */
3458 if (required_movablecore) {
7e63efef
MG
3459 unsigned long corepages;
3460
3461 /*
3462 * Round-up so that ZONE_MOVABLE is at least as large as what
3463 * was requested by the user
3464 */
3465 required_movablecore =
3466 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3467 corepages = totalpages - required_movablecore;
3468
3469 required_kernelcore = max(required_kernelcore, corepages);
3470 }
3471
2a1e274a
MG
3472 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3473 if (!required_kernelcore)
3474 return;
3475
3476 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3477 find_usable_zone_for_movable();
3478 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3479
3480restart:
3481 /* Spread kernelcore memory as evenly as possible throughout nodes */
3482 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 3483 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
3484 /*
3485 * Recalculate kernelcore_node if the division per node
3486 * now exceeds what is necessary to satisfy the requested
3487 * amount of memory for the kernel
3488 */
3489 if (required_kernelcore < kernelcore_node)
3490 kernelcore_node = required_kernelcore / usable_nodes;
3491
3492 /*
3493 * As the map is walked, we track how much memory is usable
3494 * by the kernel using kernelcore_remaining. When it is
3495 * 0, the rest of the node is usable by ZONE_MOVABLE
3496 */
3497 kernelcore_remaining = kernelcore_node;
3498
3499 /* Go through each range of PFNs within this node */
3500 for_each_active_range_index_in_nid(i, nid) {
3501 unsigned long start_pfn, end_pfn;
3502 unsigned long size_pages;
3503
3504 start_pfn = max(early_node_map[i].start_pfn,
3505 zone_movable_pfn[nid]);
3506 end_pfn = early_node_map[i].end_pfn;
3507 if (start_pfn >= end_pfn)
3508 continue;
3509
3510 /* Account for what is only usable for kernelcore */
3511 if (start_pfn < usable_startpfn) {
3512 unsigned long kernel_pages;
3513 kernel_pages = min(end_pfn, usable_startpfn)
3514 - start_pfn;
3515
3516 kernelcore_remaining -= min(kernel_pages,
3517 kernelcore_remaining);
3518 required_kernelcore -= min(kernel_pages,
3519 required_kernelcore);
3520
3521 /* Continue if range is now fully accounted */
3522 if (end_pfn <= usable_startpfn) {
3523
3524 /*
3525 * Push zone_movable_pfn to the end so
3526 * that if we have to rebalance
3527 * kernelcore across nodes, we will
3528 * not double account here
3529 */
3530 zone_movable_pfn[nid] = end_pfn;
3531 continue;
3532 }
3533 start_pfn = usable_startpfn;
3534 }
3535
3536 /*
3537 * The usable PFN range for ZONE_MOVABLE is from
3538 * start_pfn->end_pfn. Calculate size_pages as the
3539 * number of pages used as kernelcore
3540 */
3541 size_pages = end_pfn - start_pfn;
3542 if (size_pages > kernelcore_remaining)
3543 size_pages = kernelcore_remaining;
3544 zone_movable_pfn[nid] = start_pfn + size_pages;
3545
3546 /*
3547 			 * Some kernelcore has been accounted for; update the
3548 			 * counts and break if the kernelcore for this node has
3549 			 * been satisfied
3550 */
3551 required_kernelcore -= min(required_kernelcore,
3552 size_pages);
3553 kernelcore_remaining -= size_pages;
3554 if (!kernelcore_remaining)
3555 break;
3556 }
3557 }
3558
3559 /*
3560 * If there is still required_kernelcore, we do another pass with one
3561 * less node in the count. This will push zone_movable_pfn[nid] further
3562 * along on the nodes that still have memory until kernelcore is
3563 	 * satisfied
3564 */
3565 usable_nodes--;
3566 if (usable_nodes && required_kernelcore > usable_nodes)
3567 goto restart;
3568
3569 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3570 for (nid = 0; nid < MAX_NUMNODES; nid++)
3571 zone_movable_pfn[nid] =
3572 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3573}
3574
37b07e41
LS
3575 /* Any regular memory on that node? */
3576static void check_for_regular_memory(pg_data_t *pgdat)
3577{
3578#ifdef CONFIG_HIGHMEM
3579 enum zone_type zone_type;
3580
3581 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3582 struct zone *zone = &pgdat->node_zones[zone_type];
3583 if (zone->present_pages)
3584 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3585 }
3586#endif
3587}
3588
c713216d
MG
3589/**
3590 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 3591 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
3592 *
3593 * This will call free_area_init_node() for each active node in the system.
3594  * Using the page ranges provided by add_active_range(), the size of each
3595  * zone in each node and their holes are calculated. If the maximum PFNs
3596  * of two adjacent zones match, the higher zone is assumed to be empty.
3597  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3598  * that ZONE_DMA32 has no pages. It is also assumed that a zone
3599 * starts where the previous one ended. For example, ZONE_DMA32 starts
3600 * at arch_max_dma_pfn.
3601 */
3602void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3603{
3604 unsigned long nid;
3605 enum zone_type i;
3606
a6af2bc3
MG
3607 /* Sort early_node_map as initialisation assumes it is sorted */
3608 sort_node_map();
3609
c713216d
MG
3610 /* Record where the zone boundaries are */
3611 memset(arch_zone_lowest_possible_pfn, 0,
3612 sizeof(arch_zone_lowest_possible_pfn));
3613 memset(arch_zone_highest_possible_pfn, 0,
3614 sizeof(arch_zone_highest_possible_pfn));
3615 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3616 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3617 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
3618 if (i == ZONE_MOVABLE)
3619 continue;
c713216d
MG
3620 arch_zone_lowest_possible_pfn[i] =
3621 arch_zone_highest_possible_pfn[i-1];
3622 arch_zone_highest_possible_pfn[i] =
3623 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3624 }
2a1e274a
MG
3625 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3626 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3627
3628 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3629 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3630 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 3631
c713216d
MG
3632 /* Print out the zone ranges */
3633 printk("Zone PFN ranges:\n");
2a1e274a
MG
3634 for (i = 0; i < MAX_NR_ZONES; i++) {
3635 if (i == ZONE_MOVABLE)
3636 continue;
c713216d
MG
3637 printk(" %-8s %8lu -> %8lu\n",
3638 zone_names[i],
3639 arch_zone_lowest_possible_pfn[i],
3640 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
3641 }
3642
3643 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3644 printk("Movable zone start PFN for each node\n");
3645 for (i = 0; i < MAX_NUMNODES; i++) {
3646 if (zone_movable_pfn[i])
3647 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3648 }
c713216d
MG
3649
3650 /* Print out the early_node_map[] */
3651 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3652 for (i = 0; i < nr_nodemap_entries; i++)
3653 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3654 early_node_map[i].start_pfn,
3655 early_node_map[i].end_pfn);
3656
3657 /* Initialise every node */
8ef82866 3658 setup_nr_node_ids();
c713216d
MG
3659 for_each_online_node(nid) {
3660 pg_data_t *pgdat = NODE_DATA(nid);
3661 free_area_init_node(nid, pgdat, NULL,
3662 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
3663
3664 /* Any memory on that node */
3665 if (pgdat->node_present_pages)
3666 node_set_state(nid, N_HIGH_MEMORY);
3667 check_for_regular_memory(pgdat);
c713216d
MG
3668 }
3669}
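
/*
 * Illustrative sketch (assumption, not taken from any particular arch):
 * a NUMA architecture's paging_init() would typically fill max_zone_pfns[]
 * and hand it to free_area_init_nodes() after registering its active ranges.
 * MAX_DMA_PFN/MAX_DMA32_PFN stand in for arch-defined zone limits.
 */
#if 0	/* illustration only */
void __init example_paging_init(unsigned long end_pfn)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;		/* arch-defined */
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;	/* arch-defined */
	max_zone_pfns[ZONE_NORMAL] = end_pfn;
	/* ZONE_MOVABLE is left 0; it is carved out of the other zones
	 * according to kernelcore=/movablecore= by this file. */
	free_area_init_nodes(max_zone_pfns);
}
#endif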
2a1e274a 3670
7e63efef 3671static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
3672{
3673 unsigned long long coremem;
3674 if (!p)
3675 return -EINVAL;
3676
3677 coremem = memparse(p, &p);
7e63efef 3678 *core = coremem >> PAGE_SHIFT;
2a1e274a 3679
7e63efef 3680 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
3681 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3682
3683 return 0;
3684}
ed7ed365 3685
7e63efef
MG
3686/*
3687  * kernelcore=size sets the amount of memory to use for allocations that
3688  * cannot be reclaimed or migrated.
3689 */
3690static int __init cmdline_parse_kernelcore(char *p)
3691{
3692 return cmdline_parse_core(p, &required_kernelcore);
3693}
3694
3695/*
3696  * movablecore=size sets the amount of memory to use for allocations that
3697  * can be reclaimed or migrated.
3698 */
3699static int __init cmdline_parse_movablecore(char *p)
3700{
3701 return cmdline_parse_core(p, &required_movablecore);
3702}
3703
ed7ed365 3704early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 3705early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 3706
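/*
 * Usage note (example values only, not a recommendation): both parameters
 * take a size parsed by memparse() on the kernel command line, e.g.
 *
 *	kernelcore=512M		spread ~512MB of unmovable kernel memory
 *				evenly across the nodes with memory
 *	movablecore=2G		request ~2GB of ZONE_MOVABLE; kernelcore is
 *				then derived as total memory minus ~2GB
 *
 * If both are given, the larger resulting kernelcore wins (see
 * find_zone_movable_pfns_for_nodes() above).
 */
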
c713216d
MG
3707#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3708
0e0b864e 3709/**
88ca3b94
RD
3710 * set_dma_reserve - set the specified number of pages reserved in the first zone
3711 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
3712 *
3713 * The per-cpu batchsize and zone watermarks are determined by present_pages.
3714 * In the DMA zone, a significant percentage may be consumed by kernel image
3715 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
3716 * function may optionally be used to account for unfreeable pages in the
3717 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3718 * smaller per-cpu batchsize.
0e0b864e
MG
3719 */
3720void __init set_dma_reserve(unsigned long new_dma_reserve)
3721{
3722 dma_reserve = new_dma_reserve;
3723}
3724
93b7504e 3725#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
3726static bootmem_data_t contig_bootmem_data;
3727struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3728
3729EXPORT_SYMBOL(contig_page_data);
93b7504e 3730#endif
1da177e4
LT
3731
3732void __init free_area_init(unsigned long *zones_size)
3733{
93b7504e 3734 free_area_init_node(0, NODE_DATA(0), zones_size,
1da177e4
LT
3735 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3736}
1da177e4 3737
1da177e4
LT
3738static int page_alloc_cpu_notify(struct notifier_block *self,
3739 unsigned long action, void *hcpu)
3740{
3741 int cpu = (unsigned long)hcpu;
1da177e4 3742
8bb78442 3743 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1da177e4
LT
3744 local_irq_disable();
3745 __drain_pages(cpu);
f8891e5e 3746 vm_events_fold_cpu(cpu);
1da177e4 3747 local_irq_enable();
2244b95a 3748 refresh_cpu_vm_stats(cpu);
1da177e4
LT
3749 }
3750 return NOTIFY_OK;
3751}
1da177e4
LT
3752
3753void __init page_alloc_init(void)
3754{
3755 hotcpu_notifier(page_alloc_cpu_notify, 0);
3756}
3757
cb45b0e9
HA
3758/*
3759  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
3760 * or min_free_kbytes changes.
3761 */
3762static void calculate_totalreserve_pages(void)
3763{
3764 struct pglist_data *pgdat;
3765 unsigned long reserve_pages = 0;
2f6726e5 3766 enum zone_type i, j;
cb45b0e9
HA
3767
3768 for_each_online_pgdat(pgdat) {
3769 for (i = 0; i < MAX_NR_ZONES; i++) {
3770 struct zone *zone = pgdat->node_zones + i;
3771 unsigned long max = 0;
3772
3773 /* Find valid and maximum lowmem_reserve in the zone */
3774 for (j = i; j < MAX_NR_ZONES; j++) {
3775 if (zone->lowmem_reserve[j] > max)
3776 max = zone->lowmem_reserve[j];
3777 }
3778
3779 /* we treat pages_high as reserved pages. */
3780 max += zone->pages_high;
3781
3782 if (max > zone->present_pages)
3783 max = zone->present_pages;
3784 reserve_pages += max;
3785 }
3786 }
3787 totalreserve_pages = reserve_pages;
3788}
3789
1da177e4
LT
3790/*
3791 * setup_per_zone_lowmem_reserve - called whenever
3792  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
3793 * has a correct pages reserved value, so an adequate number of
3794 * pages are left in the zone after a successful __alloc_pages().
3795 */
3796static void setup_per_zone_lowmem_reserve(void)
3797{
3798 struct pglist_data *pgdat;
2f6726e5 3799 enum zone_type j, idx;
1da177e4 3800
ec936fc5 3801 for_each_online_pgdat(pgdat) {
1da177e4
LT
3802 for (j = 0; j < MAX_NR_ZONES; j++) {
3803 struct zone *zone = pgdat->node_zones + j;
3804 unsigned long present_pages = zone->present_pages;
3805
3806 zone->lowmem_reserve[j] = 0;
3807
2f6726e5
CL
3808 idx = j;
3809 while (idx) {
1da177e4
LT
3810 struct zone *lower_zone;
3811
2f6726e5
CL
3812 idx--;
3813
1da177e4
LT
3814 if (sysctl_lowmem_reserve_ratio[idx] < 1)
3815 sysctl_lowmem_reserve_ratio[idx] = 1;
3816
3817 lower_zone = pgdat->node_zones + idx;
3818 lower_zone->lowmem_reserve[j] = present_pages /
3819 sysctl_lowmem_reserve_ratio[idx];
3820 present_pages += lower_zone->present_pages;
3821 }
3822 }
3823 }
cb45b0e9
HA
3824
3825 /* update totalreserve_pages */
3826 calculate_totalreserve_pages();
1da177e4
LT
3827}
3828
88ca3b94
RD
3829/**
3830 * setup_per_zone_pages_min - called when min_free_kbytes changes.
3831 *
3832 * Ensures that the pages_{min,low,high} values for each zone are set correctly
3833 * with respect to min_free_kbytes.
1da177e4 3834 */
3947be19 3835void setup_per_zone_pages_min(void)
1da177e4
LT
3836{
3837 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
3838 unsigned long lowmem_pages = 0;
3839 struct zone *zone;
3840 unsigned long flags;
3841
3842 /* Calculate total number of !ZONE_HIGHMEM pages */
3843 for_each_zone(zone) {
3844 if (!is_highmem(zone))
3845 lowmem_pages += zone->present_pages;
3846 }
3847
3848 for_each_zone(zone) {
ac924c60
AM
3849 u64 tmp;
3850
1da177e4 3851 spin_lock_irqsave(&zone->lru_lock, flags);
ac924c60
AM
3852 tmp = (u64)pages_min * zone->present_pages;
3853 do_div(tmp, lowmem_pages);
1da177e4
LT
3854 if (is_highmem(zone)) {
3855 /*
669ed175
NP
3856 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
3857 * need highmem pages, so cap pages_min to a small
3858 * value here.
3859 *
3860 * The (pages_high-pages_low) and (pages_low-pages_min)
3861 			 * deltas control asynchronous page reclaim, and so should
3862 * not be capped for highmem.
1da177e4
LT
3863 */
3864 int min_pages;
3865
3866 min_pages = zone->present_pages / 1024;
3867 if (min_pages < SWAP_CLUSTER_MAX)
3868 min_pages = SWAP_CLUSTER_MAX;
3869 if (min_pages > 128)
3870 min_pages = 128;
3871 zone->pages_min = min_pages;
3872 } else {
669ed175
NP
3873 /*
3874 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
3875 * proportionate to the zone's size.
3876 */
669ed175 3877 zone->pages_min = tmp;
1da177e4
LT
3878 }
3879
ac924c60
AM
3880 zone->pages_low = zone->pages_min + (tmp >> 2);
3881 zone->pages_high = zone->pages_min + (tmp >> 1);
1da177e4
LT
3882 spin_unlock_irqrestore(&zone->lru_lock, flags);
3883 }
cb45b0e9
HA
3884
3885 /* update totalreserve_pages */
3886 calculate_totalreserve_pages();
1da177e4
LT
3887}
3888
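/*
 * Worked example (hypothetical numbers): with min_free_kbytes = 4096 on a
 * 4KB-page machine, pages_min totals 1024 pages spread across the lowmem
 * zones in proportion to their size.  A lowmem zone holding half of lowmem
 * gets tmp = 512, so setup_per_zone_pages_min() sets
 *	pages_min  = tmp           = 512
 *	pages_low  = pages_min + tmp/4 = 640
 *	pages_high = pages_min + tmp/2 = 768
 */
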
3889/*
3890 * Initialise min_free_kbytes.
3891 *
3892 * For small machines we want it small (128k min). For large machines
3893 * we want it large (64MB max). But it is not linear, because network
3894 * bandwidth does not increase linearly with machine size. We use
3895 *
3896 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
3897 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
3898 *
3899 * which yields
3900 *
3901 * 16MB: 512k
3902 * 32MB: 724k
3903 * 64MB: 1024k
3904 * 128MB: 1448k
3905 * 256MB: 2048k
3906 * 512MB: 2896k
3907 * 1024MB: 4096k
3908 * 2048MB: 5792k
3909 * 4096MB: 8192k
3910 * 8192MB: 11584k
3911 * 16384MB: 16384k
3912 */
3913static int __init init_per_zone_pages_min(void)
3914{
3915 unsigned long lowmem_kbytes;
3916
3917 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
3918
3919 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
3920 if (min_free_kbytes < 128)
3921 min_free_kbytes = 128;
3922 if (min_free_kbytes > 65536)
3923 min_free_kbytes = 65536;
3924 setup_per_zone_pages_min();
3925 setup_per_zone_lowmem_reserve();
3926 return 0;
3927}
3928module_init(init_per_zone_pages_min)
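
/*
 * Worked example (hypothetical machine): with 256MB of lowmem,
 * lowmem_kbytes = 262144, so
 *	min_free_kbytes = int_sqrt(262144 * 16) = int_sqrt(4194304) = 2048
 * which matches the 256MB row in the table above and lies within the
 * [128, 65536] clamp applied by init_per_zone_pages_min().
 */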
3929
3930/*
3931 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
3932  *	that we can call setup_per_zone_pages_min() whenever min_free_kbytes
3933 * changes.
3934 */
3935int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
3936 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3937{
3938 proc_dointvec(table, write, file, buffer, length, ppos);
3b1d92c5
MG
3939 if (write)
3940 setup_per_zone_pages_min();
1da177e4
LT
3941 return 0;
3942}
3943
9614634f
CL
3944#ifdef CONFIG_NUMA
3945int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
3946 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3947{
3948 struct zone *zone;
3949 int rc;
3950
3951 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3952 if (rc)
3953 return rc;
3954
3955 for_each_zone(zone)
8417bba4 3956 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
3957 sysctl_min_unmapped_ratio) / 100;
3958 return 0;
3959}
0ff38490
CL
3960
3961int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
3962 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3963{
3964 struct zone *zone;
3965 int rc;
3966
3967 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3968 if (rc)
3969 return rc;
3970
3971 for_each_zone(zone)
3972 zone->min_slab_pages = (zone->present_pages *
3973 sysctl_min_slab_ratio) / 100;
3974 return 0;
3975}
9614634f
CL
3976#endif
3977
1da177e4
LT
3978/*
3979 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
3980 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
3981 * whenever sysctl_lowmem_reserve_ratio changes.
3982 *
3983  * The reserve ratio obviously has no relation to the pages_min
3984  * watermarks. The lowmem reserve ratio is only meaningful in relation
3985  * to the boot-time zone sizes.
3986 */
3987int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
3988 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
3989{
3990 proc_dointvec_minmax(table, write, file, buffer, length, ppos);
3991 setup_per_zone_lowmem_reserve();
3992 return 0;
3993}
3994
8ad4b1fb
RS
3995/*
3996 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
3997  * cpu.  It determines what fraction of a zone's pages a hot per-cpu
3998  * pagelist may hold before it is flushed back to the buddy allocator.
3999 */
4000
4001int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4002 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4003{
4004 struct zone *zone;
4005 unsigned int cpu;
4006 int ret;
4007
4008 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4009 if (!write || (ret == -EINVAL))
4010 return ret;
4011 for_each_zone(zone) {
4012 for_each_online_cpu(cpu) {
4013 unsigned long high;
4014 high = zone->present_pages / percpu_pagelist_fraction;
4015 setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4016 }
4017 }
4018 return 0;
4019}
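
/*
 * Worked example (hypothetical values): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone with 262144 present pages
 * sets each online cpu's pcp->high for that zone to 262144 / 8 = 32768 pages
 * via setup_pagelist_highmark().
 */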
4020
f034b5d4 4021int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
4022
4023#ifdef CONFIG_NUMA
4024static int __init set_hashdist(char *str)
4025{
4026 if (!str)
4027 return 0;
4028 hashdist = simple_strtoul(str, &str, 0);
4029 return 1;
4030}
4031__setup("hashdist=", set_hashdist);
4032#endif
4033
4034/*
4035 * allocate a large system hash table from bootmem
4036 * - it is assumed that the hash table must contain an exact power-of-2
4037 * quantity of entries
4038 * - limit is the number of hash buckets, not the total allocation size
4039 */
4040void *__init alloc_large_system_hash(const char *tablename,
4041 unsigned long bucketsize,
4042 unsigned long numentries,
4043 int scale,
4044 int flags,
4045 unsigned int *_hash_shift,
4046 unsigned int *_hash_mask,
4047 unsigned long limit)
4048{
4049 unsigned long long max = limit;
4050 unsigned long log2qty, size;
4051 void *table = NULL;
4052
4053 /* allow the kernel cmdline to have a say */
4054 if (!numentries) {
4055 /* round applicable memory size up to nearest megabyte */
04903664 4056 numentries = nr_kernel_pages;
1da177e4
LT
4057 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4058 numentries >>= 20 - PAGE_SHIFT;
4059 numentries <<= 20 - PAGE_SHIFT;
4060
4061 /* limit to 1 bucket per 2^scale bytes of low memory */
4062 if (scale > PAGE_SHIFT)
4063 numentries >>= (scale - PAGE_SHIFT);
4064 else
4065 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
4066
4067 /* Make sure we've got at least a 0-order allocation.. */
4068 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4069 numentries = PAGE_SIZE / bucketsize;
1da177e4 4070 }
6e692ed3 4071 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
4072
4073 /* limit allocation size to 1/16 total memory by default */
4074 if (max == 0) {
4075 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4076 do_div(max, bucketsize);
4077 }
4078
4079 if (numentries > max)
4080 numentries = max;
4081
f0d1b0b3 4082 log2qty = ilog2(numentries);
1da177e4
LT
4083
4084 do {
4085 size = bucketsize << log2qty;
4086 if (flags & HASH_EARLY)
4087 table = alloc_bootmem(size);
4088 else if (hashdist)
4089 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4090 else {
4091 unsigned long order;
4092 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
4093 ;
4094 table = (void*) __get_free_pages(GFP_ATOMIC, order);
1037b83b
ED
4095 /*
4096 * If bucketsize is not a power-of-two, we may free
4097 * some pages at the end of hash table.
4098 */
4099 if (table) {
4100 unsigned long alloc_end = (unsigned long)table +
4101 (PAGE_SIZE << order);
4102 unsigned long used = (unsigned long)table +
4103 PAGE_ALIGN(size);
4104 split_page(virt_to_page(table), order);
4105 while (used < alloc_end) {
4106 free_page(used);
4107 used += PAGE_SIZE;
4108 }
4109 }
1da177e4
LT
4110 }
4111 } while (!table && size > PAGE_SIZE && --log2qty);
4112
4113 if (!table)
4114 panic("Failed to allocate %s hash table\n", tablename);
4115
b49ad484 4116 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
4117 tablename,
4118 (1U << log2qty),
f0d1b0b3 4119 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
4120 size);
4121
4122 if (_hash_shift)
4123 *_hash_shift = log2qty;
4124 if (_hash_mask)
4125 *_hash_mask = (1 << log2qty) - 1;
4126
4127 return table;
4128}
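
/*
 * Illustrative caller sketch (assumption; the names and parameter values are
 * made up and do not come from this file): a subsystem sizing an early boot
 * hash table would typically call alloc_large_system_hash() like this.
 */
#if 0	/* illustration only */
static struct hlist_head *example_hashtable;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hashtable = alloc_large_system_hash("Example cache",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					13,		/* 1 bucket per 8KB */
					HASH_EARLY,
					&example_hash_shift,
					&example_hash_mask,
					0);		/* default limit */
}
#endif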
a117e66e
KH
4129
4130#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
a117e66e
KH
4131struct page *pfn_to_page(unsigned long pfn)
4132{
67de6482 4133 return __pfn_to_page(pfn);
a117e66e
KH
4134}
4135unsigned long page_to_pfn(struct page *page)
4136{
67de6482 4137 return __page_to_pfn(page);
a117e66e 4138}
a117e66e
KH
4139EXPORT_SYMBOL(pfn_to_page);
4140EXPORT_SYMBOL(page_to_pfn);
4141#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
6220ec78 4142
835c134e
MG
4143/* Return a pointer to the bitmap storing bits affecting a block of pages */
4144static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4145 unsigned long pfn)
4146{
4147#ifdef CONFIG_SPARSEMEM
4148 return __pfn_to_section(pfn)->pageblock_flags;
4149#else
4150 return zone->pageblock_flags;
4151#endif /* CONFIG_SPARSEMEM */
4152}
4153
4154static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4155{
4156#ifdef CONFIG_SPARSEMEM
4157 pfn &= (PAGES_PER_SECTION-1);
4158 return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
4159#else
4160 pfn = pfn - zone->zone_start_pfn;
4161 return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
4162#endif /* CONFIG_SPARSEMEM */
4163}
4164
4165/**
4166 * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages
4167 * @page: The page within the block of interest
4168 * @start_bitidx: The first bit of interest to retrieve
4169 * @end_bitidx: The last bit of interest
4170 * returns pageblock_bits flags
4171 */
4172unsigned long get_pageblock_flags_group(struct page *page,
4173 int start_bitidx, int end_bitidx)
4174{
4175 struct zone *zone;
4176 unsigned long *bitmap;
4177 unsigned long pfn, bitidx;
4178 unsigned long flags = 0;
4179 unsigned long value = 1;
4180
4181 zone = page_zone(page);
4182 pfn = page_to_pfn(page);
4183 bitmap = get_pageblock_bitmap(zone, pfn);
4184 bitidx = pfn_to_bitidx(zone, pfn);
4185
4186 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4187 if (test_bit(bitidx + start_bitidx, bitmap))
4188 flags |= value;
6220ec78 4189
835c134e
MG
4190 return flags;
4191}
4192
4193/**
4194 * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages
4195 * @page: The page within the block of interest
4196 * @start_bitidx: The first bit of interest
4197 * @end_bitidx: The last bit of interest
4198 * @flags: The flags to set
4199 */
4200void set_pageblock_flags_group(struct page *page, unsigned long flags,
4201 int start_bitidx, int end_bitidx)
4202{
4203 struct zone *zone;
4204 unsigned long *bitmap;
4205 unsigned long pfn, bitidx;
4206 unsigned long value = 1;
4207
4208 zone = page_zone(page);
4209 pfn = page_to_pfn(page);
4210 bitmap = get_pageblock_bitmap(zone, pfn);
4211 bitidx = pfn_to_bitidx(zone, pfn);
4212
4213 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4214 if (flags & value)
4215 __set_bit(bitidx + start_bitidx, bitmap);
4216 else
4217 __clear_bit(bitidx + start_bitidx, bitmap);
4218}
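
/*
 * Illustrative sketch (assumption: PB_migrate, PB_migrate_end and the
 * MIGRATE_* values are defined as in include/linux/pageblock-flags.h of this
 * era): a pageblock's migration type can be stored and read through the
 * bit-group helpers above.
 */
#if 0	/* illustration only */
static void example_mark_block_movable(struct page *page)
{
	set_pageblock_flags_group(page, (unsigned long)MIGRATE_MOVABLE,
					PB_migrate, PB_migrate_end);
}

static int example_block_migratetype(struct page *page)
{
	return (int)get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
#endif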