[PATCH] Fix do_mbind warning with CONFIG_MIGRATION=n
mm/page_alloc.c
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	256,
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32
#endif
};
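
/*
 * Worked example (illustrative): with the default ratio of 256 and 784M
 * of ZONE_NORMAL sitting above a 16M ZONE_DMA, a NORMAL allocation leaves
 * 784M/256 (about 3M) of ZONE_DMA unavailable to itself, so ordinary
 * lowmem pressure cannot quietly exhaust the tiny DMA zone.
 */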

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = {
	"DMA",
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __initdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
  int __initdata nr_nodemap_entries;
  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

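/*
 * Illustrative state (following the comment above, not extra behaviour):
 * after prep_compound_page(page, 2), page[1].lru.next points at
 * free_compound_page, page[1].lru.prev holds the order 2, and all four
 * pages have PG_compound set with ->private pointing back at page.
 */
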
static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

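/*
 * Worked example (illustrative, extending rule 2 above): for the order-1
 * buddies #8 and #10, __find_combined_index(8, 1) = 8 & ~2 = 8, so the
 * merged order-2 block starts at index 8, and its own order-2 buddy is
 * 8 ^ (1 << 2) = 12.
 */
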
/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

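/*
 * Worked example (illustrative): freeing page 5 at order 0 when page 4 is
 * already free merges them into an order-1 block at index 4 (5 & ~1); if
 * pages 6-7 are free as an order-1 block, the loop merges again into an
 * order-2 block at index 4 before the final list_add().
 */
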
static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

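/*
 * Worked example (illustrative): carving an order-0 page out of an
 * order-3 block of pages 0-7 splits off the upper halves on the way
 * down: pages 4-7 go to the order-2 free list, 2-3 to order-1, page 1
 * to order-0, and page 0 is handed back to the caller.
 */
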
/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belongs to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i;
	enum zone_type z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!PageNosave(page))
				ClearPageNosaveFree(page);
		}

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				SetPageNosaveFree(pfn_to_page(pfn + i));
		}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

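/*
 * Usage sketch (illustrative, not a caller in this file): a driver that
 * wants four individually-freeable pages could do
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 */
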
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	unsigned long min = mark;
	long free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

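/*
 * Worked example (illustrative): an order-1 GFP_ATOMIC request against a
 * mark of 128 pages gets ALLOC_HIGH|ALLOC_HARDER, so min becomes
 * 128 - 64 - 16 = 48; ignoring lowmem_reserve, the zone must then hold
 * more than 48 free pages overall, and more than 24 of them in blocks of
 * order 1 or higher.
 */
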
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);
	struct zone *zone;

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		zone = *z;
		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
				break;
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(zone, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(zonelist, gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

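/*
 * Usage sketch (illustrative): most callers reach this path through the
 * alloc_pages() wrappers, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	if (!page)
 *		return -ENOMEM;
 *
 * GFP_KERNEL callers may sleep in the direct-reclaim path above, while
 * GFP_ATOMIC callers bail out early at the !wait checks.
 */
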
/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int sum = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %ld ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	int cpu;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
			       "Cold: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp[0].high,
			       pageset->pcp[0].batch, pageset->pcp[0].count,
			       pageset->pcp[1].high, pageset->pcp[1].batch,
			       pageset->pcp[1].count);
		}
	}

	get_zone_counts(&active, &inactive, &free);

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
		active,
		inactive,
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		nr_free_pages(),
		global_page_state(NR_SLAB_RECLAIMABLE) +
			global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_PAGETABLE));

	for_each_zone(zone) {
		int i;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int __meminit build_zonelists_node(pg_data_t *pgdat,
		struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zonelist->zones[nr_zones++] = zone;
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __meminitdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_online_node(n) {
		cpumask_t tmp;

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

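/*
 * Worked example (illustrative, assuming equal accumulated node_load):
 * picking a fallback for node 0 on a 4-node box where nodes 1 and 2
 * report the same distance, a headless node 2 wins over node 1, because
 * node 1's score is inflated by PENALTY_FOR_NODE_WITH_CPUS while the
 * distance terms are equal.
 */
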
static void __meminit build_zonelists(pg_data_t *pgdat)
{
	int j, node, local_node;
	enum zone_type i;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */

		if (distance != node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < MAX_NR_ZONES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);

			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
			zonelist->zones[j] = NULL;
		}
	}
}

#else	/* CONFIG_NUMA */

static void __meminit build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type i, j;

	local_node = pgdat->node_id;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = build_zonelists_node(pgdat, zonelist, 0, i);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
		}

		zonelist->zones[j] = NULL;
	}
}

#endif	/* CONFIG_NUMA */

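/*
 * Worked example (illustrative): with four online nodes, node 2's
 * zonelists built above order the nodes 2, 3, 0, 1: local zones first,
 * then wrapping around, so overflow is spread instead of always landing
 * on node 0.
 */
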
/* return values int ....just for stop_machine_run() */
static int __meminit __build_all_zonelists(void *dummy)
{
	int nid;
	for_each_online_node(nid)
		build_zonelists(NODE_DATA(nid));
	return 0;
}

void __meminit build_all_zonelists(void)
{
	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		cpuset_init_current_mems_allowed();
	} else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	printk("Built %i zonelists.  Total pages: %ld\n",
			num_online_nodes(), vm_total_pages);
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to determine
 * a suitable size for its wait_table.  So we use the maximum size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
 * or more by the traditional way. (See above).  It equals:
 *
 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
 *    powerpc (64K page size)             : =  (32G +16M)byte.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

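/*
 * Worked example (illustrative, for the !CONFIG_MEMORY_HOTPLUG variant):
 * a zone of 1M pages yields pages / PAGES_PER_WAITQUEUE = 4096, so size
 * doubles up to the 4096 cap; a 64K-page zone yields 256, giving a
 * 256-entry table.
 */
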
/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!early_pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
				unsigned long size)
{
	int order;
	for (order = 0; order < MAX_ORDER; order++) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list);
		zone->free_area[order].nr_free = 0;
	}
}

#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
		unsigned long pfn, unsigned long size)
{
	unsigned long snum = pfn_to_section_nr(pfn);
	unsigned long end = pfn_to_section_nr(pfn + size);

	if (FLAGS_HAS_NODE)
		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
	else
		for (; snum <= end; snum++)
			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif

6292d9aa 1734static int __cpuinit zone_batchsize(struct zone *zone)
e7c8d5c9
CL
1735{
1736 int batch;
1737
1738 /*
1739 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 1740 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
1741 *
1742 * OK, so we don't know how big the cache is. So guess.
1743 */
1744 batch = zone->present_pages / 1024;
ba56e91c
SR
1745 if (batch * PAGE_SIZE > 512 * 1024)
1746 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
1747 batch /= 4; /* We effectively *= 4 below */
1748 if (batch < 1)
1749 batch = 1;
1750
1751 /*
0ceaacc9
NP
1752 * Clamp the batch to a 2^n - 1 value. Having a power
1753 * of 2 value was found to be more likely to have
1754 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 1755 *
0ceaacc9
NP
1756 * For example if 2 tasks are alternately allocating
1757 * batches of pages, one task can end up with a lot
1758 * of pages of one half of the possible page colors
1759 * and the other with pages of the other colors.
e7c8d5c9 1760 */
0ceaacc9 1761 batch = (1 << (fls(batch + batch/2)-1)) - 1;
ba56e91c 1762
e7c8d5c9
CL
1763 return batch;
1764}
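
/*
 * Worked example (editor's addition; example_zone_batchsize() is a
 * hypothetical standalone model of the function above, with fls()
 * expressed via a GCC builtin). For a 1GB zone with 4K pages
 * (262144 present pages): 262144/1024 = 256, capped at 128 pages
 * (512K), divided by 4 to 32, then clamped to 2^n - 1, giving 31.
 */
static int example_zone_batchsize(unsigned long present_pages,
				  unsigned long page_size)
{
	int batch = present_pages / 1024;

	if ((unsigned long)batch * page_size > 512 * 1024)
		batch = (512 * 1024) / page_size;
	batch /= 4;
	if (batch < 1)
		batch = 1;

	/* fls(x) == 32 - __builtin_clz(x) for non-zero 32-bit x */
	batch = (1 << ((32 - __builtin_clz(batch + batch/2)) - 1)) - 1;
	return batch;
}

/* example_zone_batchsize(262144, 4096) == 31 */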

inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp[0];		/* hot */
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);

	pcp = &p->pcp[1];		/* cold */
	pcp->count = 0;
	pcp->high = 2 * batch;
	pcp->batch = max(1UL, batch/2);
	INIT_LIST_HEAD(&pcp->list);
}
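
/*
 * Worked example (editor's addition): with the batch of 31 computed
 * above for a 1GB zone, the hot list drains once it holds
 * high = 6 * 31 = 186 pages, 31 at a time, and the cold list at
 * high = 62, 15 at a time. With batch == 0 (the boot pagesets below),
 * high is 0 and batch is clamped to 1, so pages are handed straight
 * through to the buddy lists.
 */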

/*
 * setup_pagelist_highmark() sets the high water mark for the hot
 * per_cpu_pagelist of the pageset p to the value high.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp[0]; /* hot list */
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}


#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
	struct zone *zone, *dzone;

	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, cpu_to_node(cpu));
		if (!zone_pcp(zone, cpu))
			goto bad;

		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(zone_pcp(zone, cpu),
				(zone->present_pages / percpu_pagelist_fraction));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (dzone == zone)
			break;
		kfree(zone_pcp(dzone, cpu));
		zone_pcp(dzone, cpu) = NULL;
	}
	return -ENOMEM;
}

static inline void free_zone_pagesets(int cpu)
{
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		/* Free per_cpu_pageset if it is slab allocated */
		if (pset != &boot_pageset[cpu])
			kfree(pset);
		zone_pcp(zone, cpu) = NULL;
	}
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
		if (process_zones(cpu))
			ret = NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		free_zone_pagesets(cpu);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block __cpuinitdata pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/* Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}

#endif

static __meminit
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (system_state == SYSTEM_BOOTING) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	int cpu;
	unsigned long batch = zone_batchsize(zone);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
#endif
	}
	if (zone->present_pages)
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
			zone->name, zone->present_pages, batch);
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;
	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);

	zone_init_free_lists(pgdat, zone, zone->spanned_pages);

	return 0;
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
static int __init first_active_region_index_in_nid(int nid)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __init next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative
 */
int __init early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (start_pfn <= pfn && pfn < end_pfn)
			return early_node_map[i].nid;
	}

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
	for (i = first_active_region_index_in_nid(nid); i != -1; \
				i = next_active_region_index_in_nid(i, nid))
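
/*
 * Usage sketch (editor's addition; the example_* names are hypothetical
 * userspace stand-ins for early_node_map and the iterator above). With
 * three ranges, two of them on node 0, the walk visits indices 0 and 2
 * for nid == 0 and all three for nid == EXAMPLE_MAX_NUMNODES.
 */
#include <stdio.h>

struct example_region { int nid; unsigned long start_pfn, end_pfn; };

static struct example_region example_map[] = {
	{ 0, 0, 1024 }, { 1, 4096, 8192 }, { 0, 16384, 32768 },
};
static const int example_entries = 3;
#define EXAMPLE_MAX_NUMNODES 64

static int example_next_index(int index, int nid)
{
	for (index = index + 1; index < example_entries; index++)
		if (nid == EXAMPLE_MAX_NUMNODES || example_map[index].nid == nid)
			return index;
	return -1;
}

static void example_walk(int nid)
{
	int i;

	/* example_next_index(-1, nid) plays the role of "first index" */
	for (i = example_next_index(-1, nid); i != -1;
			i = example_next_index(i, nid))
		printf("range %d: %lu -> %lu\n", i,
			example_map[i].start_pfn, example_map[i].end_pfn);
}
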
/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
{
	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long size_pages = 0;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (early_node_map[i].start_pfn >= max_low_pfn)
			continue;

		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;

		size_pages = end_pfn - early_node_map[i].start_pfn;
		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
				PFN_PHYS(early_node_map[i].start_pfn),
				size_pages << PAGE_SHIFT);
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	int i;

	for_each_active_range_index_in_nid(i, nid)
		memory_present(early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);
}

/**
 * push_node_boundaries - Push node boundaries to at least the requested boundary
 * @nid: The nid of the node to push the boundary for
 * @start_pfn: The start pfn of the node
 * @end_pfn: The end pfn of the node
 *
 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
 * be hotplugged even though no physical memory exists. This function allows
 * an arch to push out the node boundaries so mem_map is allocated that can
 * be used later.
 */
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn)
{
	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
			nid, start_pfn, end_pfn);

	/* Initialise the boundary for this node if necessary */
	if (node_boundary_end_pfn[nid] == 0)
		node_boundary_start_pfn[nid] = -1UL;

	/* Update the boundaries */
	if (node_boundary_start_pfn[nid] > start_pfn)
		node_boundary_start_pfn[nid] = start_pfn;
	if (node_boundary_end_pfn[nid] < end_pfn)
		node_boundary_end_pfn[nid] = end_pfn;
}

/* If necessary, push the node boundary out for reserve hotadd */
static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn)
{
	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
			nid, *start_pfn, *end_pfn);

	/* Return if boundary information has not been provided */
	if (node_boundary_end_pfn[nid] == 0)
		return;

	/* Check the boundaries and update if necessary */
	if (node_boundary_start_pfn[nid] < *start_pfn)
		*start_pfn = node_boundary_start_pfn[nid];
	if (node_boundary_end_pfn[nid] > *end_pfn)
		*end_pfn = node_boundary_end_pfn[nid];
}
#else
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn) {}

static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn) {}
#endif

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	int i;
	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_active_range_index_in_nid(i, nid) {
		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
	}

	if (*start_pfn == -1UL) {
		printk(KERN_WARNING "Node %u active with no memory\n", nid);
		*start_pfn = 0;
	}

	/* Push the node boundaries out if requested */
	account_node_boundary(nid, start_pfn, end_pfn);
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	int i = 0;
	unsigned long prev_end_pfn = 0, hole_pages = 0;
	unsigned long start_pfn;

	/* Find the end_pfn of the first active range of pfns in the node */
	i = first_active_region_index_in_nid(nid);
	if (i == -1)
		return 0;

	/* Account for ranges before physical memory on this node */
	if (early_node_map[i].start_pfn > range_start_pfn)
		hole_pages = early_node_map[i].start_pfn - range_start_pfn;

	prev_end_pfn = early_node_map[i].start_pfn;

	/* Find all holes for the zone within the node */
	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {

		/* No need to continue if prev_end_pfn is outside the zone */
		if (prev_end_pfn >= range_end_pfn)
			break;

		/* Make sure the end of the zone is not within the hole */
		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
		prev_end_pfn = max(prev_end_pfn, range_start_pfn);

		/* Update the hole size count and move on */
		if (start_pfn > range_start_pfn) {
			BUG_ON(prev_end_pfn > start_pfn);
			hole_pages += start_pfn - prev_end_pfn;
		}
		prev_end_pfn = early_node_map[i].end_pfn;
	}

	/* Account for ranges past physical memory on this node */
	if (range_end_pfn > prev_end_pfn)
		hole_pages += range_end_pfn -
				max(range_start_pfn, prev_end_pfn);

	return hole_pages;
}
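
/*
 * Worked example (editor's addition; example_absent_pages() is a
 * hypothetical standalone model of the accounting above). With active
 * ranges [100, 200) and [300, 400) on one node, a query over [0, 500)
 * accumulates 100 pages of hole before the first range, 100 between
 * the ranges and 100 past the last one: 300 pages in total.
 */
static unsigned long example_absent_pages(const unsigned long (*map)[2],
		int n, unsigned long range_start, unsigned long range_end)
{
	unsigned long prev_end = map[0][0], holes = 0, start;
	int i;

	if (map[0][0] > range_start)		/* leading hole */
		holes = map[0][0] - range_start;

	for (i = 0; i < n && prev_end < range_end; i++) {
		start = map[i][0] < range_end ? map[i][0] : range_end;
		if (start > range_start && start > prev_end)
			holes += start - (prev_end > range_start ?
						prev_end : range_start);
		prev_end = map[i][1];
	}

	if (range_end > prev_end)		/* trailing hole */
		holes += range_end - (range_start > prev_end ?
						range_start : prev_end);
	return holes;
}

/*
 * static const unsigned long map[][2] = { { 100, 200 }, { 300, 400 } };
 * example_absent_pages(map, 2, 0, 500) == 300
 */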

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
unsigned long __init zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
			node_start_pfn);
	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
			node_end_pfn);

	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else
static inline unsigned long zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __meminit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			printk(KERN_DEBUG
				"  %s zone: %lu pages used for memmap\n",
				zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				"  %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved DMA pages */
		if (j == ZONE_DMA && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG "  DMA zone: %lu pages reserved\n",
								dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		zap_zone_vm_stats(zone);
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		zonetable_add(zone, nid, j, zone_start_pfn, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
		BUG_ON(ret);
		zone_start_pfn += size;
	}
}

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= pgdat->node_start_pfn;
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
			  "%d entries of %d used\n",
			  nid, start_pfn, end_pfn,
			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].end_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
						MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}
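
/*
 * Worked example (editor's addition): starting from a single
 * registration of [0, 1024) on node 0, add_active_range(0, 1024, 2048)
 * takes the "merge forward" branch, leaving one region [0, 2048);
 * a later add_active_range(0, 256, 512) is swallowed by the "already
 * covered" check; a disjoint add_active_range(0, 4096, 8192) matches
 * none of the merge tests and appends a new entry.
 */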

/**
 * shrink_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @old_end_pfn: The old end PFN of the range
 * @new_end_pfn: The new PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end of the physical page range that has already
 * been registered with add_active_range(). This function allows an arch to
 * shrink an existing registered range.
 */
void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
						unsigned long new_end_pfn)
{
	int i;

	/* Find the old active region end and shrink */
	for_each_active_range_index_in_nid(i, nid)
		if (early_node_map[i].end_pfn == old_end_pfn) {
			early_node_map[i].end_pfn = new_end_pfn;
			break;
		}
}

/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
{
	memset(early_node_map, 0, sizeof(early_node_map));
	nr_nodemap_entries = 0;
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
}

/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
	struct node_active_region *arange = (struct node_active_region *)a;
	struct node_active_region *brange = (struct node_active_region *)b;

	/* Done this way to avoid overflows */
	if (arange->start_pfn > brange->start_pfn)
		return 1;
	if (arange->start_pfn < brange->start_pfn)
		return -1;

	return 0;
}

/* sort the node_map by start_pfn */
static void __init sort_node_map(void)
{
	sort(early_node_map, (size_t)nr_nodemap_entries,
			sizeof(struct node_active_region),
			cmp_node_active_region, NULL);
}

/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
	int i;

	/* Assuming a sorted map, the first range found has the starting pfn */
	for_each_active_range_index_in_nid(i, nid)
		return early_node_map[i].start_pfn;

	printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
	return 0;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/**
 * find_max_pfn_with_active_regions - Find the maximum PFN registered
 *
 * It returns the maximum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_max_pfn_with_active_regions(void)
{
	int i;
	unsigned long max_pfn = 0;

	for (i = 0; i < nr_nodemap_entries; i++)
		max_pfn = max(max_pfn, early_node_map[i].end_pfn);

	return max_pfn;
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs of
 * two adjacent zones match, it is assumed that the higher zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long nid;
	enum zone_type i;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
			sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
			sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}

	/* Regions in the early_node_map can be in any order */
	sort_node_map();

	/* Print out the zone ranges */
	printk("Zone PFN ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++)
		printk("  %-8s %8lu -> %8lu\n",
				zone_names[i],
				arch_zone_lowest_possible_pfn[i],
				arch_zone_highest_possible_pfn[i]);

	/* Print out the early_node_map[] */
	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
	for (i = 0; i < nr_nodemap_entries; i++)
		printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);

	/* Initialise every node */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, pgdat, NULL,
				find_min_pfn_for_node(nid), NULL);
	}
}
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		local_irq_disable();
		__drain_pages(cpu);
		vm_events_fold_cpu(cpu);
		local_irq_enable();
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat pages_high as reserved pages. */
			max += zone->pages_high;

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
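
/*
 * Worked example (editor's addition; example_lowmem_reserve() is a
 * hypothetical standalone model of the loop above). For the
 * 16M DMA / 784M NORMAL / 224M HIGHMEM layout with 4K pages and the
 * default ratios {256, 32}, a NORMAL allocation leaves
 * 200704/256 = 784 pages of ZONE_DMA untouched, and a HIGHMEM
 * allocation leaves 57344/32 = 1792 pages of ZONE_NORMAL.
 */
static void example_lowmem_reserve(void)
{
	unsigned long present[3] = { 4096, 200704, 57344 };
	unsigned long ratio[2] = { 256, 32 };
	unsigned long reserve[3][3] = { { 0 } };
	int j, idx;

	for (j = 0; j < 3; j++) {
		unsigned long pages = present[j];

		for (idx = j - 1; idx >= 0; idx--) {
			reserve[idx][j] = pages / ratio[idx];
			pages += present[idx];
		}
	}
	/* reserve[0][1] == 784, reserve[1][2] == 1792,
	 * reserve[0][2] == (57344 + 200704) / 256 == 1008 */
}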

/**
 * setup_per_zone_pages_min - called when min_free_kbytes changes.
 *
 * Ensures that the pages_{min,low,high} values for each zone are set correctly
 * with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high-pages_low) and (pages_low-pages_min)
			 * deltas control asynch page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low   = zone->pages_min + (tmp >> 2);
		zone->pages_high  = zone->pages_min + (tmp >> 1);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
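
/*
 * Worked example (editor's addition): on a 1GB machine min_free_kbytes
 * comes out at 4096k (see the table below), so pages_min above is
 * 4096 >> 2 = 1024 pages with 4K pages. A lowmem zone holding all of
 * that memory then gets pages_min = 1024,
 * pages_low = 1024 + 1024/4 = 1280 and pages_high = 1024 + 1024/2 = 1536.
 */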

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)
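
/*
 * Sanity-check sketch (editor's addition; example_min_free_kbytes() is
 * a hypothetical userspace stand-in using libm sqrt() for the kernel's
 * int_sqrt(); link with -lm). 1GB of lowmem is 1048576 kbytes and
 * sqrt(1048576 * 16) = 4096k, matching the table above.
 */
#include <math.h>

static unsigned long example_min_free_kbytes(unsigned long lowmem_kbytes)
{
	unsigned long v = (unsigned long)sqrt((double)lowmem_kbytes * 16);

	if (v < 128)
		v = 128;
	if (v > 65536)
		v = 65536;
	return v;
}

/* example_min_free_kbytes(1048576) == 4096 */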

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * pages_min watermarks. The lowmem reserve ratio can only make sense
 * as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */

int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_zone(zone) {
		for_each_online_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
		}
	}
	return 0;
}

int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = long_log2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;
			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void*) __get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       long_log2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
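
/*
 * Worked example (editor's addition; example_hash_entries() is a
 * hypothetical standalone model of the sizing above, assuming values
 * below 2^32). With 262144 low-memory pages (1GB of 4K pages) and
 * scale = 13, i.e. one bucket per 8K of lowmem, the table gets
 * 131072 buckets - already a power of two - so the hash shift is 17.
 */
static unsigned long example_hash_entries(unsigned long nr_pages,
					  int page_shift, int scale)
{
	unsigned long numentries = nr_pages;

	/* round the applicable memory size up to the nearest megabyte */
	numentries += (1UL << (20 - page_shift)) - 1;
	numentries >>= 20 - page_shift;
	numentries <<= 20 - page_shift;

	/* limit to 1 bucket per 2^scale bytes of low memory */
	if (scale > page_shift)
		numentries >>= (scale - page_shift);
	else
		numentries <<= (page_shift - scale);

	/* round up to a power of two (32-bit bit-smear) */
	numentries--;
	numentries |= numentries >> 1;
	numentries |= numentries >> 2;
	numentries |= numentries >> 4;
	numentries |= numentries >> 8;
	numentries |= numentries >> 16;
	return numentries + 1;
}

/* example_hash_entries(262144, 12, 13) == 131072 */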

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */