1 /*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h> /* for try_to_release_page(),
27 buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39
40 #include <asm/tlbflush.h>
41 #include <asm/div64.h>
42
43 #include <linux/swapops.h>
44
45 #include "internal.h"
46
47 struct scan_control {
48 /* Incremented by the number of inactive pages that were scanned */
49 unsigned long nr_scanned;
50
51 /* This context's GFP mask */
52 gfp_t gfp_mask;
53
54 int may_writepage;
55
56 /* Can pages be swapped as part of reclaim? */
57 int may_swap;
58
59 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
60 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
61 * In this context, it doesn't matter that we scan the
62 * whole list at once. */
63 int swap_cluster_max;
64
65 int swappiness;
66
67 int all_unreclaimable;
68 };
69
70 /*
71 * The list of shrinker callbacks used to apply pressure to
72 * ageable caches.
73 */
74 struct shrinker {
75 shrinker_t shrinker;
76 struct list_head list;
77 int seeks; /* seeks to recreate an obj */
78 long nr; /* objs pending delete */
79 };
80
81 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
82
83 #ifdef ARCH_HAS_PREFETCH
84 #define prefetch_prev_lru_page(_page, _base, _field) \
85 do { \
86 if ((_page)->lru.prev != _base) { \
87 struct page *prev; \
88 \
89 prev = lru_to_page(&(_page->lru)); \
90 prefetch(&prev->_field); \
91 } \
92 } while (0)
93 #else
94 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
95 #endif
96
97 #ifdef ARCH_HAS_PREFETCHW
98 #define prefetchw_prev_lru_page(_page, _base, _field) \
99 do { \
100 if ((_page)->lru.prev != _base) { \
101 struct page *prev; \
102 \
103 prev = lru_to_page(&(_page->lru)); \
104 prefetchw(&prev->_field); \
105 } \
106 } while (0)
107 #else
108 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
109 #endif
110
111 /*
112 * From 0 .. 100. Higher means more swappy.
113 */
114 int vm_swappiness = 60;
115 long vm_total_pages; /* The total number of pages which the VM controls */
116
117 static LIST_HEAD(shrinker_list);
118 static DECLARE_RWSEM(shrinker_rwsem);
119
120 /*
121 * Add a shrinker callback to be called from the vm
122 */
123 struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
124 {
125 struct shrinker *shrinker;
126
127 shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
128 if (shrinker) {
129 shrinker->shrinker = theshrinker;
130 shrinker->seeks = seeks;
131 shrinker->nr = 0;
132 down_write(&shrinker_rwsem);
133 list_add_tail(&shrinker->list, &shrinker_list);
134 up_write(&shrinker_rwsem);
135 }
136 return shrinker;
137 }
138 EXPORT_SYMBOL(set_shrinker);
139
140 /*
141 * Remove one
142 */
143 void remove_shrinker(struct shrinker *shrinker)
144 {
145 down_write(&shrinker_rwsem);
146 list_del(&shrinker->list);
147 up_write(&shrinker_rwsem);
148 kfree(shrinker);
149 }
150 EXPORT_SYMBOL(remove_shrinker);
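/*
* For illustration only: my_cache_count() and my_cache_prune() below are
* hypothetical helpers, not kernel functions. A cache would typically hook
* into this machinery along these lines:
*
*	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
*	{
*		if (nr_to_scan)
*			my_cache_prune(nr_to_scan);
*		return my_cache_count();
*	}
*
*	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
*	...
*	remove_shrinker(my_shrinker);
*
* A call with nr_to_scan == 0 is only a query for the number of freeable
* objects; returning -1 tells shrink_slab() to stop calling this shrinker
* for the current pass.
*/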
151
152 #define SHRINK_BATCH 128
153 /*
154 * Call the shrink functions to age shrinkable caches
155 *
156 * Here we assume it costs one seek to replace a lru page and that it also
157 * takes a seek to recreate a cache object. With this in mind we age equal
158 * percentages of the lru and ageable caches. This should balance the seeks
159 * generated by these structures.
160 *
161 * If the vm encounters mapped pages on the LRU, it increases the pressure on
162 * slab to avoid swapping.
163 *
164 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
165 *
166 * `lru_pages' represents the number of on-LRU pages in all the zones which
167 * are eligible for the caller's allocation attempt. It is used for balancing
168 * slab reclaim versus page reclaim.
169 *
170 * Returns the number of slab objects which we shrunk.
171 */
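/*
* A worked example with made-up numbers: if this reclaim pass scanned 1024
* LRU pages out of lru_pages = 100000, and a shrinker with seeks = 2
* reports max_pass = 50000 freeable objects, then
*
*	delta = (4 * 1024 / 2) * 50000 / (100000 + 1) ~= 1024
*
* so about 1024 objects are added to that shrinker's pending count, which
* the loop below then works off in SHRINK_BATCH (128) sized calls to the
* callback; eight of them in this case.
*/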
172 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
173 unsigned long lru_pages)
174 {
175 struct shrinker *shrinker;
176 unsigned long ret = 0;
177
178 if (scanned == 0)
179 scanned = SWAP_CLUSTER_MAX;
180
181 if (!down_read_trylock(&shrinker_rwsem))
182 return 1; /* Assume we'll be able to shrink next time */
183
184 list_for_each_entry(shrinker, &shrinker_list, list) {
185 unsigned long long delta;
186 unsigned long total_scan;
187 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
188
189 delta = (4 * scanned) / shrinker->seeks;
190 delta *= max_pass;
191 do_div(delta, lru_pages + 1);
192 shrinker->nr += delta;
193 if (shrinker->nr < 0) {
194 printk(KERN_ERR "%s: nr=%ld\n",
195 __FUNCTION__, shrinker->nr);
196 shrinker->nr = max_pass;
197 }
198
199 /*
200 * Avoid risking looping forever due to too large nr value:
201 * never try to free more than twice the estimate number of
202 * freeable entries.
203 */
204 if (shrinker->nr > max_pass * 2)
205 shrinker->nr = max_pass * 2;
206
207 total_scan = shrinker->nr;
208 shrinker->nr = 0;
209
210 while (total_scan >= SHRINK_BATCH) {
211 long this_scan = SHRINK_BATCH;
212 int shrink_ret;
213 int nr_before;
214
215 nr_before = (*shrinker->shrinker)(0, gfp_mask);
216 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
217 if (shrink_ret == -1)
218 break;
219 if (shrink_ret < nr_before)
220 ret += nr_before - shrink_ret;
221 count_vm_events(SLABS_SCANNED, this_scan);
222 total_scan -= this_scan;
223
224 cond_resched();
225 }
226
227 shrinker->nr += total_scan;
228 }
229 up_read(&shrinker_rwsem);
230 return ret;
231 }
232
233 /* Called without lock on whether page is mapped, so answer is unstable */
234 static inline int page_mapping_inuse(struct page *page)
235 {
236 struct address_space *mapping;
237
238 /* Page is in somebody's page tables. */
239 if (page_mapped(page))
240 return 1;
241
242 /* Be more reluctant to reclaim swapcache than pagecache */
243 if (PageSwapCache(page))
244 return 1;
245
246 mapping = page_mapping(page);
247 if (!mapping)
248 return 0;
249
250 /* File is mmap'd by somebody? */
251 return mapping_mapped(mapping);
252 }
253
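/*
* A freeable page cache page is expected to hold exactly two references
* here: one from the page cache itself and one taken by the caller when
* the page was isolated from the LRU. Buffer heads attached at
* page->private account for one more, hence the PagePrivate() term.
*/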
254 static inline int is_page_cache_freeable(struct page *page)
255 {
256 return page_count(page) - !!PagePrivate(page) == 2;
257 }
258
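/*
* Writeback from reclaim is allowed when the current task writes swap for
* a living (PF_SWAPWRITE, as kswapd does), when the queue is not
* congested, or when the task is dirtying this queue itself
* (current->backing_dev_info), in which case blocking on it is its own
* form of throttling.
*/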
259 static int may_write_to_queue(struct backing_dev_info *bdi)
260 {
261 if (current->flags & PF_SWAPWRITE)
262 return 1;
263 if (!bdi_write_congested(bdi))
264 return 1;
265 if (bdi == current->backing_dev_info)
266 return 1;
267 return 0;
268 }
269
270 /*
271 * We detected a synchronous write error writing a page out. Probably
272 * -ENOSPC. We need to propagate that into the address_space for a subsequent
273 * fsync(), msync() or close().
274 *
275 * The tricky part is that after writepage we cannot touch the mapping: nothing
276 * prevents it from being freed up. But we have a ref on the page and once
277 * that page is locked, the mapping is pinned.
278 *
279 * We're allowed to run sleeping lock_page() here because we know the caller has
280 * __GFP_FS.
281 */
282 static void handle_write_error(struct address_space *mapping,
283 struct page *page, int error)
284 {
285 lock_page(page);
286 if (page_mapping(page) == mapping) {
287 if (error == -ENOSPC)
288 set_bit(AS_ENOSPC, &mapping->flags);
289 else
290 set_bit(AS_EIO, &mapping->flags);
291 }
292 unlock_page(page);
293 }
294
295 /* possible outcome of pageout() */
296 typedef enum {
297 /* failed to write page out, page is locked */
298 PAGE_KEEP,
299 /* move page to the active list, page is locked */
300 PAGE_ACTIVATE,
301 /* page has been sent to the disk successfully, page is unlocked */
302 PAGE_SUCCESS,
303 /* page is clean and locked */
304 PAGE_CLEAN,
305 } pageout_t;
306
307 /*
308 * pageout is called by shrink_page_list() for each dirty page.
309 * Calls ->writepage().
310 */
311 static pageout_t pageout(struct page *page, struct address_space *mapping)
312 {
313 /*
314 * If the page is dirty, only perform writeback if that write
315 * will be non-blocking, to prevent this allocation from being
316 * stalled by pagecache activity. But note that there may be
317 * stalls if we need to run get_block(). We could test
318 * PagePrivate for that.
319 *
320 * If this process is currently in generic_file_write() against
321 * this page's queue, we can perform writeback even if that
322 * will block.
323 *
324 * If the page is swapcache, write it back even if that would
325 * block, for some throttling. This happens by accident, because
326 * swap_backing_dev_info is bust: it doesn't reflect the
327 * congestion state of the swapdevs. Easy to fix, if needed.
328 * See swapfile.c:page_queue_congested().
329 */
330 if (!is_page_cache_freeable(page))
331 return PAGE_KEEP;
332 if (!mapping) {
333 /*
334 * Some data journaling orphaned pages can have
335 * page->mapping == NULL while being dirty with clean buffers.
336 */
337 if (PagePrivate(page)) {
338 if (try_to_free_buffers(page)) {
339 ClearPageDirty(page);
340 printk("%s: orphaned page\n", __FUNCTION__);
341 return PAGE_CLEAN;
342 }
343 }
344 return PAGE_KEEP;
345 }
346 if (mapping->a_ops->writepage == NULL)
347 return PAGE_ACTIVATE;
348 if (!may_write_to_queue(mapping->backing_dev_info))
349 return PAGE_KEEP;
350
351 if (clear_page_dirty_for_io(page)) {
352 int res;
353 struct writeback_control wbc = {
354 .sync_mode = WB_SYNC_NONE,
355 .nr_to_write = SWAP_CLUSTER_MAX,
356 .range_start = 0,
357 .range_end = LLONG_MAX,
358 .nonblocking = 1,
359 .for_reclaim = 1,
360 };
361
362 SetPageReclaim(page);
363 res = mapping->a_ops->writepage(page, &wbc);
364 if (res < 0)
365 handle_write_error(mapping, page, res);
366 if (res == AOP_WRITEPAGE_ACTIVATE) {
367 ClearPageReclaim(page);
368 return PAGE_ACTIVATE;
369 }
370 if (!PageWriteback(page)) {
371 /* synchronous write or broken a_ops? */
372 ClearPageReclaim(page);
373 }
374 inc_zone_page_state(page, NR_VMSCAN_WRITE);
375 return PAGE_SUCCESS;
376 }
377
378 return PAGE_CLEAN;
379 }
380
381 int remove_mapping(struct address_space *mapping, struct page *page)
382 {
383 BUG_ON(!PageLocked(page));
384 BUG_ON(mapping != page_mapping(page));
385
386 write_lock_irq(&mapping->tree_lock);
387
388 /*
389 * The non-racy check for a busy page. It is critical to check
390 * PageDirty _after_ making sure that the page is freeable and
391 * not in use by anybody. (pagecache + us == 2)
392 */
393 if (unlikely(page_count(page) != 2))
394 goto cannot_free;
395 smp_rmb();
396 if (unlikely(PageDirty(page)))
397 goto cannot_free;
398
399 if (PageSwapCache(page)) {
400 swp_entry_t swap = { .val = page_private(page) };
401 __delete_from_swap_cache(page);
402 write_unlock_irq(&mapping->tree_lock);
403 swap_free(swap);
404 __put_page(page); /* The pagecache ref */
405 return 1;
406 }
407
408 __remove_from_page_cache(page);
409 write_unlock_irq(&mapping->tree_lock);
410 __put_page(page);
411 return 1;
412
413 cannot_free:
414 write_unlock_irq(&mapping->tree_lock);
415 return 0;
416 }
417
418 /*
419 * shrink_page_list() returns the number of reclaimed pages
420 */
421 static unsigned long shrink_page_list(struct list_head *page_list,
422 struct scan_control *sc)
423 {
424 LIST_HEAD(ret_pages);
425 struct pagevec freed_pvec;
426 int pgactivate = 0;
427 unsigned long nr_reclaimed = 0;
428
429 cond_resched();
430
431 pagevec_init(&freed_pvec, 1);
432 while (!list_empty(page_list)) {
433 struct address_space *mapping;
434 struct page *page;
435 int may_enter_fs;
436 int referenced;
437
438 cond_resched();
439
440 page = lru_to_page(page_list);
441 list_del(&page->lru);
442
443 if (TestSetPageLocked(page))
444 goto keep;
445
446 VM_BUG_ON(PageActive(page));
447
448 sc->nr_scanned++;
449
450 if (!sc->may_swap && page_mapped(page))
451 goto keep_locked;
452
453 /* Double the slab pressure for mapped and swapcache pages */
454 if (page_mapped(page) || PageSwapCache(page))
455 sc->nr_scanned++;
456
457 if (PageWriteback(page))
458 goto keep_locked;
459
460 referenced = page_referenced(page, 1);
461 /* In active use or really unfreeable? Activate it. */
462 if (referenced && page_mapping_inuse(page))
463 goto activate_locked;
464
465 #ifdef CONFIG_SWAP
466 /*
467 * Anonymous process memory has backing store?
468 * Try to allocate it some swap space here.
469 */
470 if (PageAnon(page) && !PageSwapCache(page))
471 if (!add_to_swap(page, GFP_ATOMIC))
472 goto activate_locked;
473 #endif /* CONFIG_SWAP */
474
475 mapping = page_mapping(page);
476 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
477 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
478
479 /*
480 * The page is mapped into the page tables of one or more
481 * processes. Try to unmap it here.
482 */
483 if (page_mapped(page) && mapping) {
484 switch (try_to_unmap(page, 0)) {
485 case SWAP_FAIL:
486 goto activate_locked;
487 case SWAP_AGAIN:
488 goto keep_locked;
489 case SWAP_SUCCESS:
490 ; /* try to free the page below */
491 }
492 }
493
494 if (PageDirty(page)) {
495 if (referenced)
496 goto keep_locked;
497 if (!may_enter_fs)
498 goto keep_locked;
499 if (!sc->may_writepage)
500 goto keep_locked;
501
502 /* Page is dirty, try to write it out here */
503 switch(pageout(page, mapping)) {
504 case PAGE_KEEP:
505 goto keep_locked;
506 case PAGE_ACTIVATE:
507 goto activate_locked;
508 case PAGE_SUCCESS:
509 if (PageWriteback(page) || PageDirty(page))
510 goto keep;
511 /*
512 * A synchronous write - probably a ramdisk. Go
513 * ahead and try to reclaim the page.
514 */
515 if (TestSetPageLocked(page))
516 goto keep;
517 if (PageDirty(page) || PageWriteback(page))
518 goto keep_locked;
519 mapping = page_mapping(page);
520 case PAGE_CLEAN:
521 ; /* try to free the page below */
522 }
523 }
524
525 /*
526 * If the page has buffers, try to free the buffer mappings
527 * associated with this page. If we succeed we try to free
528 * the page as well.
529 *
530 * We do this even if the page is PageDirty().
531 * try_to_release_page() does not perform I/O, but it is
532 * possible for a page to have PageDirty set, but it is actually
533 * clean (all its buffers are clean). This happens if the
534 * buffers were written out directly, with submit_bh(). ext3
535 * will do this, as well as the blockdev mapping.
536 * try_to_release_page() will discover that cleanness and will
537 * drop the buffers and mark the page clean - it can be freed.
538 *
539 * Rarely, pages can have buffers and no ->mapping. These are
540 * the pages which were not successfully invalidated in
541 * truncate_complete_page(). We try to drop those buffers here
542 * and if that worked, and the page is no longer mapped into
543 * process address space (page_count == 1) it can be freed.
544 * Otherwise, leave the page on the LRU so it is swappable.
545 */
546 if (PagePrivate(page)) {
547 if (!try_to_release_page(page, sc->gfp_mask))
548 goto activate_locked;
549 if (!mapping && page_count(page) == 1)
550 goto free_it;
551 }
552
553 if (!mapping || !remove_mapping(mapping, page))
554 goto keep_locked;
555
556 free_it:
557 unlock_page(page);
558 nr_reclaimed++;
559 if (!pagevec_add(&freed_pvec, page))
560 __pagevec_release_nonlru(&freed_pvec);
561 continue;
562
563 activate_locked:
564 SetPageActive(page);
565 pgactivate++;
566 keep_locked:
567 unlock_page(page);
568 keep:
569 list_add(&page->lru, &ret_pages);
570 VM_BUG_ON(PageLRU(page));
571 }
572 list_splice(&ret_pages, page_list);
573 if (pagevec_count(&freed_pvec))
574 __pagevec_release_nonlru(&freed_pvec);
575 count_vm_events(PGACTIVATE, pgactivate);
576 return nr_reclaimed;
577 }
578
579 /*
580 * zone->lru_lock is heavily contended. Some of the functions that
581 * shrink the lists perform better by taking out a batch of pages
582 * and working on them outside the LRU lock.
583 *
584 * For pagecache intensive workloads, this function is the hottest
585 * spot in the kernel (apart from copy_*_user functions).
586 *
587 * Appropriate locks must be held before calling this function.
588 *
589 * @nr_to_scan: The number of pages to look through on the list.
590 * @src: The LRU list to pull pages off.
591 * @dst: The temp list to put pages on to.
592 * @scanned: The number of pages that were scanned.
593 *
594 * returns how many pages were moved onto *@dst.
595 */
596 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
597 struct list_head *src, struct list_head *dst,
598 unsigned long *scanned)
599 {
600 unsigned long nr_taken = 0;
601 struct page *page;
602 unsigned long scan;
603
604 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
605 struct list_head *target;
606 page = lru_to_page(src);
607 prefetchw_prev_lru_page(page, src, flags);
608
609 VM_BUG_ON(!PageLRU(page));
610
611 list_del(&page->lru);
612 target = src;
613 if (likely(get_page_unless_zero(page))) {
614 /*
615 * Be careful not to clear PageLRU until after we're
616 * sure the page is not being freed elsewhere -- the
617 * page release code relies on it.
618 */
619 ClearPageLRU(page);
620 target = dst;
621 nr_taken++;
622 } /* else it is being freed elsewhere */
623
624 list_add(&page->lru, target);
625 }
626
627 *scanned = scan;
628 return nr_taken;
629 }
630
631 /*
632 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
633 * of reclaimed pages
634 */
635 static unsigned long shrink_inactive_list(unsigned long max_scan,
636 struct zone *zone, struct scan_control *sc)
637 {
638 LIST_HEAD(page_list);
639 struct pagevec pvec;
640 unsigned long nr_scanned = 0;
641 unsigned long nr_reclaimed = 0;
642
643 pagevec_init(&pvec, 1);
644
645 lru_add_drain();
646 spin_lock_irq(&zone->lru_lock);
647 do {
648 struct page *page;
649 unsigned long nr_taken;
650 unsigned long nr_scan;
651 unsigned long nr_freed;
652
653 nr_taken = isolate_lru_pages(sc->swap_cluster_max,
654 &zone->inactive_list,
655 &page_list, &nr_scan);
656 zone->nr_inactive -= nr_taken;
657 zone->pages_scanned += nr_scan;
658 spin_unlock_irq(&zone->lru_lock);
659
660 nr_scanned += nr_scan;
661 nr_freed = shrink_page_list(&page_list, sc);
662 nr_reclaimed += nr_freed;
663 local_irq_disable();
664 if (current_is_kswapd()) {
665 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
666 __count_vm_events(KSWAPD_STEAL, nr_freed);
667 } else
668 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
669 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
670
671 if (nr_taken == 0)
672 goto done;
673
674 spin_lock(&zone->lru_lock);
675 /*
676 * Put back any unfreeable pages.
677 */
678 while (!list_empty(&page_list)) {
679 page = lru_to_page(&page_list);
680 VM_BUG_ON(PageLRU(page));
681 SetPageLRU(page);
682 list_del(&page->lru);
683 if (PageActive(page))
684 add_page_to_active_list(zone, page);
685 else
686 add_page_to_inactive_list(zone, page);
687 if (!pagevec_add(&pvec, page)) {
688 spin_unlock_irq(&zone->lru_lock);
689 __pagevec_release(&pvec);
690 spin_lock_irq(&zone->lru_lock);
691 }
692 }
693 } while (nr_scanned < max_scan);
694 spin_unlock(&zone->lru_lock);
695 done:
696 local_irq_enable();
697 pagevec_release(&pvec);
698 return nr_reclaimed;
699 }
700
701 static inline int zone_is_near_oom(struct zone *zone)
702 {
703 return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
704 }
705
706 /*
707 * This moves pages from the active list to the inactive list.
708 *
709 * We move them the other way if the page is referenced by one or more
710 * processes, from rmap.
711 *
712 * If the pages are mostly unmapped, the processing is fast and it is
713 * appropriate to hold zone->lru_lock across the whole operation. But if
714 * the pages are mapped, the processing is slow (page_referenced()) so we
715 * should drop zone->lru_lock around each page. It's impossible to balance
716 * this, so instead we remove the pages from the LRU while processing them.
717 * It is safe to rely on PG_active against the non-LRU pages in here because
718 * nobody will play with that bit on a non-LRU page.
719 *
720 * The downside is that we have to touch page->_count against each page.
721 * But we had to alter page->flags anyway.
722 */
723 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
724 struct scan_control *sc)
725 {
726 unsigned long pgmoved;
727 int pgdeactivate = 0;
728 unsigned long pgscanned;
729 LIST_HEAD(l_hold); /* The pages which were snipped off */
730 LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
731 LIST_HEAD(l_active); /* Pages to go onto the active_list */
732 struct page *page;
733 struct pagevec pvec;
734 int reclaim_mapped = 0;
735
736 if (sc->may_swap) {
737 long mapped_ratio;
738 long distress;
739 long swap_tendency;
740
741 if (zone_is_near_oom(zone))
742 goto force_reclaim_mapped;
743
744 /*
745 * `distress' is a measure of how much trouble we're having
746 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
747 */
748 distress = 100 >> zone->prev_priority;
749
750 /*
751 * The point of this algorithm is to decide when to start
752 * reclaiming mapped memory instead of just pagecache. Work out
753 * how much memory
754 * is mapped.
755 */
756 mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
757 global_page_state(NR_ANON_PAGES)) * 100) /
758 vm_total_pages;
759
760 /*
761 * Now decide how much we really want to unmap some pages. The
762 * mapped ratio is downgraded - just because there's a lot of
763 * mapped memory doesn't necessarily mean that page reclaim
764 * isn't succeeding.
765 *
766 * The distress ratio is important - we don't want to start
767 * going oom.
768 *
769 * A 100% value of vm_swappiness overrides this algorithm
770 * altogether.
771 */
772 swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
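/*
* For example, with the default swappiness of 60 and no distress
* (prev_priority == DEF_PRIORITY, so distress == 0), mapped pages only
* start being deactivated once roughly 80% of memory is mapped
* (80/2 + 0 + 60 >= 100). If reclaim is struggling and prev_priority has
* dropped to 2, distress is 25 and a mapped_ratio of 30% already suffices.
*/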
773
774 /*
775 * Now use this metric to decide whether to start moving mapped
776 * memory onto the inactive list.
777 */
778 if (swap_tendency >= 100)
779 force_reclaim_mapped:
780 reclaim_mapped = 1;
781 }
782
783 lru_add_drain();
784 spin_lock_irq(&zone->lru_lock);
785 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
786 &l_hold, &pgscanned);
787 zone->pages_scanned += pgscanned;
788 zone->nr_active -= pgmoved;
789 spin_unlock_irq(&zone->lru_lock);
790
791 while (!list_empty(&l_hold)) {
792 cond_resched();
793 page = lru_to_page(&l_hold);
794 list_del(&page->lru);
795 if (page_mapped(page)) {
796 if (!reclaim_mapped ||
797 (total_swap_pages == 0 && PageAnon(page)) ||
798 page_referenced(page, 0)) {
799 list_add(&page->lru, &l_active);
800 continue;
801 }
802 }
803 list_add(&page->lru, &l_inactive);
804 }
805
806 pagevec_init(&pvec, 1);
807 pgmoved = 0;
808 spin_lock_irq(&zone->lru_lock);
809 while (!list_empty(&l_inactive)) {
810 page = lru_to_page(&l_inactive);
811 prefetchw_prev_lru_page(page, &l_inactive, flags);
812 VM_BUG_ON(PageLRU(page));
813 SetPageLRU(page);
814 VM_BUG_ON(!PageActive(page));
815 ClearPageActive(page);
816
817 list_move(&page->lru, &zone->inactive_list);
818 pgmoved++;
819 if (!pagevec_add(&pvec, page)) {
820 zone->nr_inactive += pgmoved;
821 spin_unlock_irq(&zone->lru_lock);
822 pgdeactivate += pgmoved;
823 pgmoved = 0;
824 if (buffer_heads_over_limit)
825 pagevec_strip(&pvec);
826 __pagevec_release(&pvec);
827 spin_lock_irq(&zone->lru_lock);
828 }
829 }
830 zone->nr_inactive += pgmoved;
831 pgdeactivate += pgmoved;
832 if (buffer_heads_over_limit) {
833 spin_unlock_irq(&zone->lru_lock);
834 pagevec_strip(&pvec);
835 spin_lock_irq(&zone->lru_lock);
836 }
837
838 pgmoved = 0;
839 while (!list_empty(&l_active)) {
840 page = lru_to_page(&l_active);
841 prefetchw_prev_lru_page(page, &l_active, flags);
842 VM_BUG_ON(PageLRU(page));
843 SetPageLRU(page);
844 VM_BUG_ON(!PageActive(page));
845 list_move(&page->lru, &zone->active_list);
846 pgmoved++;
847 if (!pagevec_add(&pvec, page)) {
848 zone->nr_active += pgmoved;
849 pgmoved = 0;
850 spin_unlock_irq(&zone->lru_lock);
851 __pagevec_release(&pvec);
852 spin_lock_irq(&zone->lru_lock);
853 }
854 }
855 zone->nr_active += pgmoved;
856
857 __count_zone_vm_events(PGREFILL, zone, pgscanned);
858 __count_vm_events(PGDEACTIVATE, pgdeactivate);
859 spin_unlock_irq(&zone->lru_lock);
860
861 pagevec_release(&pvec);
862 }
863
864 /*
865 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
866 */
867 static unsigned long shrink_zone(int priority, struct zone *zone,
868 struct scan_control *sc)
869 {
870 unsigned long nr_active;
871 unsigned long nr_inactive;
872 unsigned long nr_to_scan;
873 unsigned long nr_reclaimed = 0;
874
875 atomic_inc(&zone->reclaim_in_progress);
876
877 /*
878 * Add one to `nr_to_scan' just to make sure that the kernel will
879 * slowly sift through the active list.
880 */
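/*
* For example, at DEF_PRIORITY (12) a zone with 8192 active pages adds
* (8192 >> 12) + 1 = 3 to nr_scan_active on each call; nothing is scanned
* until the accumulated count reaches sc->swap_cluster_max (normally
* SWAP_CLUSTER_MAX, i.e. 32), so small zones are aged in occasional
* batches rather than on every pass.
*/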
881 zone->nr_scan_active += (zone->nr_active >> priority) + 1;
882 nr_active = zone->nr_scan_active;
883 if (nr_active >= sc->swap_cluster_max)
884 zone->nr_scan_active = 0;
885 else
886 nr_active = 0;
887
888 zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
889 nr_inactive = zone->nr_scan_inactive;
890 if (nr_inactive >= sc->swap_cluster_max)
891 zone->nr_scan_inactive = 0;
892 else
893 nr_inactive = 0;
894
895 while (nr_active || nr_inactive) {
896 if (nr_active) {
897 nr_to_scan = min(nr_active,
898 (unsigned long)sc->swap_cluster_max);
899 nr_active -= nr_to_scan;
900 shrink_active_list(nr_to_scan, zone, sc);
901 }
902
903 if (nr_inactive) {
904 nr_to_scan = min(nr_inactive,
905 (unsigned long)sc->swap_cluster_max);
906 nr_inactive -= nr_to_scan;
907 nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
908 sc);
909 }
910 }
911
912 throttle_vm_writeout();
913
914 atomic_dec(&zone->reclaim_in_progress);
915 return nr_reclaimed;
916 }
917
918 /*
919 * This is the direct reclaim path, for page-allocating processes. We only
920 * try to reclaim pages from zones which will satisfy the caller's allocation
921 * request.
922 *
923 * We reclaim from a zone even if that zone is over pages_high. Because:
924 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
925 * allocation or
926 * b) The zones may be over pages_high but they must go *over* pages_high to
927 * satisfy the `incremental min' zone defense algorithm.
928 *
929 * Returns the number of reclaimed pages.
930 *
931 * If a zone is deemed to be full of pinned pages then just give it a light
932 * scan then give up on it.
933 */
934 static unsigned long shrink_zones(int priority, struct zone **zones,
935 struct scan_control *sc)
936 {
937 unsigned long nr_reclaimed = 0;
938 int i;
939
940 sc->all_unreclaimable = 1;
941 for (i = 0; zones[i] != NULL; i++) {
942 struct zone *zone = zones[i];
943
944 if (!populated_zone(zone))
945 continue;
946
947 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
948 continue;
949
950 zone->temp_priority = priority;
951 if (zone->prev_priority > priority)
952 zone->prev_priority = priority;
953
954 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
955 continue; /* Let kswapd poll it */
956
957 sc->all_unreclaimable = 0;
958
959 nr_reclaimed += shrink_zone(priority, zone, sc);
960 }
961 return nr_reclaimed;
962 }
963
964 /*
965 * This is the main entry point to direct page reclaim.
966 *
967 * If a full scan of the inactive list fails to free enough memory then we
968 * are "out of memory" and something needs to be killed.
969 *
970 * If the caller is !__GFP_FS then the probability of a failure is reasonably
971 * high - the zone may be full of dirty or under-writeback pages, which this
972 * caller can't do much about. We kick pdflush and take explicit naps in the
973 * hope that some of these pages can be written. But if the allocating task
974 * holds filesystem locks which prevent writeout this might not work, and the
975 * allocation attempt will fail.
976 */
977 unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
978 {
979 int priority;
980 int ret = 0;
981 unsigned long total_scanned = 0;
982 unsigned long nr_reclaimed = 0;
983 struct reclaim_state *reclaim_state = current->reclaim_state;
984 unsigned long lru_pages = 0;
985 int i;
986 struct scan_control sc = {
987 .gfp_mask = gfp_mask,
988 .may_writepage = !laptop_mode,
989 .swap_cluster_max = SWAP_CLUSTER_MAX,
990 .may_swap = 1,
991 .swappiness = vm_swappiness,
992 };
993
994 count_vm_event(ALLOCSTALL);
995
996 for (i = 0; zones[i] != NULL; i++) {
997 struct zone *zone = zones[i];
998
999 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1000 continue;
1001
1002 zone->temp_priority = DEF_PRIORITY;
1003 lru_pages += zone->nr_active + zone->nr_inactive;
1004 }
1005
1006 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1007 sc.nr_scanned = 0;
1008 if (!priority)
1009 disable_swap_token();
1010 nr_reclaimed += shrink_zones(priority, zones, &sc);
1011 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1012 if (reclaim_state) {
1013 nr_reclaimed += reclaim_state->reclaimed_slab;
1014 reclaim_state->reclaimed_slab = 0;
1015 }
1016 total_scanned += sc.nr_scanned;
1017 if (nr_reclaimed >= sc.swap_cluster_max) {
1018 ret = 1;
1019 goto out;
1020 }
1021
1022 /*
1023 * Try to write back as many pages as we just scanned. This
1024 * tends to cause slow streaming writers to write data to the
1025 * disk smoothly, at the dirtying rate, which is nice. But
1026 * that's undesirable in laptop mode, where we *want* lumpy
1027 * writeout. So in laptop mode, write out the whole world.
1028 */
1029 if (total_scanned > sc.swap_cluster_max +
1030 sc.swap_cluster_max / 2) {
1031 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1032 sc.may_writepage = 1;
1033 }
1034
1035 /* Take a nap, wait for some writeback to complete */
1036 if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
1037 blk_congestion_wait(WRITE, HZ/10);
1038 }
1039 /* top priority shrink_zones still had more to do? don't OOM, then */
1040 if (!sc.all_unreclaimable)
1041 ret = 1;
1042 out:
1043 for (i = 0; zones[i] != 0; i++) {
1044 struct zone *zone = zones[i];
1045
1046 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1047 continue;
1048
1049 zone->prev_priority = zone->temp_priority;
1050 }
1051 return ret;
1052 }
1053
1054 /*
1055 * For kswapd, balance_pgdat() will work across all this node's zones until
1056 * they are all at pages_high.
1057 *
1058 * Returns the number of pages which were actually freed.
1059 *
1060 * There is special handling here for zones which are full of pinned pages.
1061 * This can happen if the pages are all mlocked, or if they are all used by
1062 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1063 * What we do is to detect the case where all pages in the zone have been
1064 * scanned twice and there has been zero successful reclaim. Mark the zone as
1065 * dead and from now on, only perform a short scan. Basically we're polling
1066 * the zone for when the problem goes away.
1067 *
1068 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1069 * zones which have free_pages > pages_high, but once a zone is found to have
1070 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1071 * of the number of free pages in the lower zones. This interoperates with
1072 * the page allocator fallback scheme to ensure that aging of pages is balanced
1073 * across the zones.
1074 */
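/*
* As an example of the two scans below: on a node with DMA, NORMAL and
* HIGHMEM zones, if HIGHMEM is above pages_high but NORMAL is not, the
* first (highmem->dma) loop sets end_zone to NORMAL and the second loop
* then reclaims from DMA and NORMAL only, leaving HIGHMEM alone.
*/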
1075 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1076 {
1077 int all_zones_ok;
1078 int priority;
1079 int i;
1080 unsigned long total_scanned;
1081 unsigned long nr_reclaimed;
1082 struct reclaim_state *reclaim_state = current->reclaim_state;
1083 struct scan_control sc = {
1084 .gfp_mask = GFP_KERNEL,
1085 .may_swap = 1,
1086 .swap_cluster_max = SWAP_CLUSTER_MAX,
1087 .swappiness = vm_swappiness,
1088 };
1089
1090 loop_again:
1091 total_scanned = 0;
1092 nr_reclaimed = 0;
1093 sc.may_writepage = !laptop_mode;
1094 count_vm_event(PAGEOUTRUN);
1095
1096 for (i = 0; i < pgdat->nr_zones; i++) {
1097 struct zone *zone = pgdat->node_zones + i;
1098
1099 zone->temp_priority = DEF_PRIORITY;
1100 }
1101
1102 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1103 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1104 unsigned long lru_pages = 0;
1105
1106 /* The swap token gets in the way of swapout... */
1107 if (!priority)
1108 disable_swap_token();
1109
1110 all_zones_ok = 1;
1111
1112 /*
1113 * Scan in the highmem->dma direction for the highest
1114 * zone which needs scanning
1115 */
1116 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1117 struct zone *zone = pgdat->node_zones + i;
1118
1119 if (!populated_zone(zone))
1120 continue;
1121
1122 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1123 continue;
1124
1125 if (!zone_watermark_ok(zone, order, zone->pages_high,
1126 0, 0)) {
1127 end_zone = i;
1128 goto scan;
1129 }
1130 }
1131 goto out;
1132 scan:
1133 for (i = 0; i <= end_zone; i++) {
1134 struct zone *zone = pgdat->node_zones + i;
1135
1136 lru_pages += zone->nr_active + zone->nr_inactive;
1137 }
1138
1139 /*
1140 * Now scan the zone in the dma->highmem direction, stopping
1141 * at the last zone which needs scanning.
1142 *
1143 * We do this because the page allocator works in the opposite
1144 * direction. This prevents the page allocator from allocating
1145 * pages behind kswapd's direction of progress, which would
1146 * cause too much scanning of the lower zones.
1147 */
1148 for (i = 0; i <= end_zone; i++) {
1149 struct zone *zone = pgdat->node_zones + i;
1150 int nr_slab;
1151
1152 if (!populated_zone(zone))
1153 continue;
1154
1155 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1156 continue;
1157
1158 if (!zone_watermark_ok(zone, order, zone->pages_high,
1159 end_zone, 0))
1160 all_zones_ok = 0;
1161 zone->temp_priority = priority;
1162 if (zone->prev_priority > priority)
1163 zone->prev_priority = priority;
1164 sc.nr_scanned = 0;
1165 nr_reclaimed += shrink_zone(priority, zone, &sc);
1166 reclaim_state->reclaimed_slab = 0;
1167 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1168 lru_pages);
1169 nr_reclaimed += reclaim_state->reclaimed_slab;
1170 total_scanned += sc.nr_scanned;
1171 if (zone->all_unreclaimable)
1172 continue;
1173 if (nr_slab == 0 && zone->pages_scanned >=
1174 (zone->nr_active + zone->nr_inactive) * 6)
1175 zone->all_unreclaimable = 1;
1176 /*
1177 * If we've done a decent amount of scanning and
1178 * the reclaim ratio is low, start doing writepage
1179 * even in laptop mode
1180 */
1181 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1182 total_scanned > nr_reclaimed + nr_reclaimed / 2)
1183 sc.may_writepage = 1;
1184 }
1185 if (all_zones_ok)
1186 break; /* kswapd: all done */
1187 /*
1188 * OK, kswapd is getting into trouble. Take a nap, then take
1189 * another pass across the zones.
1190 */
1191 if (total_scanned && priority < DEF_PRIORITY - 2)
1192 blk_congestion_wait(WRITE, HZ/10);
1193
1194 /*
1195 * We do this so kswapd doesn't build up large priorities for
1196 * example when it is freeing in parallel with allocators. It
1197 * matches the direct reclaim path behaviour in terms of impact
1198 * on zone->*_priority.
1199 */
1200 if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1201 break;
1202 }
1203 out:
1204 for (i = 0; i < pgdat->nr_zones; i++) {
1205 struct zone *zone = pgdat->node_zones + i;
1206
1207 zone->prev_priority = zone->temp_priority;
1208 }
1209 if (!all_zones_ok) {
1210 cond_resched();
1211 goto loop_again;
1212 }
1213
1214 return nr_reclaimed;
1215 }
1216
1217 /*
1218 * The background pageout daemon, started as a kernel thread
1219 * from the init process.
1220 *
1221 * This basically trickles out pages so that we have _some_
1222 * free memory available even if there is no other activity
1223 * that frees anything up. This is needed for things like routing
1224 * etc, where we otherwise might have all activity going on in
1225 * asynchronous contexts that cannot page things out.
1226 *
1227 * If there are applications that are active memory-allocators
1228 * (most normal use), this basically shouldn't matter.
1229 */
1230 static int kswapd(void *p)
1231 {
1232 unsigned long order;
1233 pg_data_t *pgdat = (pg_data_t*)p;
1234 struct task_struct *tsk = current;
1235 DEFINE_WAIT(wait);
1236 struct reclaim_state reclaim_state = {
1237 .reclaimed_slab = 0,
1238 };
1239 cpumask_t cpumask;
1240
1241 cpumask = node_to_cpumask(pgdat->node_id);
1242 if (!cpus_empty(cpumask))
1243 set_cpus_allowed(tsk, cpumask);
1244 current->reclaim_state = &reclaim_state;
1245
1246 /*
1247 * Tell the memory management that we're a "memory allocator",
1248 * and that if we need more memory we should get access to it
1249 * regardless (see "__alloc_pages()"). "kswapd" should
1250 * never get caught in the normal page freeing logic.
1251 *
1252 * (Kswapd normally doesn't need memory anyway, but sometimes
1253 * you need a small amount of memory in order to be able to
1254 * page out something else, and this flag essentially protects
1255 * us from recursively trying to free more memory as we're
1256 * trying to free the first piece of memory in the first place).
1257 */
1258 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1259
1260 order = 0;
1261 for ( ; ; ) {
1262 unsigned long new_order;
1263
1264 try_to_freeze();
1265
1266 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1267 new_order = pgdat->kswapd_max_order;
1268 pgdat->kswapd_max_order = 0;
1269 if (order < new_order) {
1270 /*
1271 * Don't sleep if someone wants a larger 'order'
1272 * allocation
1273 */
1274 order = new_order;
1275 } else {
1276 schedule();
1277 order = pgdat->kswapd_max_order;
1278 }
1279 finish_wait(&pgdat->kswapd_wait, &wait);
1280
1281 balance_pgdat(pgdat, order);
1282 }
1283 return 0;
1284 }
1285
1286 /*
1287 * A zone is low on free memory, so wake its kswapd task to service it.
1288 */
1289 void wakeup_kswapd(struct zone *zone, int order)
1290 {
1291 pg_data_t *pgdat;
1292
1293 if (!populated_zone(zone))
1294 return;
1295
1296 pgdat = zone->zone_pgdat;
1297 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1298 return;
1299 if (pgdat->kswapd_max_order < order)
1300 pgdat->kswapd_max_order = order;
1301 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1302 return;
1303 if (!waitqueue_active(&pgdat->kswapd_wait))
1304 return;
1305 wake_up_interruptible(&pgdat->kswapd_wait);
1306 }
1307
1308 #ifdef CONFIG_PM
1309 /*
1310 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
1311 * from LRU lists system-wide, for given pass and priority, and returns the
1312 * number of reclaimed pages
1313 *
1314 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
1315 */
1316 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1317 int pass, struct scan_control *sc)
1318 {
1319 struct zone *zone;
1320 unsigned long nr_to_scan, ret = 0;
1321
1322 for_each_zone(zone) {
1323
1324 if (!populated_zone(zone))
1325 continue;
1326
1327 if (zone->all_unreclaimable && prio != DEF_PRIORITY)
1328 continue;
1329
1330 /* For pass = 0 we don't shrink the active list */
1331 if (pass > 0) {
1332 zone->nr_scan_active += (zone->nr_active >> prio) + 1;
1333 if (zone->nr_scan_active >= nr_pages || pass > 3) {
1334 zone->nr_scan_active = 0;
1335 nr_to_scan = min(nr_pages, zone->nr_active);
1336 shrink_active_list(nr_to_scan, zone, sc);
1337 }
1338 }
1339
1340 zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
1341 if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1342 zone->nr_scan_inactive = 0;
1343 nr_to_scan = min(nr_pages, zone->nr_inactive);
1344 ret += shrink_inactive_list(nr_to_scan, zone, sc);
1345 if (ret >= nr_pages)
1346 return ret;
1347 }
1348 }
1349
1350 return ret;
1351 }
1352
1353 /*
1354 * Try to free `nr_pages' of memory, system-wide, and return the number of
1355 * freed pages.
1356 *
1357 * Rather than trying to age LRUs the aim is to preserve the overall
1358 * LRU order by reclaiming preferentially
1359 * inactive > active > active referenced > active mapped
1360 */
1361 unsigned long shrink_all_memory(unsigned long nr_pages)
1362 {
1363 unsigned long lru_pages, nr_slab;
1364 unsigned long ret = 0;
1365 int pass;
1366 struct reclaim_state reclaim_state;
1367 struct zone *zone;
1368 struct scan_control sc = {
1369 .gfp_mask = GFP_KERNEL,
1370 .may_swap = 0,
1371 .swap_cluster_max = nr_pages,
1372 .may_writepage = 1,
1373 .swappiness = vm_swappiness,
1374 };
1375
1376 current->reclaim_state = &reclaim_state;
1377
1378 lru_pages = 0;
1379 for_each_zone(zone)
1380 lru_pages += zone->nr_active + zone->nr_inactive;
1381
1382 nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1383 /* If slab caches are huge, it's better to hit them first */
1384 while (nr_slab >= lru_pages) {
1385 reclaim_state.reclaimed_slab = 0;
1386 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1387 if (!reclaim_state.reclaimed_slab)
1388 break;
1389
1390 ret += reclaim_state.reclaimed_slab;
1391 if (ret >= nr_pages)
1392 goto out;
1393
1394 nr_slab -= reclaim_state.reclaimed_slab;
1395 }
1396
1397 /*
1398 * We try to shrink LRUs in 5 passes:
1399 * 0 = Reclaim from inactive_list only
1400 * 1 = Reclaim from active list but don't reclaim mapped
1401 * 2 = 2nd pass of type 1
1402 * 3 = Reclaim mapped (normal reclaim)
1403 * 4 = 2nd pass of type 3
1404 */
1405 for (pass = 0; pass < 5; pass++) {
1406 int prio;
1407
1408 /* Needed for shrinking slab caches later on */
1409 if (!lru_pages)
1410 for_each_zone(zone) {
1411 lru_pages += zone->nr_active;
1412 lru_pages += zone->nr_inactive;
1413 }
1414
1415 /* Force reclaiming mapped pages in the passes #3 and #4 */
1416 if (pass > 2) {
1417 sc.may_swap = 1;
1418 sc.swappiness = 100;
1419 }
1420
1421 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1422 unsigned long nr_to_scan = nr_pages - ret;
1423
1424 sc.nr_scanned = 0;
1425 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1426 if (ret >= nr_pages)
1427 goto out;
1428
1429 reclaim_state.reclaimed_slab = 0;
1430 shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
1431 ret += reclaim_state.reclaimed_slab;
1432 if (ret >= nr_pages)
1433 goto out;
1434
1435 if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1436 blk_congestion_wait(WRITE, HZ / 10);
1437 }
1438
1439 lru_pages = 0;
1440 }
1441
1442 /*
1443 * If ret = 0, we could not shrink LRUs, but there may be something
1444 * in slab caches
1445 */
1446 if (!ret)
1447 do {
1448 reclaim_state.reclaimed_slab = 0;
1449 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1450 ret += reclaim_state.reclaimed_slab;
1451 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1452
1453 out:
1454 current->reclaim_state = NULL;
1455
1456 return ret;
1457 }
1458 #endif
1459
1460 #ifdef CONFIG_HOTPLUG_CPU
1461 /* It's optimal to keep kswapds on the same CPUs as their memory, but
1462 not required for correctness. So if the last cpu in a node goes
1463 away, we get changed to run anywhere: as the first one comes back,
1464 restore their cpu bindings. */
1465 static int __devinit cpu_callback(struct notifier_block *nfb,
1466 unsigned long action, void *hcpu)
1467 {
1468 pg_data_t *pgdat;
1469 cpumask_t mask;
1470
1471 if (action == CPU_ONLINE) {
1472 for_each_online_pgdat(pgdat) {
1473 mask = node_to_cpumask(pgdat->node_id);
1474 if (any_online_cpu(mask) != NR_CPUS)
1475 /* One of our CPUs online: restore mask */
1476 set_cpus_allowed(pgdat->kswapd, mask);
1477 }
1478 }
1479 return NOTIFY_OK;
1480 }
1481 #endif /* CONFIG_HOTPLUG_CPU */
1482
1483 /*
1484 * This kswapd start function will be called by init and node-hot-add.
1485 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1486 */
1487 int kswapd_run(int nid)
1488 {
1489 pg_data_t *pgdat = NODE_DATA(nid);
1490 int ret = 0;
1491
1492 if (pgdat->kswapd)
1493 return 0;
1494
1495 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1496 if (IS_ERR(pgdat->kswapd)) {
1497 /* failure at boot is fatal */
1498 BUG_ON(system_state == SYSTEM_BOOTING);
1499 printk("Failed to start kswapd on node %d\n", nid);
1500 ret = -1;
1501 }
1502 return ret;
1503 }
1504
1505 static int __init kswapd_init(void)
1506 {
1507 int nid;
1508
1509 swap_setup();
1510 for_each_online_node(nid)
1511 kswapd_run(nid);
1512 hotcpu_notifier(cpu_callback, 0);
1513 return 0;
1514 }
1515
1516 module_init(kswapd_init)
1517
1518 #ifdef CONFIG_NUMA
1519 /*
1520 * Zone reclaim mode
1521 *
1522 * If non-zero call zone_reclaim when the number of free pages falls below
1523 * the watermarks.
1524 */
1525 int zone_reclaim_mode __read_mostly;
1526
1527 #define RECLAIM_OFF 0
1528 #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1529 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1530 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
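/*
* The mode is set via the vm.zone_reclaim_mode sysctl
* (/proc/sys/vm/zone_reclaim_mode): writing 1 enables plain zone reclaim,
* while 7 (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_SWAP) also allows writing
* out dirty pages and swapping during zone reclaim.
*/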
1531
1532 /*
1533 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1534 * of a node considered for each zone_reclaim. 4 scans 1/16th of
1535 * a zone.
1536 */
1537 #define ZONE_RECLAIM_PRIORITY 4
1538
1539 /*
1540 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
1541 * occur.
1542 */
1543 int sysctl_min_unmapped_ratio = 1;
1544
1545 /*
1546 * If the number of slab pages in a zone grows beyond this percentage then
1547 * slab reclaim needs to occur.
1548 */
1549 int sysctl_min_slab_ratio = 5;
1550
1551 /*
1552 * Try to free up some pages from this zone through reclaim.
1553 */
1554 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1555 {
1556 /* Minimum pages needed in order to stay on node */
1557 const unsigned long nr_pages = 1 << order;
1558 struct task_struct *p = current;
1559 struct reclaim_state reclaim_state;
1560 int priority;
1561 unsigned long nr_reclaimed = 0;
1562 struct scan_control sc = {
1563 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1564 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1565 .swap_cluster_max = max_t(unsigned long, nr_pages,
1566 SWAP_CLUSTER_MAX),
1567 .gfp_mask = gfp_mask,
1568 .swappiness = vm_swappiness,
1569 };
1570 unsigned long slab_reclaimable;
1571
1572 disable_swap_token();
1573 cond_resched();
1574 /*
1575 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1576 * and we also need to be able to write out pages for RECLAIM_WRITE
1577 * and RECLAIM_SWAP.
1578 */
1579 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
1580 reclaim_state.reclaimed_slab = 0;
1581 p->reclaim_state = &reclaim_state;
1582
1583 if (zone_page_state(zone, NR_FILE_PAGES) -
1584 zone_page_state(zone, NR_FILE_MAPPED) >
1585 zone->min_unmapped_pages) {
1586 /*
1587 * Free memory by calling shrink zone with increasing
1588 * priorities until we have enough memory freed.
1589 */
1590 priority = ZONE_RECLAIM_PRIORITY;
1591 do {
1592 nr_reclaimed += shrink_zone(priority, zone, &sc);
1593 priority--;
1594 } while (priority >= 0 && nr_reclaimed < nr_pages);
1595 }
1596
1597 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1598 if (slab_reclaimable > zone->min_slab_pages) {
1599 /*
1600 * shrink_slab() does not currently allow us to determine how
1601 * many pages were freed in this zone. So we take the current
1602 * number of slab pages and shake the slab until it is reduced
1603 * by the same nr_pages that we used for reclaiming unmapped
1604 * pages.
1605 *
1606 * Note that shrink_slab will free memory on all zones and may
1607 * take a long time.
1608 */
1609 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
1610 zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
1611 slab_reclaimable - nr_pages)
1612 ;
1613
1614 /*
1615 * Update nr_reclaimed by the number of slab pages we
1616 * reclaimed from this zone.
1617 */
1618 nr_reclaimed += slab_reclaimable -
1619 zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1620 }
1621
1622 p->reclaim_state = NULL;
1623 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1624 return nr_reclaimed >= nr_pages;
1625 }
1626
1627 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1628 {
1629 cpumask_t mask;
1630 int node_id;
1631
1632 /*
1633 * Zone reclaim reclaims unmapped file backed pages and
1634 * slab pages if we are over the defined limits.
1635 *
1636 * A small portion of unmapped file backed pages is needed for
1637 * file I/O otherwise pages read by file I/O will be immediately
1638 * thrown out if the zone is overallocated. So we do not reclaim
1639 * if less than a specified percentage of the zone is used by
1640 * unmapped file backed pages.
1641 */
1642 if (zone_page_state(zone, NR_FILE_PAGES) -
1643 zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
1644 && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
1645 <= zone->min_slab_pages)
1646 return 0;
1647
1648 /*
1649 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
1650 * not have reclaimable pages and if we should not delay the allocation
1651 * then do not scan.
1652 */
1653 if (!(gfp_mask & __GFP_WAIT) ||
1654 zone->all_unreclaimable ||
1655 atomic_read(&zone->reclaim_in_progress) > 0 ||
1656 (current->flags & PF_MEMALLOC))
1657 return 0;
1658
1659 /*
1660 * Only run zone reclaim on the local zone or on zones that do not
1661 * have associated processors. This will favor the local processor
1662 * over remote processors and spread off node memory allocations
1663 * as wide as possible.
1664 */
1665 node_id = zone_to_nid(zone);
1666 mask = node_to_cpumask(node_id);
1667 if (!cpus_empty(mask) && node_id != numa_node_id())
1668 return 0;
1669 return __zone_reclaim(zone, gfp_mask, order);
1670 }
1671 #endif