/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
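
/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * the original file. It shows how a caller that has already looked up a
 * page isolates it for migration; the helper name is hypothetical.
 */
#if 0
static int example_isolate_one(struct page *page,
			       struct list_head *pagelist)
{
	int err;

	err = isolate_lru_page(page, pagelist);
	if (err)
		return err;	/* -EBUSY: page was not on an LRU list */

	/* The page is now on pagelist and holds an extra reference */
	return 0;
}
#endif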

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch (pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);
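
/*
 * Editor's note (not in the original source): a worked example of the
 * reference accounting above. For a page isolated with
 * isolate_lru_page(), the expected references are its page cache entry
 * plus the reference the isolation took, so migrate_page() passes
 * nr_refs == 2 and the check above requires
 *
 *	page_count(page) == page_mapcount(page) + 2
 *
 * buffer_migrate_page() passes nr_refs == 3 because the attached
 * buffer heads pin one more reference. Any other transient reference
 * makes the check fail and the migration attempt returns -EAGAIN.
 */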

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
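
/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * the original file. A filesystem whose pages never carry PagePrivate
 * data could wire migrate_page() up as its ->migratepage method; the
 * aops name is hypothetical and the other methods are omitted.
 */
#if 0
static struct address_space_operations example_aops = {
	/* the usual readpage/writepage methods would go here */
	.migratepage	= migrate_page,
};
#endif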

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0)
			wait_on_page_writeback(page);
		else
			if (PageWriteback(page))
				goto unlock_page;

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
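
/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * the original file. It shows the overall sequence the comments above
 * describe, starting from an already isolated list of pages; the helper
 * name is hypothetical. Passing NULL instead of new_pages would swap
 * the pages out rather than migrate them.
 */
#if 0
static int example_migrate_list(struct list_head *pagelist,
				struct list_head *new_pages)
{
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int left;

	/* Move the isolated pages to the preallocated target pages */
	left = migrate_pages(pagelist, new_pages, &moved, &failed);

	/* Old source pages and permanent failures go back to the LRU */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);

	/* 'left' pages could not be processed before "to" ran empty */
	return left;
}
#endif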

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

	rc = migrate_page_remove_references(newpage, page, 3);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
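
/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * the original file. A filesystem whose pages carry buffer heads, and
 * which guarantees that those buffers are only touched under the page
 * lock, could use buffer_migrate_page() as its ->migratepage method;
 * the aops name is hypothetical.
 */
#if 0
static struct address_space_operations example_blkdev_aops = {
	/* readpage, writepage and friends omitted from this sketch */
	.migratepage	= buffer_migrate_page,
};
#endif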

/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify destination with either non-NULL vma or dest_node >= 0
 * Return the number of pages not migrated or error code
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		} else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}
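
/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * the original file. A caller wanting to move an isolated list of pages
 * to a specific node would use migrate_pages_to() roughly as follows;
 * the helper name is hypothetical.
 */
#if 0
static int example_move_to_node(struct list_head *pagelist, int nid)
{
	int left;

	/* No vma: the destination is given as a node number */
	left = migrate_pages_to(pagelist, NULL, nid);

	/* Failed and unmigrated pages were left on pagelist */
	if (!list_empty(pagelist))
		putback_lru_pages(pagelist);
	return left;
}
#endif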