/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of
 * calls to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else
		lru_cache_add(page);
	/* Drop the reference taken by isolate_lru_page() */
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}
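
/*
 * Illustrative caller (not part of the original file; the helper name and
 * single-page scope are hypothetical): the intended call sequence is
 * isolate_lru_page() to compile the list, migrate_prep() before the first
 * call to migrate_pages(), and putback_lru_pages() to return whatever is
 * left to the LRU. migrate_pages() is declared through the headers included
 * above and defined later in this file.
 */
static int example_migrate_one(struct page *page, struct list_head *to)
{
	LIST_HEAD(pagelist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err;

	if (isolate_lru_page(page, &pagelist))
		return -EBUSY;		/* page was not on an LRU list */

	err = migrate_prep();		/* requires swap; drains the LRU */
	if (!err)
		err = migrate_pages(&pagelist, to, &moved, &failed);

	/* Old pages, failed pages and never-tried pages go back to the LRU */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	putback_lru_pages(&pagelist);
	return err;
}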

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_remove_references(struct page *newpage,
						struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) ||
	    page_count(page) != 2 + !!PagePrivate(page) ||
	    *radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	/* Transfer the radix tree reference from the old page to the new */
	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
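
/*
 * Illustration only (this helper is not in the original source): the
 * reference-count invariant documented above, written out as code. The
 * recheck under the tree_lock in migrate_page_remove_references() compares
 * page_count() against exactly this kind of value.
 */
static inline int migration_expected_refs(struct page *page)
{
	return 1 +			/* the isolating caller's reference */
		!!page_mapping(page) +	/* the radix tree reference */
		!!PagePrivate(page);	/* buffer heads or fs-private data */
}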

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page);
	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
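
/*
 * Wiring sketch (hypothetical struct, not from this file): a filesystem
 * whose pages never carry PagePrivate data can point its migratepage
 * method straight at migrate_page(). A mapping that must never be
 * migrated would use fail_migrate_page() here instead.
 */
static struct address_space_operations example_movable_aops = {
	.migratepage	= migrate_page,		/* no private data to move */
};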

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

	rc = migrate_page_remove_references(newpage, page);
	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
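
/*
 * Wiring sketch (hypothetical struct, not from this file): a block-based
 * filesystem whose only PagePrivate state is its list of buffer_heads
 * can use buffer_migrate_page() as its migratepage method.
 */
static struct address_space_operations example_block_aops = {
	.migratepage	= buffer_migrate_page,	/* moves the buffers too */
};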

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts, or when no pages are
 * movable any more because "to" has become empty or no retryable
 * pages exist any more.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later
		 * we use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/* Make sure the dirty bit is up to date */
		if (try_to_unmap(page, 1) == SWAP_FAIL) {
			rc = -EPERM;
			goto unlock_both;
		}

		if (page_mapcount(page)) {
			rc = -EAGAIN;
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
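
/*
 * Usage sketch (not in the original file; the function name is made up):
 * with a NULL "to" list, migrate_pages() swaps the isolated pages out
 * instead of moving them, as described in the comment above.
 */
static int example_swap_out_list(struct list_head *pagelist)
{
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int nr_failed;

	nr_failed = migrate_pages(pagelist, NULL, &moved, &failed);
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	return nr_failed;
}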

/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or a destination
 * node dest >= 0.
 * Returns the number of pages not migrated, or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		}
		else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}
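
/*
 * Caller sketch (hypothetical): moving an isolated list of pages to a
 * specific NUMA node with no vma-based policy. A negative return is an
 * error; a positive return counts the pages left on "pagelist".
 */
static int example_move_list_to_node(struct list_head *pagelist, int node)
{
	return migrate_pages_to(pagelist, NULL, node);
}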