X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmigrate.c;h=e2fdbce1874b4ff7a481621b6c5a3285d8d75260;hb=f15f41383d466860f8b22f669da8c841f8d73c3b;hp=e9b161bde95b4c382092a1c94a8fcfbd4bcf58f0;hpb=b361735043e3001eadb1d40916fd1a4fca1a9363;p=pandora-kernel.git

diff --git a/mm/migrate.c b/mm/migrate.c
index e9b161bde95b..e2fdbce1874b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page)) {
+		if (PageLRU(page) && get_page_unless_zero(page)) {
 			ret = 0;
-			get_page(page);
 			ClearPageLRU(page);
 			if (PageActive(page))
 				del_page_from_active_list(zone, page);
@@ -297,7 +296,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	void **pslot;
 
 	if (!mapping) {
-		/* Anonymous page */
+		/* Anonymous page without mapping */
 		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
@@ -333,6 +332,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	__put_page(page);
 
+	/*
+	 * If moved to a different zone then also account
+	 * the page for that zone. Other VM counters will be
+	 * taken care of when we establish references to the
+	 * new page and drop references to the old page.
+	 *
+	 * Note that anonymous pages are accounted for
+	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+	 * are mapped to swap space.
+	 */
+	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
@@ -599,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int rcu_locked = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -619,18 +632,41 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 			goto unlock;
 		wait_on_page_writeback(page);
 	}
-
 	/*
-	 * Establish migration ptes or remove ptes
+	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrates a page.
+	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
+	 * of migration. File cache pages are no problem because of page_lock()
+	 * File Caches may use write_page() or lock_page() in migration, then,
+	 * just care Anon page here.
+	 */
+	if (PageAnon(page)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+	}
+	/*
+	 * This is a corner case handling.
+	 * When a new swap-cache is read into, it is linked to LRU
+	 * and treated as swapcache but has no rmap yet.
+	 * Calling try_to_unmap() against a page->mapping==NULL page is
+	 * BUG. So handle it here.
 	 */
+	if (!page->mapping)
+		goto rcu_unlock;
+	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
+
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
+rcu_unlock:
+	if (rcu_locked)
+		rcu_read_unlock();
 
 unlock:
+
 	unlock_page(page);
 
 	if (rc != -EAGAIN) {
@@ -748,7 +784,8 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
 	*result = &pm->status;
 
-	return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0);
+	return alloc_pages_node(pm->node,
+				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
 /*
@@ -781,7 +818,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
 
 		err = -EFAULT;
 		vma = find_vma(mm, pp->addr);
-		if (!vma)
+		if (!vma || !vma_migratable(vma))
 			goto set_status;
 
 		page = follow_page(vma, pp->addr, FOLL_GET);
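
The first hunk changes how isolate_lru_page() takes its reference: with get_page_unless_zero() the reference is only taken while the page still has a non-zero refcount, so a page that is concurrently on its way back to the allocator is skipped instead of being resurrected off the LRU. A minimal sketch of that pattern, assuming the same-era kernel APIs used in the hunk; the helper name is illustrative and not part of the patch:

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Illustrative helper, not from the patch: isolate a page only if it is
 * still on the LRU *and* its refcount has not already dropped to zero. */
static int try_isolate(struct zone *zone, struct page *page)
{
	int ret = -EBUSY;

	spin_lock_irq(&zone->lru_lock);
	/*
	 * get_page_unless_zero() refuses to take a reference once the
	 * count has hit zero, so a page already being freed is left
	 * alone instead of being pulled back from the allocator.
	 */
	if (PageLRU(page) && get_page_unless_zero(page)) {
		ClearPageLRU(page);
		ret = 0;	/* caller now owns a reference */
	}
	spin_unlock_irq(&zone->lru_lock);
	return ret;
}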
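
The unmap_and_move() hunks add two guards around try_to_unmap(): the RCU read lock is held for anonymous pages so the anon_vma cannot be freed once the last pte is gone, and a freshly read swapcache page whose page->mapping is still NULL is skipped rather than handed to try_to_unmap(), which would BUG on it. A condensed sketch of that control flow, assuming the same-era calls (PageAnon(), try_to_unmap(page, 1), page_mapped()); the function name is made up for illustration:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

/* Illustrative condensation of the unmap step in unmap_and_move(). */
static int unmap_for_migration(struct page *page)
{
	int rc = -EAGAIN;
	int rcu_locked = 0;

	/*
	 * Once try_to_unmap() drops the last mapping, nothing pins the
	 * anon_vma; the RCU read lock keeps it alive until the
	 * migration ptes have been dealt with.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * A swapcache page that was just read in may sit on the LRU
	 * with page->mapping == NULL; try_to_unmap() would BUG on it,
	 * so skip it and let a later pass retry.
	 */
	if (!page->mapping)
		goto out;

	try_to_unmap(page, 1);		/* install migration ptes */
	if (!page_mapped(page))
		rc = 0;			/* safe to copy to the new page */
out:
	if (rcu_locked)
		rcu_read_unlock();
	return rc;
}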
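
The final two hunks harden the move_pages() path: destination pages are allocated with GFP_HIGHUSER_MOVABLE so they come from zones intended for migratable memory, and do_move_pages() now rejects VMAs that vma_migratable() reports as unmigratable. A one-line illustration of the allocation side, with nid standing in for pm->node; the wrapper is hypothetical and not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative: order-0 destination page from the movable zones of nid only. */
static struct page *alloc_dest_page(int nid)
{
	return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}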