X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmigrate.c;h=2a80136b23bbc16fb9d2ec2fad932b29e30dda50;hb=254db57f9b12daba841a4d91ddb9a8161e9c74ba;hp=d8c65a65c61d5701b8ecbc9185ac864ef564707c;hpb=762b8291be6944bd891fbe2c8976a2b2e5bbc726;p=pandora-kernel.git

diff --git a/mm/migrate.c b/mm/migrate.c
index d8c65a65c61d..2a80136b23bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -285,7 +285,15 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 
 	page = migration_entry_to_page(entry);
 
-	get_page(page);
+	/*
+	 * Once radix-tree replacement of page migration started, page_count
+	 * *must* be zero. And, we don't want to call wait_on_page_locked()
+	 * against a page without get_page().
+	 * So, we use get_page_unless_zero(), here. Even failed, page fault
+	 * will occur again.
+	 */
+	if (!get_page_unless_zero(page))
+		goto out;
 	pte_unmap_unlock(ptep, ptl);
 	wait_on_page_locked(page);
 	put_page(page);
@@ -305,6 +313,7 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
+	int expected_count;
 	void **pslot;
 
 	if (!mapping) {
@@ -314,14 +323,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return 0;
 	}
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	if (page_count(page) != 2 + !!PagePrivate(page) ||
+	expected_count = 2 + !!PagePrivate(page);
+	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
+		return -EAGAIN;
+	}
+
+	if (!page_freeze_refs(page, expected_count)) {
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
@@ -338,6 +353,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
 	radix_tree_replace_slot(pslot, newpage);
 
+	page_unfreeze_refs(page, expected_count);
 	/*
 	 * Drop cache reference from old page.
 	 * We know this isn't the last reference.
@@ -357,10 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
 
-	write_unlock_irq(&mapping->tree_lock);
-	if (!PageSwapCache(newpage)) {
+	spin_unlock_irq(&mapping->tree_lock);
+	if (!PageSwapCache(newpage))
 		mem_cgroup_uncharge_cache_page(page);
-	}
 
 	return 0;
 }
@@ -590,7 +605,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	 * establishing additional references. We are the only one
 	 * holding a reference to the new page at this point.
 	 */
-	if (TestSetPageLocked(newpage))
+	if (!trylock_page(newpage))
 		BUG();
 
 	/* Prepare mapping for the new page.*/
@@ -652,7 +667,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	rc = -EAGAIN;
-	if (TestSetPageLocked(page)) {
+	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
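
Note: the hunks above rely on a few helpers that are defined outside mm/migrate.c. The snippet below is only a sketch of how get_page_unless_zero(), page_freeze_refs(), page_unfreeze_refs() and trylock_page() are typically defined in kernels of this vintage; the authoritative definitions live in include/linux/mm.h and include/linux/pagemap.h of the matching tree and may differ in detail. It is included here only to make the refcount-freezing scheme easier to follow.

/* Sketch only -- not part of this patch; see include/linux/mm.h and
 * include/linux/pagemap.h for the real definitions.
 */

/* Take a reference only if the page's refcount is still non-zero. */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}

/*
 * Atomically drop the refcount from 'count' to zero; returns non-zero on
 * success.  While the count is frozen at zero, speculative lookups such as
 * get_page_unless_zero() cannot take new references to the page.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

/* Restore the refcount once the radix-tree slot points at the new page. */
static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	atomic_set(&page->_count, count);
}

/*
 * trylock_page() returns non-zero when the lock was acquired, i.e. the
 * opposite sense of the old TestSetPageLocked(), which returned the previous
 * value of PG_locked -- hence the inverted conditions in the hunks above.
 */
static inline int trylock_page(struct page *page)
{
	return !test_and_set_bit_lock(PG_locked, &page->flags);
}

With the refcount frozen at zero inside migrate_page_move_mapping(), the get_page_unless_zero() added to migration_entry_wait() simply fails for a page whose mapping is being replaced, so the faulting task drops the PTE lock and retakes the fault instead of sleeping on a page it cannot pin.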