diff --git a/mm/migrate.c b/mm/migrate.c
index 65c12d2..f2d86f2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -141,13 +141,16 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+
+       /* Recheck VMA as permissions can change since migration started */
        if (is_write_migration_entry(entry))
-               pte = pte_mkwrite(pte);
+               pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (PageHuge(new))
                pte = pte_mkhuge(pte);
 #endif
-       flush_cache_page(vma, addr, pte_pfn(pte));
+       flush_dcache_page(new);
        set_pte_at(mm, addr, ptep, pte);
 
        if (PageHuge(new)) {
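
The switch to maybe_mkwrite() above closes a race with mprotect(): between the time a writably-mapped page is copied and the time its migration PTE is removed, the VMA's permissions may have been downgraded, so the write bit must be re-derived from vma->vm_flags rather than taken from the old mapping. The helper itself is not part of this hunk; upstream it is defined (in include/linux/mm.h) as:

	static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		/* Only restore the write bit if the VMA still allows writes */
		if (likely(vma->vm_flags & VM_WRITE))
			pte = pte_mkwrite(pte);
		return pte;
	}
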
@@ -184,15 +187,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  *
  * This function is called from do_swap_page().
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-                               unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+                               spinlock_t *ptl)
 {
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
+       pte_t pte;
        swp_entry_t entry;
        struct page *page;
 
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;
@@ -220,14 +222,29 @@ out:
        pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+                               unsigned long address)
+{
+       spinlock_t *ptl = pte_lockptr(mm, pmd);
+       pte_t *ptep = pte_offset_map(pmd, address);
+       __migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+       spinlock_t *ptl = &mm->page_table_lock;
+       __migration_entry_wait(mm, pte, ptl);
+}
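
The split into __migration_entry_wait() and two wrappers exists because huge PTEs are not covered by the per-pmd lock that migration_entry_wait() derives via pte_lockptr(); hugetlb page tables are protected by mm->page_table_lock, so a hugetlb fault must hand that lock to the common waiter instead. A minimal sketch of the intended call site in hugetlb_fault(), following the upstream migration_entry_wait_huge() fix (is_hugetlb_entry_migration() is the existing mm/hugetlb.c helper):

	entry = huge_ptep_get(ptep);
	if (unlikely(is_hugetlb_entry_migration(entry))) {
		/* Wait for migration to finish, then retry the fault */
		migration_entry_wait_huge(mm, ptep);
		return 0;
	}
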
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
-static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
+static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+                                                       enum migrate_mode mode)
 {
        struct buffer_head *bh = head;
 
        /* Simple case, sync compaction */
-       if (sync) {
+       if (mode != MIGRATE_ASYNC) {
                do {
                        get_bh(bh);
                        lock_buffer(bh);
@@ -263,7 +280,7 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
 }
 #else
 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
-                                                               bool sync)
+                                                       enum migrate_mode mode)
 {
        return true;
 }
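
Every sync/mode check in this patch relies on the new enum migrate_mode, which is not visible in any hunk here; upstream it is introduced in include/linux/migrate_mode.h. Its definition, with the upstream comments paraphrased:

	/*
	 * MIGRATE_ASYNC       never blocks
	 * MIGRATE_SYNC_LIGHT  may block on most operations, but not on
	 *                     ->writepage, where the stall would be too long
	 * MIGRATE_SYNC        may block anywhere, including page writeout
	 */
	enum migrate_mode {
		MIGRATE_ASYNC,
		MIGRATE_SYNC_LIGHT,
		MIGRATE_SYNC,
	};
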
@@ -277,9 +294,9 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
-               struct buffer_head *head, bool sync)
+               struct buffer_head *head, enum migrate_mode mode)
 {
        int expected_count;
        void **pslot;
@@ -315,7 +332,8 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         * the mapping back due to an elevated page count, we would have to
         * block waiting on other references to be dropped.
         */
-       if (!sync && head && !buffer_migrate_lock_buffers(head, sync)) {
+       if (mode == MIGRATE_ASYNC && head &&
+                       !buffer_migrate_lock_buffers(head, mode)) {
                page_unfreeze_refs(page, expected_count);
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
@@ -359,6 +377,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        return 0;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -458,6 +477,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
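
These two EXPORT_SYMBOL additions (and the un-static'ing of migrate_page_move_mapping()) are not part of the upstream migrate_mode conversion; they let modular code assemble its own migratepage address_space operation from the generic helpers, much as later mainline kernels export them for aio and filesystems. A hypothetical callback of that shape (illustrative only, not from this tree) would mirror migrate_page():

	/* Hypothetical modular-fs migratepage built on the exported helpers */
	static int examplefs_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
	{
		int rc;

		rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
		if (rc)
			return rc;

		/* fs-private per-page metadata could be transferred here */

		migrate_page_copy(newpage, page);
		return 0;
	}
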
 
 /************************************************************
  *                    Migration functions
@@ -478,13 +498,14 @@ EXPORT_SYMBOL(fail_migrate_page);
  * Pages are locked upon entry and exit.
  */
 int migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, bool sync)
+               struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        int rc;
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, sync);
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 
        if (rc)
                return rc;
@@ -501,17 +522,17 @@ EXPORT_SYMBOL(migrate_page);
  * exist.
  */
 int buffer_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, bool sync)
+               struct page *newpage, struct page *page, enum migrate_mode mode)
 {
        struct buffer_head *bh, *head;
        int rc;
 
        if (!page_has_buffers(page))
-               return migrate_page(mapping, newpage, page, sync);
+               return migrate_page(mapping, newpage, page, mode);
 
        head = page_buffers(page);
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, head, sync);
+       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 
        if (rc)
                return rc;
@@ -521,8 +542,8 @@ int buffer_migrate_page(struct address_space *mapping,
         * with an IRQ-safe spinlock held. In the sync case, the buffers
         * need to be locked now
         */
-       if (sync)
-               BUG_ON(!buffer_migrate_lock_buffers(head, sync));
+       if (mode != MIGRATE_ASYNC)
+               BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
@@ -599,10 +620,11 @@ static int writeout(struct address_space *mapping, struct page *page)
  * Default handling if a filesystem does not provide a migration function.
  */
 static int fallback_migrate_page(struct address_space *mapping,
-       struct page *newpage, struct page *page, bool sync)
+       struct page *newpage, struct page *page, enum migrate_mode mode)
 {
        if (PageDirty(page)) {
-               if (!sync)
+               /* Only writeback pages in full synchronous migration */
+               if (mode != MIGRATE_SYNC)
                        return -EBUSY;
                return writeout(mapping, page);
        }
@@ -615,7 +637,7 @@ static int fallback_migrate_page(struct address_space *mapping,
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
-       return migrate_page(mapping, newpage, page, sync);
+       return migrate_page(mapping, newpage, page, mode);
 }
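
fallback_migrate_page() is where the three modes diverge most sharply: only MIGRATE_SYNC may enter writeout() on a dirty page, while MIGRATE_ASYNC and MIGRATE_SYNC_LIGHT callers get -EBUSY and must retry later or skip the page. Upstream, memory compaction relies on exactly this split when it calls migrate_pages(); a sketch based on the upstream sync-light patch (cc and compaction_alloc live in mm/compaction.c):

	/* Compaction never requests full sync: it may block on locks and
	 * buffers (sync-light) but never stalls writing back a dirty page */
	nr_remaining = migrate_pages(&cc->migratepages, compaction_alloc,
			(unsigned long)cc, false,
			cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
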
 
 /*
@@ -630,7 +652,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-                                       int remap_swapcache, bool sync)
+                               int remap_swapcache, enum migrate_mode mode)
 {
        struct address_space *mapping;
        int rc;
@@ -651,7 +673,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 
        mapping = page_mapping(page);
        if (!mapping)
-               rc = migrate_page(mapping, newpage, page, sync);
+               rc = migrate_page(mapping, newpage, page, mode);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems provide a
@@ -660,9 +682,9 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 * is the most common path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
-                                               newpage, page, sync);
+                                               newpage, page, mode);
        else
-               rc = fallback_migrate_page(mapping, newpage, page, sync);
+               rc = fallback_migrate_page(mapping, newpage, page, mode);
 
        if (rc) {
                newpage->mapping = NULL;
@@ -677,7 +699,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 }
 
 static int __unmap_and_move(struct page *page, struct page *newpage,
-                               int force, bool offlining, bool sync)
+                       int force, bool offlining, enum migrate_mode mode)
 {
        int rc = -EAGAIN;
        int remap_swapcache = 1;
@@ -686,7 +708,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        struct anon_vma *anon_vma = NULL;
 
        if (!trylock_page(page)) {
-               if (!force || !sync)
+               if (!force || mode == MIGRATE_ASYNC)
                        goto out;
 
                /*
@@ -732,10 +754,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (PageWriteback(page)) {
                /*
-                * For !sync, there is no point retrying as the retry loop
-                * is expected to be too short for PageWriteback to be cleared
+                * Only in the case of a full synchronous migration is it
+                * necessary to wait for PageWriteback. In the async case,
+                * the retry loop is too short and in the sync-light case,
+                * the overhead of stalling is too much.
                 */
-               if (!sync) {
+               if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
                        goto uncharge;
                }
@@ -806,7 +830,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
 skip_unmap:
        if (!page_mapped(page))
-               rc = move_to_new_page(newpage, page, remap_swapcache, sync);
+               rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 
        if (rc && remap_swapcache)
                remove_migration_ptes(page, page);
@@ -829,7 +853,8 @@ out:
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force, bool offlining, bool sync)
+                       struct page *page, int force, bool offlining,
+                       enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -847,7 +872,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                if (unlikely(split_huge_page(page)))
                        goto out;
 
-       rc = __unmap_and_move(page, newpage, force, offlining, sync);
+       rc = __unmap_and_move(page, newpage, force, offlining, mode);
 out:
        if (rc != -EAGAIN) {
                /*
@@ -895,7 +920,8 @@ out:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                unsigned long private, struct page *hpage,
-                               int force, bool offlining, bool sync)
+                               int force, bool offlining,
+                               enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -908,7 +934,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        rc = -EAGAIN;
 
        if (!trylock_page(hpage)) {
-               if (!force || !sync)
+               if (!force || mode != MIGRATE_SYNC)
                        goto out;
                lock_page(hpage);
        }
@@ -919,7 +945,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
        if (!page_mapped(hpage))
-               rc = move_to_new_page(new_hpage, hpage, 1, sync);
+               rc = move_to_new_page(new_hpage, hpage, 1, mode);
 
        if (rc)
                remove_migration_ptes(hpage, hpage);
@@ -962,7 +988,7 @@ out:
  */
 int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -983,7 +1009,7 @@ int migrate_pages(struct list_head *from,
 
                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2, offlining,
-                                               sync);
+                                               mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1013,7 +1039,7 @@ out:
 
 int migrate_huge_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -1030,7 +1056,7 @@ int migrate_huge_pages(struct list_head *from,
 
                        rc = unmap_and_move_huge_page(get_new_page,
                                        private, page, pass > 2, offlining,
-                                       sync);
+                                       mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1159,7 +1185,7 @@ set_status:
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm, 0, true);
+                               (unsigned long)pm, 0, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }
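
The caller above, do_move_page_to_node_array(), supplies new_page_node() from this same file as the new_page_t allocator. For reference, a minimal allocator callback of that shape (a simplified sketch: the real new_page_node() looks up a per-page target node and status slot in the pm array passed through 'private'):

	/* Allocate the destination page on a fixed node; 'private' is
	 * used here to carry a node id purely for illustration */
	static struct page *new_page_on_node(struct page *page,
			unsigned long private, int **result)
	{
		return alloc_pages_exact_node((int)private,
					GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
	}
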