diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e2d1587..4298aba 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -89,7 +89,8 @@ struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
-} khugepaged_scan = {
+};
+static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
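The hunk above narrows khugepaged_scan to file scope: the struct definition now ends with a bare `};` and the instance is re-declared static, since nothing outside this file uses it. The self-referential LIST_HEAD_INIT still works because a variable's own name is in scope within its initializer. A minimal userspace sketch of the same pattern (the scan_state name is invented for illustration):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same shape as the khugepaged_scan change: type first, then a static
 * instance whose initializer refers to its own member by name. */
struct scan_state {
	struct list_head head;
};
static struct scan_state scan_state = {
	.head = LIST_HEAD_INIT(scan_state.head),
};

int main(void)
{
	/* The list starts out circular, pointing at itself. */
	printf("%d\n", scan_state.head.next == &scan_state.head); /* 1 */
	return 0;
}
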
@@ -829,7 +830,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
-                                  haddr + PAGE_SHIFT*i, vma);
+                                  haddr + PAGE_SIZE * i, vma);
                __SetPageUptodate(pages[i]);
                cond_resched();
        }
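
This hunk fixes an off-by-a-factor bug in the address hint passed to copy_user_highpage(): the old code stepped the user address by PAGE_SHIFT (12) bytes per subpage instead of PAGE_SIZE (4096). A quick arithmetic check, assuming 4K pages and a made-up huge-page-aligned haddr:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long haddr = 0x200000; /* hypothetical aligned address */
	int i = 3;                      /* third subpage of the huge page */

	/* Old expression: advances only 12 bytes per subpage. */
	printf("%#lx\n", haddr + PAGE_SHIFT * i); /* 0x200024 */
	/* Fixed expression: advances one full page per subpage. */
	printf("%#lx\n", haddr + PAGE_SIZE * i);  /* 0x203000 */
	return 0;
}

The hint only matters on architectures whose copy_user_highpage() does cache coloring; on x86 the argument is ignored, which is presumably why the bug went unnoticed.
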
@@ -989,7 +990,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
 
 out:
        return page;
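
Plain get_page() is no longer correct here: with the tail-page refcounting scheme this series introduces, a follow_page() reference taken on a tail page has to be recorded in the tail's _mapcount as well as in the head's _count, so that __split_huge_page_refcount() (further down) knows how many pins to hand each tail. That is what get_page_foll() does. A toy model of the bookkeeping, with invented names and plain ints standing in for the atomics:

#include <stdio.h>

/* Toy model, not kernel code: 'count' stands in for _count and 'pins'
 * for the gup references parked in a tail page's _mapcount. */
struct toy_page {
	int count;
	int pins;
};

/* What get_page_foll() amounts to for a tail page: pin the compound
 * page as a whole, and remember which tail the pin belongs to. */
static void toy_get_page_foll(struct toy_page *head, struct toy_page *tail)
{
	head->count++;
	tail->pins++;
}

int main(void)
{
	struct toy_page head = { .count = 1 }, tail = { 0 };

	toy_get_page_foll(&head, &tail);
	printf("head %d, tail pins %d\n", head.count, tail.pins); /* 2, 1 */
	return 0;
}
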
@@ -1052,6 +1053,51 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        return ret;
 }
 
+int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
+                 unsigned long old_addr,
+                 unsigned long new_addr, unsigned long old_end,
+                 pmd_t *old_pmd, pmd_t *new_pmd)
+{
+       int ret = 0;
+       pmd_t pmd;
+
+       struct mm_struct *mm = vma->vm_mm;
+
+       if ((old_addr & ~HPAGE_PMD_MASK) ||
+           (new_addr & ~HPAGE_PMD_MASK) ||
+           old_end - old_addr < HPAGE_PMD_SIZE ||
+           (new_vma->vm_flags & VM_NOHUGEPAGE))
+               goto out;
+
+       /*
+        * The destination pmd shouldn't be established; free_pgtables()
+        * should have released it.
+        */
+       if (WARN_ON(!pmd_none(*new_pmd))) {
+               VM_BUG_ON(pmd_trans_huge(*new_pmd));
+               goto out;
+       }
+
+       spin_lock(&mm->page_table_lock);
+       if (likely(pmd_trans_huge(*old_pmd))) {
+               if (pmd_trans_splitting(*old_pmd)) {
+                       spin_unlock(&mm->page_table_lock);
+                       wait_split_huge_page(vma->anon_vma, old_pmd);
+                       ret = -1;
+               } else {
+                       pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+                       VM_BUG_ON(!pmd_none(*new_pmd));
+                       set_pmd_at(mm, new_addr, new_pmd, pmd);
+                       spin_unlock(&mm->page_table_lock);
+                       ret = 1;
+               }
+       } else {
+               spin_unlock(&mm->page_table_lock);
+       }
+out:
+       return ret;
+}
+
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, pgprot_t newprot)
 {
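
move_huge_pmd() is new infrastructure for mremap: when the source and destination are both huge-page aligned and the range covers a whole huge pmd, the entry is moved wholesale (pmdp_get_and_clear() on the old slot, set_pmd_at() on the new one, under page_table_lock) instead of splitting the page and moving individual ptes. The return value is tri-state: 1 means the pmd was moved, -1 means it raced with a split (wait_split_huge_page() has already been called, so the caller just retries), and 0 means it wasn't huge and the caller falls back to the pte path. The alignment guard is plain arithmetic and can be checked in isolation; a userspace sketch, assuming x86-64's 2MB huge pages:

#include <stdio.h>

#define HPAGE_PMD_SHIFT 21 /* assumes x86-64: 2MB huge pages */
#define HPAGE_PMD_SIZE  (1UL << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

/* Mirrors the guard at the top of move_huge_pmd(): both endpoints must
 * be huge-page aligned and the source range must span a full huge pmd. */
static int can_move_as_huge(unsigned long old_addr, unsigned long new_addr,
                            unsigned long old_end)
{
	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", can_move_as_huge(0x200000, 0x600000, 0x400000)); /* 1 */
	printf("%d\n", can_move_as_huge(0x200000, 0x601000, 0x400000)); /* 0 */
	return 0;
}
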
@@ -1156,6 +1202,7 @@ static void __split_huge_page_refcount(struct page *page)
        unsigned long head_index = page->index;
        struct zone *zone = page_zone(page);
        int zonestat;
+       int tail_count = 0;
 
        /* prevent PageLRU from going away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
@@ -1164,11 +1211,27 @@ static void __split_huge_page_refcount(struct page *page)
        for (i = 1; i < HPAGE_PMD_NR; i++) {
                struct page *page_tail = page + i;
 
-               /* tail_page->_count cannot change */
-               atomic_sub(atomic_read(&page_tail->_count), &page->_count);
-               BUG_ON(page_count(page) <= 0);
-               atomic_add(page_mapcount(page) + 1, &page_tail->_count);
-               BUG_ON(atomic_read(&page_tail->_count) <= 0);
+               /* tail_page->_mapcount cannot change */
+               BUG_ON(page_mapcount(page_tail) < 0);
+               tail_count += page_mapcount(page_tail);
+               /* check for overflow */
+               BUG_ON(tail_count < 0);
+               BUG_ON(atomic_read(&page_tail->_count) != 0);
+               /*
+                * tail_page->_count is zero and not changing from
+                * under us. But get_page_unless_zero() may be running
+                * from under us on the tail_page. If we used
+                * atomic_set() below instead of atomic_add(), we
+                * would then run atomic_set() concurrently with
+                * get_page_unless_zero(), and atomic_set() is
+                * implemented in C not using locked ops. spin_unlock
+                * on x86 sometimes uses locked ops because of PPro
+                * errata 66, 92, so unless somebody can guarantee
+                * atomic_set() here would be safe on all archs (and
+                * not only on x86), it's safer to use atomic_add().
+                */
+               atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
+                          &page_tail->_count);
 
                /* after clearing PageTail the gup refcount can be released */
                smp_mb();
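
The replacement accounting works per tail as: new tail _count = page_mapcount(head) + page_mapcount(tail) + 1. That is one reference per pte mapping the tail is about to gain (each former pmd mapping becomes a pte mapping), plus the gup pins parked in the tail's own _mapcount, plus one, the usual base reference a live page carries; the pins are simultaneously summed into tail_count so the head can release them all at once after the loop. Worked through with assumed numbers:

#include <stdio.h>

int main(void)
{
	/* Assumed numbers, for illustration only. */
	int head_mapcount = 2; /* page_mapcount(page): two pmd mappings */
	int tail_pins     = 3; /* page_mapcount(page_tail): gup refs on this tail */

	int tail_count_after = head_mapcount + tail_pins + 1;
	printf("tail _count after split: %d\n", tail_count_after); /* 6 */

	/* The 3 pins also go into tail_count, to be subtracted from the
	 * head's _count once every tail has been published. */
	return 0;
}
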
@@ -1186,10 +1249,7 @@ static void __split_huge_page_refcount(struct page *page)
                                      (1L << PG_uptodate)));
                page_tail->flags |= (1L << PG_dirty);
 
-               /*
-                * 1) clear PageTail before overwriting first_page
-                * 2) clear PageTail before clearing PageHead for VM_BUG_ON
-                */
+               /* clear PageTail before overwriting first_page */
                smp_wmb();
 
                /*
@@ -1206,7 +1266,6 @@ static void __split_huge_page_refcount(struct page *page)
                 * status is achieved by setting a reserved bit in the
                 * pmd, not by clearing the present bit.
                 */
-               BUG_ON(page_mapcount(page_tail));
                page_tail->_mapcount = page->_mapcount;
 
                BUG_ON(page_tail->mapping);
@@ -1223,6 +1282,8 @@ static void __split_huge_page_refcount(struct page *page)
 
                lru_add_page_tail(zone, page, page_tail);
        }
+       atomic_sub(tail_count, &page->_count);
+       BUG_ON(atomic_read(&page->_count) <= 0);
 
        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
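
Deferring the head-page decrement to a single atomic_sub() after the loop is the other half of the fix: the head keeps holding the tails' pins until every tail has been initialized and published, so its _count cannot drop early while the split is still in flight, and the final BUG_ON checks that the head retains references of its own. The shape of the pattern, as a toy with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Toy illustration, not kernel code: accumulate per-item transfers and
 * give them back to the shared counter only after everything is set up. */
int main(void)
{
	atomic_int head_count = 10;
	int tail_pins[3] = { 1, 0, 2 };
	int tail_count = 0;

	for (int i = 0; i < 3; i++) {
		tail_count += tail_pins[i];
		/* ...tail i is initialized and published here... */
	}

	/* One subtraction, only after all tails stand on their own. */
	atomic_fetch_sub(&head_count, tail_count);
	printf("head count: %d\n", atomic_load(&head_count)); /* 7 */
	return 0;
}
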
@@ -1906,7 +1967,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
        mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
@@ -2024,6 +2085,8 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                                            struct page **hpage)
+       __releases(&khugepaged_mm_lock)
+       __acquires(&khugepaged_mm_lock)
 {
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
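
The new __releases/__acquires annotations tell sparse that khugepaged_scan_mm_slot() is entered with khugepaged_mm_lock held, drops it somewhere in the body, and re-takes it before returning; under a plain compiler both macros expand to nothing. A userspace sketch of the same pattern, with the macros defined the way the kernel's compiler.h defines them (big_lock and do_slow_work are invented names):

#include <pthread.h>

#ifdef __CHECKER__ /* defined when sparse is doing the checking */
# define __releases(x) __attribute__((context(x, 1, 0)))
# define __acquires(x) __attribute__((context(x, 0, 1)))
#else
# define __releases(x)
# define __acquires(x)
#endif

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Held on entry, held again on exit, dropped in the middle: the same
 * shape khugepaged_scan_mm_slot() has with khugepaged_mm_lock. */
static void do_slow_work(void)
	__releases(&big_lock)
	__acquires(&big_lock)
{
	pthread_mutex_unlock(&big_lock);
	/* ... sleepable work without the lock ... */
	pthread_mutex_lock(&big_lock);
}

int main(void)
{
	pthread_mutex_lock(&big_lock);
	do_slow_work();
	pthread_mutex_unlock(&big_lock);
	return 0;
}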