Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] mm/hugetlb.c

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3f0c130..908f01d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1079,6 +1079,7 @@ static void return_unused_surplus_pages(struct hstate *h,
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
                        break;
+               cond_resched_lock(&hugetlb_lock);
        }
 }
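
The cond_resched_lock(&hugetlb_lock) added here (and again in the next hunk)
lets these potentially very long freeing loops drop the lock and reschedule
when needed, rather than monopolizing the CPU and the lock for the whole walk.
A rough userspace sketch of that pattern, using pthreads instead of the kernel
primitives; pool_lock, free_one_item() and the other names are invented for
illustration:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pool_size = 1000000;       /* protected by pool_lock */

/* Free one item from the pool; return 0 once the pool is empty. */
static int free_one_item(void)
{
        if (!pool_size)
                return 0;
        pool_size--;
        return 1;
}

static void free_many_items(unsigned long nr)
{
        pthread_mutex_lock(&pool_lock);
        while (nr--) {
                if (!free_one_item())
                        break;
                /*
                 * Rough analogue of cond_resched_lock(&hugetlb_lock):
                 * briefly drop the lock and yield so that other threads
                 * waiting on it can make progress.
                 */
                pthread_mutex_unlock(&pool_lock);
                sched_yield();
                pthread_mutex_lock(&pool_lock);
        }
        pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
        free_many_items(600000);
        printf("items left: %lu\n", pool_size);
        return 0;
}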
 
@@ -1448,6 +1449,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
        while (min_count < persistent_huge_pages(h)) {
                if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
+               cond_resched_lock(&hugetlb_lock);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -2271,6 +2273,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2298,7 +2325,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
                spin_lock(&dst->page_table_lock);
                spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
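
The new branch above copies non-present huge PTEs (migration or hwpoison
entries) into the child as-is, and for a COW mapping it first downgrades a
write migration entry to a read one in both parent and child, so the PTEs
restored after migration are read-only and a later write still takes the COW
fault. A standalone sketch of that downgrade; the (type, pfn) encoding and
helper names below are invented and much simpler than the kernel's
arch-specific swp_entry_t handling:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy encoding of a non-present "migration entry": a type in the top bits
 * and a page frame number in the rest. Invented layout, for illustration
 * only.
 */
#define TYPE_SHIFT              58
#define MIGRATION_READ          1ULL
#define MIGRATION_WRITE         2ULL
#define PFN_MASK                ((1ULL << TYPE_SHIFT) - 1)

typedef uint64_t swp_entry;

static swp_entry make_entry(uint64_t type, uint64_t pfn)
{
        return (type << TYPE_SHIFT) | (pfn & PFN_MASK);
}

static int is_write_migration(swp_entry e)
{
        return (e >> TYPE_SHIFT) == MIGRATION_WRITE;
}

static swp_entry make_migration_read(swp_entry e)
{
        return make_entry(MIGRATION_READ, e & PFN_MASK);
}

int main(void)
{
        swp_entry parent = make_entry(MIGRATION_WRITE, 0x1234);
        swp_entry child;

        /*
         * As in the hunk above: under COW, downgrade the parent's write
         * migration entry to read and give the child the same read entry,
         * so both sides end up with read-only mappings after migration.
         */
        if (is_write_migration(parent))
                parent = make_migration_read(parent);
        child = parent;

        printf("parent writable? %d  child writable? %d\n",
               is_write_migration(parent), is_write_migration(child));
        return 0;
}
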
@@ -2316,32 +2360,6 @@ nomem:
        return -ENOMEM;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, struct page *ref_page)
 {
@@ -2401,9 +2419,10 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * A migrating or HWPoisoned hugepage is already unmapped
+                * and its refcount has been dropped
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+               if (unlikely(!pte_present(pte)))
                        continue;
 
                page = pte_page(pte);
@@ -2483,6 +2502,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (iter_vma == vma)
                        continue;
 
+               /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting, but it is possible that a shared
+                * VMA is using the same page, so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
@@ -2781,6 +2808,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
+       int need_wait_lock = 0;
 
        ptep = huge_pte_offset(mm, address);
        if (ptep) {
@@ -2791,12 +2819,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
+       } else {
+               ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+               if (!ptep)
+                       return VM_FAULT_OOM;
        }
 
-       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
-       if (!ptep)
-               return VM_FAULT_OOM;
-
        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
@@ -2811,6 +2839,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        ret = 0;
 
+       /*
+        * entry could be a migration/hwpoison entry at this point, so this
+        * check stops the code below from assuming that we have an active
+        * hugepage in the pagecache. The goto defers the work to the second
+        * page fault, which the is_hugetlb_entry_(migration|hwpoisoned)
+        * checks will handle properly.
+        */
+       if (!pte_present(entry))
+               goto out_mutex;
+
        /*
         * If we are going to COW the mapping later, we examine the pending
         * reservations for this page now. This will ensure that any
@@ -2830,29 +2868,30 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
+       spin_lock(&mm->page_table_lock);
+       /* Check for a racing update before calling hugetlb_cow */
+       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+               goto out_page_table_lock;
+
        /*
         * hugetlb_cow() requires page locks of pte_page(entry) and
         * pagecache_page, so here we need to take the former one
         * when page != pagecache_page or !pagecache_page.
-        * Note that locking order is always pagecache_page -> page,
-        * so no worry about deadlock.
         */
        page = pte_page(entry);
-       get_page(page);
        if (page != pagecache_page)
-               lock_page(page);
-
-       spin_lock(&mm->page_table_lock);
-       /* Check for a racing update before calling hugetlb_cow */
-       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
-               goto out_page_table_lock;
+               if (!trylock_page(page)) {
+                       need_wait_lock = 1;
+                       goto out_page_table_lock;
+               }
 
+       get_page(page);
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                                        pagecache_page);
-                       goto out_page_table_lock;
+                       goto out_put_page;
                }
                entry = pte_mkdirty(entry);
        }
@@ -2860,7 +2899,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
                                                flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vma, address, ptep);
-
+out_put_page:
+       if (page != pagecache_page)
+               unlock_page(page);
+       put_page(page);
 out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
 
@@ -2868,13 +2910,18 @@ out_page_table_lock:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
-       if (page != pagecache_page)
-               unlock_page(page);
-       put_page(page);
-
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
 
+       /*
+        * Generally it is safe to hold a refcount while waiting for the page
+        * lock. Here, however, we only wait to defer the next page fault and
+        * avoid a busy loop; the page is not used after it is unlocked and
+        * before the current page fault returns. So we are safe from
+        * accessing a freed page even though we wait without a refcount.
+        */
+       if (need_wait_lock)
+               wait_on_page_locked(page);
        return ret;
 }
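
The hugetlb_fault() changes above validate the PTE under page_table_lock
before touching the page, switch from lock_page() to trylock_page() (blocking
on the page lock is not an option while the spinlock is held), and take the
page reference only after the pte_same() check. If the trylock fails,
everything is backed out and the task simply waits for the page lock to be
released before returning, so the retry happens as a second fault instead of
a busy loop. A pthread sketch of that control-flow shape only; handle_fault(),
table_lock and the rest are invented names, and the refcount/spinlock details
that motivate the real code are not modeled:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ page_table_lock */
static pthread_mutex_t page_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~ the page lock   */

/* Returns 0 when the fault was handled, -1 when the caller should retry. */
static int handle_fault(void)
{
        int need_wait_lock = 0;

        pthread_mutex_lock(&table_lock);
        if (pthread_mutex_trylock(&page_lock) != 0) {
                /* Could not get the second lock: back out and retry later. */
                need_wait_lock = 1;
                goto out_table_lock;
        }
        /* ... work that needs both locks would go here ... */
        pthread_mutex_unlock(&page_lock);
out_table_lock:
        pthread_mutex_unlock(&table_lock);

        if (need_wait_lock) {
                /*
                 * Analogue of wait_on_page_locked(): wait until the lock is
                 * free so the retry does not turn into a busy loop.
                 */
                pthread_mutex_lock(&page_lock);
                pthread_mutex_unlock(&page_lock);
                return -1;
        }
        return 0;
}

int main(void)
{
        while (handle_fault() != 0)     /* a failed attempt is retried, like a 2nd fault */
                sched_yield();
        puts("fault handled");
        return 0;
}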
 
@@ -3000,7 +3047,22 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
-               if (!huge_pte_none(huge_ptep_get(ptep))) {
+               pte = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+                       continue;
+               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
+
+                               make_migration_entry_read(&entry);
+                               newpte = swp_entry_to_pte(entry);
+                               set_huge_pte_at(mm, address, ptep, newpte);
+                       }
+                       continue;
+               }
+               if (!huge_pte_none(pte)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);