Merge branch 'stable-3.2' into pandora-3.2
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d2c43a2..908f01d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -633,6 +633,7 @@ static void free_huge_page(struct page *page)
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
+               arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
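
The new arch_clear_hugepage_flags() call gives an architecture a hook to
clear per-page state (on s390, for instance, storage-key related flags)
before a freed hugepage is recycled onto the free list. A minimal sketch of
the companion definition, assuming the hook lands in include/linux/hugetlb.h
with a no-op default for architectures that do not need it:

	#ifndef arch_clear_hugepage_flags
	static inline void arch_clear_hugepage_flags(struct page *page)
	{
	}
	#define arch_clear_hugepage_flags arch_clear_hugepage_flags
	#endif
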
@@ -1713,9 +1714,9 @@ static void __init hugetlb_sysfs_init(void)
 
 /*
  * node_hstate/s - associate per node hstate attributes, via their kobjects,
- * with node sysdevs in node_devices[] using a parallel array.  The array
- * index of a node sysdev or _hstate == node id.
- * This is here to avoid any static dependency of the node sysdev driver, in
+ * with node devices in node_devices[] using a parallel array.  The array
+ * index of a node device or _hstate == node id.
+ * This is here to avoid any static dependency of the node device driver, in
  * the base kernel, on the hugetlb module.
  */
 struct node_hstate {
@@ -1725,7 +1726,7 @@ struct node_hstate {
 struct node_hstate node_hstates[MAX_NUMNODES];
 
 /*
- * A subset of global hstate attributes for node sysdevs
+ * A subset of global hstate attributes for node devices
  */
 static struct attribute *per_node_hstate_attrs[] = {
        &nr_hugepages_attr.attr,
@@ -1739,7 +1740,7 @@ static struct attribute_group per_node_hstate_attr_group = {
 };
 
 /*
- * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  * Returns node id via non-NULL nidp.
  */
 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
@@ -1762,13 +1763,13 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
 }
 
 /*
- * Unregister hstate attributes from a single node sysdev.
+ * Unregister hstate attributes from a single node device.
  * No-op if no hstate attributes attached.
  */
 void hugetlb_unregister_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
 
        if (!nhs->hugepages_kobj)
                return;         /* no hstate attributes */
@@ -1784,7 +1785,7 @@ void hugetlb_unregister_node(struct node *node)
 }
 
 /*
- * hugetlb module exit:  unregister hstate attributes from node sysdevs
+ * hugetlb module exit:  unregister hstate attributes from node devices
  * that have them.
  */
 static void hugetlb_unregister_all_nodes(void)
@@ -1792,7 +1793,7 @@ static void hugetlb_unregister_all_nodes(void)
        int nid;
 
        /*
-        * disable node sysdev registrations.
+        * disable node device registrations.
         */
        register_hugetlbfs_with_node(NULL, NULL);
 
@@ -1804,20 +1805,20 @@ static void hugetlb_unregister_all_nodes(void)
 }
 
 /*
- * Register hstate attributes for a single node sysdev.
+ * Register hstate attributes for a single node device.
  * No-op if attributes already registered.
  */
 void hugetlb_register_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
        int err;
 
        if (nhs->hugepages_kobj)
                return;         /* already allocated */
 
        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
-                                                       &node->sysdev.kobj);
+                                                       &node->dev.kobj);
        if (!nhs->hugepages_kobj)
                return;
 
@@ -1828,7 +1829,7 @@ void hugetlb_register_node(struct node *node)
                if (err) {
                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
                                        " for node %d\n",
-                                               h->name, node->sysdev.id);
+                                               h->name, node->dev.id);
                        hugetlb_unregister_node(node);
                        break;
                }
@@ -1837,8 +1838,8 @@ void hugetlb_register_node(struct node *node)
 
 /*
  * hugetlb init time:  register hstate attributes for all registered node
- * sysdevs of nodes that have memory.  All on-line nodes should have
- * registered their associated sysdev by this time.
+ * devices of nodes that have memory.  All on-line nodes should have
+ * registered their associated device by this time.
  */
 static void hugetlb_register_all_nodes(void)
 {
@@ -1846,12 +1847,12 @@ static void hugetlb_register_all_nodes(void)
 
        for_each_node_state(nid, N_HIGH_MEMORY) {
                struct node *node = &node_devices[nid];
-               if (node->sysdev.id == nid)
+               if (node->dev.id == nid)
                        hugetlb_register_node(node);
        }
 
        /*
-        * Let the node sysdev driver know we're here so it can
+        * Let the node device driver know we're here so it can
         * [un]register hstate attributes on node hotplug.
         */
        register_hugetlbfs_with_node(hugetlb_register_node,
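
The s/sysdev/dev/ churn above tracks the driver-core conversion of NUMA node
devices: struct node now embeds a plain struct device rather than the old
struct sys_device, so the hugetlb code reaches the node id and kobject
through node->dev. A minimal sketch of the structure this relies on,
assuming the include/linux/node.h side of the conversion:

	/* sketch: include/linux/node.h after the sysdev -> device conversion */
	struct node {
		struct device dev;	/* was: struct sys_device sysdev; */
	};

	extern struct node node_devices[];
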
@@ -2418,9 +2419,10 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * A migrating or HWPoisoned hugepage is already unmapped
+                * and its refcount has been dropped
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+               if (unlikely(!pte_present(pte)))
                        continue;
 
                page = pte_page(pte);
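
The switch from is_hugetlb_entry_hwpoisoned() to !pte_present() works because
both hwpoison and migration entries are encoded as non-present, swap-style
ptes, so a single presence test now skips both cases. A sketch of the helpers
this reasoning rests on, assuming they match the definitions used elsewhere
in mm/hugetlb.c:

	static int is_hugetlb_entry_migration(pte_t pte)
	{
		swp_entry_t swp;

		if (huge_pte_none(pte) || pte_present(pte))
			return 0;	/* empty or mapped: not a special entry */
		swp = pte_to_swp_entry(pte);
		return non_swap_entry(swp) && is_migration_entry(swp);
	}

	static int is_hugetlb_entry_hwpoisoned(pte_t pte)
	{
		swp_entry_t swp;

		if (huge_pte_none(pte) || pte_present(pte))
			return 0;
		swp = pte_to_swp_entry(pte);
		return non_swap_entry(swp) && is_hwpoison_entry(swp);
	}
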
@@ -2500,6 +2502,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (iter_vma == vma)
                        continue;
 
+               /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting, but it is possible that a shared
+                * VMA is using the same page, so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
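
VM_MAYSHARE rather than VM_SHARED is the right test here: a shared mapping of
a file opened read-only has VM_SHARED cleared but keeps VM_MAYSHARE set, and
hugepage reserve accounting follows the latter. A hypothetical helper, for
illustration only, that names the check being added:

	/* hypothetical, not a kernel API: shared mappings draw on their own
	 * reserves, so COW-triggered unmapping must leave them alone */
	static inline bool vma_has_shared_reserves(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_MAYSHARE;
	}
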
@@ -2798,6 +2808,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
+       int need_wait_lock = 0;
 
        ptep = huge_pte_offset(mm, address);
        if (ptep) {
@@ -2808,12 +2819,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
+       } else {
+               ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+               if (!ptep)
+                       return VM_FAULT_OOM;
        }
 
-       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
-       if (!ptep)
-               return VM_FAULT_OOM;
-
        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
@@ -2828,6 +2839,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        ret = 0;
 
+       /*
+        * entry could be a migration/hwpoison entry at this point, so this
+        * check prevents the kernel from going below on the assumption that
+        * we have an active hugepage in the pagecache. This goto expects the
+        * 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
+        * check will properly handle it.
+        */
+       if (!pte_present(entry))
+               goto out_mutex;
+
        /*
         * If we are going to COW the mapping later, we examine the pending
         * reservations for this page now. This will ensure that any
@@ -2847,29 +2868,30 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
+       spin_lock(&mm->page_table_lock);
+       /* Check for a racing update before calling hugetlb_cow */
+       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+               goto out_page_table_lock;
+
        /*
         * hugetlb_cow() requires page locks of pte_page(entry) and
         * pagecache_page, so here we need to take the former one
         * when page != pagecache_page or !pagecache_page.
-        * Note that locking order is always pagecache_page -> page,
-        * so no worry about deadlock.
         */
        page = pte_page(entry);
-       get_page(page);
        if (page != pagecache_page)
-               lock_page(page);
-
-       spin_lock(&mm->page_table_lock);
-       /* Check for a racing update before calling hugetlb_cow */
-       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
-               goto out_page_table_lock;
+               if (!trylock_page(page)) {
+                       need_wait_lock = 1;
+                       goto out_page_table_lock;
+               }
 
+       get_page(page);
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                                        pagecache_page);
-                       goto out_page_table_lock;
+                       goto out_put_page;
                }
                entry = pte_mkdirty(entry);
        }
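
Because page_table_lock is now taken before the page lock, the fault path
can no longer sleep in lock_page(); it must trylock and, on contention,
defer the work to a second fault. A condensed, hypothetical rendering of
that pattern (fault_locked_step() and its arguments are illustrative, not
kernel API):

	static int fault_locked_step(struct page *page, spinlock_t *ptl,
				     int *need_wait_lock)
	{
		spin_lock(ptl);
		if (!trylock_page(page)) {
			/* cannot sleep under a spinlock: record the contention
			 * and let the caller wait once every lock is dropped */
			*need_wait_lock = 1;
			spin_unlock(ptl);
			return -EAGAIN;
		}
		/* ... update the pte with both locks held ... */
		unlock_page(page);
		spin_unlock(ptl);
		return 0;
	}
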
@@ -2877,7 +2899,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
                                                flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vma, address, ptep);
-
+out_put_page:
+       if (page != pagecache_page)
+               unlock_page(page);
+       put_page(page);
 out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
 
@@ -2885,13 +2910,18 @@ out_page_table_lock:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
-       if (page != pagecache_page)
-               unlock_page(page);
-       put_page(page);
-
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
 
+       /*
+        * Generally it's safe to hold a refcount while waiting for the page
+        * lock. But here we just wait to defer the next page fault and avoid
+        * a busy loop, and the page is not used after it is unlocked and
+        * before we return from the current page fault. So we are safe from
+        * accessing a freed page, even if we wait here without taking a
+        * refcount.
+        */
+       if (need_wait_lock)
+               wait_on_page_locked(page);
        return ret;
 }
 
@@ -3017,7 +3047,22 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
-               if (!huge_pte_none(huge_ptep_get(ptep))) {
+               pte = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+                       continue;
+               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
+
+                               make_migration_entry_read(&entry);
+                               newpte = swp_entry_to_pte(entry);
+                               set_huge_pte_at(mm, address, ptep, newpte);
+                       }
+                       continue;
+               }
+               if (!huge_pte_none(pte)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
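
The migration-entry handling added to hugetlb_change_protection() mirrors
what change_pte_range() in mm/mprotect.c already does for normal pages: a
write migration entry is downgraded to a read one, since a precise
protection check would be difficult. From memory rather than the exact 3.2
source, that path looks roughly like:

	swp_entry_t entry = pte_to_swp_entry(oldpte);

	if (is_write_migration_entry(entry)) {
		/* a protection check is difficult, so just be safe and
		 * disable write access */
		make_migration_entry_read(&entry);
		set_pte_at(mm, addr, pte, swp_entry_to_pte(entry));
	}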