[pandora-kernel.git] / mm/hugetlb.c
index dcc6780..a489d93 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -1417,6 +1418,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
+
+               /* yield cpu to avoid soft lockup */
+               cond_resched();
+
                ret = alloc_fresh_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
@@ -1889,11 +1894,7 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-       /* Some platform decide whether they support huge pages at boot
-        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
-        * there is no such support
-        */
-       if (HPAGE_SHIFT == 0)
+       if (!hugepages_supported())
                return 0;
 
        if (!size_to_hstate(default_hstate_size)) {
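
For reference, hugepages_supported() is not defined in this file; based on the comment removed above, it is expected to be a small helper (or macro) in include/linux/hugetlb.h wrapping the same HPAGE_SHIFT check, roughly along these lines (a sketch of the assumed helper, not part of this diff):

    /*
     * Some platforms decide whether they support huge pages at boot
     * time. On those (such as powerpc) HPAGE_SHIFT is set to 0 when
     * there is no such support.
     */
    #ifndef hugepages_supported
    #define hugepages_supported() (HPAGE_SHIFT != 0)
    #endif

Centralizing the check in one helper lets every entry point below (init, the sysctl handlers, the meminfo reporting) bail out early on kernels where huge pages were disabled at boot.
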
@@ -2010,6 +2011,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->max_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2075,6 +2079,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        unsigned long tmp;
        int ret;
 
+       if (!hugepages_supported())
+               return -ENOTSUPP;
+
        tmp = h->nr_overcommit_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
@@ -2100,6 +2107,8 @@ out:
 void hugetlb_report_meminfo(struct seq_file *m)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return;
        seq_printf(m,
                        "HugePages_Total:   %5lu\n"
                        "HugePages_Free:    %5lu\n"
@@ -2116,6 +2125,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
 int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        struct hstate *h = &default_hstate;
+       if (!hugepages_supported())
+               return 0;
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n"
@@ -2418,9 +2429,10 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * Migrating hugepage or HWPoisoned hugepage is already
+                * unmapped and its refcount is dropped
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+               if (unlikely(!pte_present(pte)))
                        continue;
 
                page = pte_page(pte);
@@ -2500,6 +2512,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (iter_vma == vma)
                        continue;
 
+               /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting but it is possible that a shared
+                * VMA is using the same page so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
@@ -2809,12 +2829,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
+       } else {
+               ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+               if (!ptep)
+                       return VM_FAULT_OOM;
        }
 
-       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
-       if (!ptep)
-               return VM_FAULT_OOM;
-
        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
@@ -3078,6 +3098,14 @@ int hugetlb_reserve_pages(struct inode *inode,
        struct hstate *h = hstate_inode(inode);
        struct hugepage_subpool *spool = subpool_inode(inode);
 
+       /* This should never happen */
+       if (from > to) {
+#ifdef CONFIG_DEBUG_VM
+               WARN(1, "%s called with a negative range\n", __func__);
+#endif
+               return -EINVAL;
+       }
+
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page