mm: larger stack guard gap, between vmas
[pandora-kernel.git] / arch/sparc/mm/hugetlbpage.c
index f4e9764..e13e85d 100644
@@ -5,7 +5,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
@@ -34,7 +33,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
-       unsigned long start_addr;
+       unsigned long start_addr, vm_start;
 
        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
@@ -68,15 +67,17 @@ full_search:
                        }
                        return -ENOMEM;
                }
-               if (likely(!vma || addr + len <= vma->vm_start)) {
+               if (vma)
+                       vm_start = vm_start_gap(vma);
+               if (likely(!vma || addr + len <= vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
+               if (addr + mm->cached_hole_size < vm_start)
+                       mm->cached_hole_size = vm_start - addr;
 
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
@@ -91,6 +92,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
+       unsigned long vm_start;
 
        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -107,7 +109,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
-               if (!vma || addr <= vma->vm_start) {
+               if (!vma || addr <= vm_start_gap(vma)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
@@ -125,18 +127,20 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
-               if (likely(!vma || addr+len <= vma->vm_start)) {
+               if (vma)
+                       vm_start = vm_start_gap(vma);
+               if (likely(!vma || addr + len <= vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }
 
                /* remember the largest hole we saw so far */
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
+               if (addr + mm->cached_hole_size < vm_start)
+                       mm->cached_hole_size = vm_start - addr;
 
                /* try just below the current vma->vm_start */
-               addr = (vma->vm_start-len) & HPAGE_MASK;
-       } while (likely(len < vma->vm_start));
+               addr = (vm_start - len) & HPAGE_MASK;
+       } while (likely(len < vm_start));
 
 bottomup:
        /*
@@ -183,7 +187,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
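
Note: this backport replaces direct vma->vm_start comparisons with vm_start_gap(vma), so that a hugepage mapping is never placed inside the guard gap kept below a VM_GROWSDOWN (stack) vma. The helper itself is not part of this file; the following is a minimal sketch of how it is defined in the mainline version of this change, assuming the pandora-kernel backport carries the same definition in include/linux/mm.h:

    /* Size of the guard gap kept below a downward-growing stack
     * (mainline default: 256 pages, tunable via the stack_guard_gap=
     * boot parameter). */
    extern unsigned long stack_guard_gap;

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            /* For a stack vma, report its start as if the guard gap were
             * already part of the mapping, clamping at 0 on underflow. */
            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)
                            vm_start = 0;
            }
            return vm_start;
    }

With this helper, the bottomup and topdown search loops above treat the gap as belonging to the neighbouring stack vma, so new hugepage mappings keep at least stack_guard_gap bytes of distance from it rather than the old single guard page.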