diff --git a/mm/mmap.c b/mm/mmap.c
index 926d030..da3fa90 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -937,9 +937,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
+               locked = len >> PAGE_SHIFT;
+               locked += mm->locked_vm;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-               locked += len;
+               lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
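
Both sides of the rewritten VM_LOCKED test are now in pages: the request length is shifted down by PAGE_SHIFT, added to mm->locked_vm (already a page count), and RLIMIT_MEMLOCK, which is stored in bytes, is shifted down as well. A minimal stand-alone restatement of that arithmetic; the helper name and parameters are invented for illustration:

	/* Sketch only: page-unit version of the mlock limit test above. */
	static int mlock_would_exceed(unsigned long locked_vm_pages,
				      unsigned long len_bytes,
				      unsigned long memlock_rlim_bytes,
				      int has_cap_ipc_lock)
	{
		unsigned long locked = (len_bytes >> PAGE_SHIFT) + locked_vm_pages;
		unsigned long lock_limit = memlock_rlim_bytes >> PAGE_SHIFT;

		return locked > lock_limit && !has_cap_ipc_lock;
	}
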
@@ -1009,8 +1010,7 @@ munmap_back:
        }
 
        /* Check against address space limit. */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->signal->rlim[RLIMIT_AS].rlim_cur)
+       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (accountable && (!(flags & MAP_NORESERVE) ||
@@ -1175,7 +1175,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
-       start_addr = addr = mm->free_area_cache;
+       if (len > mm->cached_hole_size) {
+               start_addr = addr = mm->free_area_cache;
+       } else {
+               start_addr = addr = TASK_UNMAPPED_BASE;
+               mm->cached_hole_size = 0;
+       }
 
 full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
@@ -1186,7 +1191,9 @@ full_search:
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = addr = TASK_UNMAPPED_BASE;
+                               addr = TASK_UNMAPPED_BASE;
+                               start_addr = addr;
+                               mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
@@ -1198,19 +1205,22 @@ full_search:
                        mm->free_area_cache = addr + len;
                        return addr;
                }
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
        }
 }
 #endif 
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
        /*
         * Is this a new hole at the lowest possible address?
         */
-       if (area->vm_start >= TASK_UNMAPPED_BASE &&
-                       area->vm_start < area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_start;
+       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+               mm->free_area_cache = addr;
+               mm->cached_hole_size = ~0UL;
+       }
 }
 
 /*
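
Taken together, the hunks above give the bottom-up allocator a cheap first-fit cache: mm->free_area_cache is where the last successful search ended, and mm->cached_hole_size remembers the largest gap that search skipped. A request no bigger than the cached hole restarts from TASK_UNMAPPED_BASE, since something below the cache can fit it; a bigger request resumes at the cache and records any larger gap it walks past; arch_unmap_area() resets both fields when a vma is removed below the cache. A toy model of just the cache policy, with struct and function names invented for illustration:

	struct hole_cache {
		unsigned long free_area_cache;	/* resume point of the last search */
		unsigned long cached_hole_size;	/* largest hole skipped below it   */
	};

	/* Where should a bottom-up search for 'len' bytes start? */
	static unsigned long search_start(struct hole_cache *c,
					  unsigned long base, unsigned long len)
	{
		if (len > c->cached_hole_size)
			return c->free_area_cache;	/* nothing below can fit it */
		c->cached_hole_size = 0;		/* rescan from the bottom   */
		return base;				/* i.e. TASK_UNMAPPED_BASE  */
	}

	/* Called for each vma the walk steps over without finding room. */
	static void record_skipped_hole(struct hole_cache *c,
					unsigned long addr, unsigned long vma_start)
	{
		if (addr + c->cached_hole_size < vma_start)
			c->cached_hole_size = vma_start - addr;
	}

	/* Mirror of arch_unmap_area(): a new low hole invalidates the cache. */
	static void note_unmap_below(struct hole_cache *c, unsigned long addr)
	{
		c->free_area_cache = addr;
		c->cached_hole_size = ~0UL;
	}
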
@@ -1240,17 +1250,26 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        return addr;
        }
 
+       /* check if free_area_cache is useful for us */
+       if (len <= mm->cached_hole_size) {
+               mm->cached_hole_size = 0;
+               mm->free_area_cache = mm->mmap_base;
+       }
+
        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
 
        /* make sure it can fit in the remaining address space */
-       if (addr >= len) {
+       if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
        }
 
+       if (mm->mmap_base < len)
+               goto bottomup;
+
        addr = mm->mmap_base-len;
 
        do {
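
The top-down variant uses the same cache the other way round: a request that fits in the largest hole recorded so far restarts from mm->mmap_base, anything else resumes at free_area_cache. The two new comparisons are also about unsigned arithmetic: tightening addr >= len to addr > len stops the code from proposing address zero, and the mm->mmap_base < len bail-out keeps mm->mmap_base - len from wrapping around. A tiny user-space illustration of that wrap; plain C, nothing kernel-specific:

	#include <stdio.h>

	int main(void)
	{
		unsigned long mmap_base = 0x1000;	/* pretend mmap base       */
		unsigned long len = 0x2000;		/* request larger than it  */

		/* Without the "mmap_base < len" guard this underflows ... */
		unsigned long bogus = mmap_base - len;

		/* ... producing a huge "address" near the top of the range. */
		printf("mmap_base - len = %#lx\n", bogus);
		return 0;
	}
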
@@ -1264,75 +1283,85 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
 
+               /* remember the largest hole we saw so far */
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
+
                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
-       } while (len <= vma->vm_start);
+       } while (len < vma->vm_start);
 
+bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       mm->cached_hole_size = ~0UL;
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
+       mm->cached_hole_size = ~0UL;
 
        return addr;
 }
 #endif
 
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 {
        /*
         * Is this a new hole at the highest possible address?
         */
-       if (area->vm_end > area->vm_mm->free_area_cache)
-               area->vm_mm->free_area_cache = area->vm_end;
+       if (addr > mm->free_area_cache)
+               mm->free_area_cache = addr;
 
        /* dont allow allocations above current base */
-       if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
-               area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+       if (mm->free_area_cache > mm->mmap_base)
+               mm->free_area_cache = mm->mmap_base;
 }
 
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
-       if (flags & MAP_FIXED) {
-               unsigned long ret;
+       unsigned long ret;
 
-               if (addr > TASK_SIZE - len)
-                       return -ENOMEM;
-               if (addr & ~PAGE_MASK)
-                       return -EINVAL;
-               if (file && is_file_hugepages(file))  {
-                       /*
-                        * Check if the given range is hugepage aligned, and
-                        * can be made suitable for hugepages.
-                        */
-                       ret = prepare_hugepage_range(addr, len);
-               } else {
-                       /*
-                        * Ensure that a normal request is not falling in a
-                        * reserved hugepage range.  For some archs like IA-64,
-                        * there is a separate region for hugepages.
-                        */
-                       ret = is_hugepage_only_range(current->mm, addr, len);
-               }
-               if (ret)
-                       return -EINVAL;
-               return addr;
-       }
+       if (!(flags & MAP_FIXED)) {
+               unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-       if (file && file->f_op && file->f_op->get_unmapped_area)
-               return file->f_op->get_unmapped_area(file, addr, len,
-                                               pgoff, flags);
+               get_area = current->mm->get_unmapped_area;
+               if (file && file->f_op && file->f_op->get_unmapped_area)
+                       get_area = file->f_op->get_unmapped_area;
+               addr = get_area(file, addr, len, pgoff, flags);
+               if (IS_ERR_VALUE(addr))
+                       return addr;
+       }
 
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       if (addr > TASK_SIZE - len)
+               return -ENOMEM;
+       if (addr & ~PAGE_MASK)
+               return -EINVAL;
+       if (file && is_file_hugepages(file))  {
+               /*
+                * Check if the given range is hugepage aligned, and
+                * can be made suitable for hugepages.
+                */
+               ret = prepare_hugepage_range(addr, len);
+       } else {
+               /*
+                * Ensure that a normal request is not falling in a
+                * reserved hugepage range.  For some archs like IA-64,
+                * there is a separate region for hugepages.
+                */
+               ret = is_hugepage_only_range(current->mm, addr, len);
+       }
+       if (ret)
+               return -EINVAL;
+       return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
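
get_unmapped_area() is reorganised around a function pointer: unless MAP_FIXED was given, it resolves either the per-mm default (current->mm->get_unmapped_area) or the file's ->get_unmapped_area hook, and the TASK_SIZE, page-alignment and hugepage checks now run on the chosen address in every case rather than only for MAP_FIXED. Errors still come back in-band as non-page-aligned values, which is what the IS_ERR_VALUE() test and the callers rely on; the call site in do_mmap_pgoff looks roughly like this (paraphrased):

	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)		/* catches encoded -errno values too */
		return addr;
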
@@ -1421,7 +1450,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
        struct rlimit *rlim = current->signal->rlim;
 
        /* address space limit tests */
-       if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT)
+       if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
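
The same open-coded RLIMIT_AS comparison disappears from the stack-growth path. Note that grow is already a page count here: the stack-expansion callers shift the byte delta down before calling in, so it can be handed straight to may_expand_vm(). Roughly, paraphrased from the upward stack-growth path of this kernel generation:

	size = address - vma->vm_start;			/* new total size, bytes    */
	grow = (address - vma->vm_end) >> PAGE_SHIFT;	/* growth, in pages         */
	error = acct_stack_growth(vma, size, grow);	/* -ENOMEM if over a limit  */
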
@@ -1592,7 +1621,6 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
        if (area->vm_flags & VM_LOCKED)
                area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
        vm_stat_unaccount(area);
-       area->vm_mm->unmap_area(area);
        remove_vm_struct(area);
 }
 
@@ -1602,14 +1630,13 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
  * Ok - we have the memory areas we should free on the 'free' list,
  * so release them, and do the vma updates.
  */
-static void unmap_vma_list(struct mm_struct *mm,
-       struct vm_area_struct *mpnt)
+static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        do {
-               struct vm_area_struct *next = mpnt->vm_next;
-               unmap_vma(mm, mpnt);
-               mpnt = next;
-       } while (mpnt != NULL);
+               struct vm_area_struct *next = vma->vm_next;
+               unmap_vma(mm, vma);
+               vma = next;
+       } while (vma);
        validate_mm(mm);
 }
 
@@ -1631,7 +1658,7 @@ static void unmap_region(struct mm_struct *mm,
        tlb = tlb_gather_mmu(mm, 0);
        unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, prev? prev->vm_end: 0,
+       free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
        spin_unlock(&mm->page_table_lock);
@@ -1647,6 +1674,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct vm_area_struct **insertion_point;
        struct vm_area_struct *tail_vma = NULL;
+       unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        do {
@@ -1657,6 +1685,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
        tail_vma->vm_next = NULL;
+       if (mm->unmap_area == arch_unmap_area)
+               addr = prev ? prev->vm_end : mm->mmap_base;
+       else
+               addr = vma ?  vma->vm_start : mm->mmap_base;
+       mm->unmap_area(mm, addr);
        mm->mmap_cache = NULL;          /* Kill the cache. */
 }
 
@@ -1720,7 +1753,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
        unsigned long end;
-       struct vm_area_struct *mpnt, *prev, *last;
+       struct vm_area_struct *vma, *prev, *last;
 
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
                return -EINVAL;
@@ -1729,14 +1762,14 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                return -EINVAL;
 
        /* Find the first overlapping VMA */
-       mpnt = find_vma_prev(mm, start, &prev);
-       if (!mpnt)
+       vma = find_vma_prev(mm, start, &prev);
+       if (!vma)
                return 0;
-       /* we have  start < mpnt->vm_end  */
+       /* we have  start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
        end = start + len;
-       if (mpnt->vm_start >= end)
+       if (vma->vm_start >= end)
                return 0;
 
        /*
@@ -1746,11 +1779,11 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
         * unmapped vm_area_struct will remain in use: so lower split_vma
         * places tmp vma above, and higher split_vma places tmp vma below.
         */
-       if (start > mpnt->vm_start) {
-               int error = split_vma(mm, mpnt, start, 0);
+       if (start > vma->vm_start) {
+               int error = split_vma(mm, vma, start, 0);
                if (error)
                        return error;
-               prev = mpnt;
+               prev = vma;
        }
 
        /* Does it split the last one? */
@@ -1760,16 +1793,16 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                if (error)
                        return error;
        }
-       mpnt = prev? prev->vm_next: mm->mmap;
+       vma = prev? prev->vm_next: mm->mmap;
 
        /*
         * Remove the vma's, and unmap the actual pages
         */
-       detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
-       unmap_region(mm, mpnt, prev, start, end);
+       detach_vmas_to_be_unmapped(mm, vma, prev, end);
+       unmap_region(mm, vma, prev, start, end);
 
        /* Fix up all other VM information */
-       unmap_vma_list(mm, mpnt);
+       unmap_vma_list(mm, vma);
 
        return 0;
 }
@@ -1824,9 +1857,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         */
        if (mm->def_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
+               locked = len >> PAGE_SHIFT;
+               locked += mm->locked_vm;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-               locked += len;
+               lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
@@ -1849,8 +1883,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        }
 
        /* Check against address space limits *after* clearing old maps... */
-       if ((mm->total_vm << PAGE_SHIFT) + len
-           > current->signal->rlim[RLIMIT_AS].rlim_cur)
+       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
@@ -1900,6 +1933,7 @@ void exit_mmap(struct mm_struct *mm)
        struct mmu_gather *tlb;
        struct vm_area_struct *vma = mm->mmap;
        unsigned long nr_accounted = 0;
+       unsigned long end;
 
        lru_add_drain();
 
@@ -1908,10 +1942,10 @@ void exit_mmap(struct mm_struct *mm)
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       mm->map_count -= unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+       end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, 0, 0);
-       tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       tlb_finish_mmu(tlb, 0, end);
 
        mm->mmap = mm->mmap_cache = NULL;
        mm->mm_rb = RB_ROOT;
@@ -1931,8 +1965,7 @@ void exit_mmap(struct mm_struct *mm)
                vma = next;
        }
 
-       BUG_ON(mm->map_count);  /* This is just debugging */
-       BUG_ON(mm->nr_ptes);    /* This is just debugging */
+       BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
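
The relaxed debugging check accounts for the page tables that free_pgtables() now deliberately leaves in place below FIRST_USER_ADDRESS (see the floor arguments changed above). The bound is simply the number of pte pages, rounded up to whole pmd units, needed to cover [0, FIRST_USER_ADDRESS):

	(FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT

On architectures where FIRST_USER_ADDRESS is 0 this evaluates to (PMD_SIZE - 1) >> PMD_SHIFT == 0, so the test is still effectively BUG_ON(mm->nr_ptes), exactly as before; where FIRST_USER_ADDRESS is non-zero, that many low pte pages may legitimately survive exit_mmap().
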
@@ -2020,3 +2053,19 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        }
        return new_vma;
 }
+
+/*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+ */
+int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+{
+       unsigned long cur = mm->total_vm;       /* pages */
+       unsigned long lim;
+
+       lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+
+       if (cur + npages > lim)
+               return 0;
+       return 1;
+}