X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmmap.c;h=94f4e3444ae55e2cad123c6c952bf0880e5e8d3d;hb=51017990da152ae5e9bd9edba20f709a7211623e;hp=eae90af60ea62e066defeab345e5079f57a80dc2;hpb=866d43c9ea88daa3751b58aba16a2a9b7f7aa067;p=pandora-kernel.git

diff --git a/mm/mmap.c b/mm/mmap.c
index eae90af60ea6..94f4e3444ae5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -111,7 +111,7 @@ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
  */
 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
-	unsigned long free, allowed;
+	long free, allowed;
 
 	vm_acct_memory(pages);
 
@@ -537,9 +537,12 @@ again:			remove_next = 1 + (end > next->vm_end);
 		 * shrinking vma had, to cover any anon pages imported.
 		 */
 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
-			if (anon_vma_clone(importer, exporter))
-				return -ENOMEM;
+			int error;
+
 			importer->anon_vma = exporter->anon_vma;
+			error = anon_vma_clone(importer, exporter);
+			if (error)
+				return error;
 		}
 	}
 
@@ -796,7 +799,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 				end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(prev);
+		khugepaged_enter_vma_merge(prev, vm_flags);
 		return prev;
 	}
 
@@ -815,7 +818,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 				next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(area);
+		khugepaged_enter_vma_merge(area, vm_flags);
 		return area;
 	}
 
@@ -1368,7 +1371,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
 
-	if (len > TASK_SIZE)
+	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -1377,7 +1380,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
@@ -1442,9 +1445,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
 
 	/* requested length too big for entire address space */
-	if (len > TASK_SIZE)
+	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -1454,7 +1458,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
@@ -1469,14 +1473,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	addr = mm->free_area_cache;
 
 	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
+	if (addr >= low_limit + len) {
 		vma = find_vma(mm, addr-len);
 		if (!vma || addr <= vma->vm_start)
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 	}
 
-	if (mm->mmap_base < len)
+	if (mm->mmap_base < low_limit + len)
 		goto bottomup;
 
 	addr = mm->mmap_base-len;
@@ -1498,7 +1502,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
+	} while (vma->vm_start >= low_limit + len);
 
 bottomup:
 	/*
@@ -1573,7 +1577,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	if (mm) {
 		/* Check the cache first. */
 		/* (Cache hit rate is typically around 35%.) */
-		vma = mm->mmap_cache;
+		vma = ACCESS_ONCE(mm->mmap_cache);
 		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
 			struct rb_node * rb_node;
 
@@ -1647,14 +1651,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start;
+	unsigned long new_start, actual_size;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	actual_size = size;
+	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+		actual_size -= PAGE_SIZE;
+	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -1740,7 +1747,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1791,7 +1798,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	return error;
 }
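
A note on why the first hunk changes "unsigned long free, allowed" to "long free, allowed": __vm_enough_memory() subtracts reserved page counts from the free count, and with unsigned arithmetic a subtraction that should go negative instead wraps around to a huge value, so the overcommit check can wrongly succeed under memory pressure. The standalone sketch below is ordinary userspace C with illustrative values, not the kernel's actual accounting; it only demonstrates the wraparound the signedness change guards against.

#include <stdio.h>

int main(void)
{
	/* Illustrative page counts; the kernel uses per-zone and per-cpu
	 * counters, not single variables like these. */
	unsigned long free_pages = 100;		/* pages currently free */
	unsigned long reserve    = 200;		/* pages that must stay free */

	/* Unsigned subtraction wraps around zero, so "free" looks enormous
	 * and a later "is there enough left?" comparison passes. */
	unsigned long wrapped = free_pages - reserve;

	/* With signed arithmetic the result goes negative and can be
	 * detected and rejected, which is what the patch enables. */
	long signed_diff = (long)free_pages - (long)reserve;

	printf("unsigned result: %lu\n", wrapped);
	printf("signed result:   %ld\n", signed_diff);
	return 0;
}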