X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmmap.c;h=37a72e4d437be73d4bdefc629d4ad87ae01df603;hb=2e8d6ed89dc84f4f7a82c57e10125f6fc3c98d9d;hp=48f6efd7745f41ca2f3455a6c152a767ca16e982;hpb=c529da02159cc5e82f7af69f8bde7d572cab6896;p=pandora-kernel.git

diff --git a/mm/mmap.c b/mm/mmap.c
index 48f6efd7745f..37a72e4d437b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -245,6 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long rlim, retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 
 	down_write(&mm->mmap_sem);
@@ -289,7 +290,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -541,10 +543,8 @@ again:			remove_next = 1 + (end > next->vm_end);
 
 			importer->anon_vma = exporter->anon_vma;
 			error = anon_vma_clone(importer, exporter);
-			if (error) {
-				importer->anon_vma = NULL;
+			if (error)
 				return error;
-			}
 		}
 	}
 
@@ -1370,8 +1370,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_area_struct *vma, *prev;
+	unsigned long start_addr, vm_start, prev_end;
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -1381,9 +1381,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 	if (len > mm->cached_hole_size) {
@@ -1394,7 +1395,17 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	}
 
 full_search:
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+	for (vma = find_vma_prev(mm, addr, &prev); ; prev = vma,
+						vma = vma->vm_next) {
+		if (prev) {
+			prev_end = vm_end_gap(prev);
+			if (addr < prev_end) {
+				addr = prev_end;
+				/* If vma already violates gap, forget it */
+				if (vma && addr > vma->vm_start)
+					addr = vma->vm_start;
+			}
+		}
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		if (TASK_SIZE - len < addr) {
 			/*
@@ -1409,16 +1420,16 @@ full_search:
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		vm_start = vma ? vm_start_gap(vma) : TASK_SIZE;
+		if (addr + len <= vm_start) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 	}
 }
 #endif
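Note: vm_start_gap() and vm_end_gap(), used throughout the hunks above, are presumably supplied by the matching include/linux/mm.h change, which this blobdiff of mm/mmap.c does not show. Going by the upstream commit this backports ("mm: larger stack guard gap, between vmas"), they look roughly like the sketch below: each widens a stack VMA's apparent extent by stack_guard_gap on its growing side, clamped so the arithmetic cannot wrap. Details may differ in the Pandora tree.

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* wrapped below 0: clamp */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* wrapped past ~0UL: clamp */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}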
@@ -1444,9 +1455,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	unsigned long vm_start, prev_end;
 	unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
 
 	/* requested length too big for entire address space */
@@ -1459,9 +1471,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -1476,8 +1489,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	/* make sure it can fit in the remaining address space */
 	if (addr >= low_limit + len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
+		vma = find_vma_prev(mm, addr-len, &prev);
+		if ((!vma || addr <= vm_start_gap(vma)) &&
+		    (!prev || addr-len >= vm_end_gap(prev)))
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 	}
@@ -1493,18 +1507,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * else if new region fits below vma->vm_start,
 		 * return with success:
 		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
+		vma = find_vma_prev(mm, addr, &prev);
+		vm_start = vma ? vm_start_gap(vma) : mm->mmap_base;
+		prev_end = prev ? vm_end_gap(prev) : low_limit;
+
+		if (addr + len <= vm_start && addr >= prev_end)
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
 		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
+		if (addr + mm->cached_hole_size < vm_start)
+			mm->cached_hole_size = vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (vma->vm_start >= low_limit + len);
+		addr = vm_start - len;
+	} while (vm_start >= low_limit + len);
 
 bottomup:
 	/*
@@ -1609,39 +1626,19 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ * Note: pprev is set to NULL when return value is NULL.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
 			struct vm_area_struct **pprev)
 {
-	struct vm_area_struct *vma = NULL, *prev = NULL;
-	struct rb_node *rb_node;
-	if (!mm)
-		goto out;
-
-	/* Guard against addr being lower than the first VMA */
-	vma = mm->mmap;
-
-	/* Go through the RB tree quickly. */
-	rb_node = mm->mm_rb.rb_node;
-
-	while (rb_node) {
-		struct vm_area_struct *vma_tmp;
-		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
-		if (addr < vma_tmp->vm_end) {
-			rb_node = rb_node->rb_left;
-		} else {
-			prev = vma_tmp;
-			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-				break;
-			rb_node = rb_node->rb_right;
-		}
-	}
+	struct vm_area_struct *vma;
 
-out:
-	*pprev = prev;
-	return prev ? prev->vm_next : vma;
+	vma = find_vma(mm, addr);
+	*pprev = vma ? vma->vm_prev : NULL;
+	return vma;
 }
 
 /*
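Both allocators now apply the same two-sided test: a candidate range must end at or before the gap-adjusted start of the following VMA and begin at or after the gap-adjusted end of the preceding one. A hypothetical helper (not part of the patch) that condenses the check, leaning on the simplified find_vma_prev() above, which now just follows the vma->vm_prev link instead of re-walking the rbtree:

/* Hypothetical kernel-internal sketch, not in the patch: true when
 * [addr, addr + len) respects the guard gap on both sides. */
static bool range_fits(struct mm_struct *mm, unsigned long addr,
		       unsigned long len)
{
	struct vm_area_struct *vma, *prev;

	/* find_vma_prev() returns the first vma ending above addr and
	 * stores its predecessor (or NULL) in prev. */
	vma = find_vma_prev(mm, addr, &prev);
	return (!vma || addr + len <= vm_start_gap(vma)) &&
	       (!prev || addr >= vm_end_gap(prev));
}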
@@ -1649,21 +1646,19 @@ out:
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+		unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -1705,32 +1700,43 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
-	int error;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
+		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	vma_lock_anon_vma(vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
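acct_stack_growth() loses its actual_size special case because the guard is no longer a page carved out of the stack VMA itself: the full mapping size now counts against RLIMIT_STACK, and the protective gap is instead enforced against neighbouring mappings, as expand_upwards() above shows. These expansion paths are reached from the architecture fault handlers through expand_stack(); a simplified sketch of a 3.2-era caller, modelled on the x86 do_page_fault() of that period (details vary by architecture):

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, address)))
		goto bad_area;	/* now fails once the fault lands in the gap */

With the default gap of 256 pages, a fault that would bring the stack within 1 MiB (at 4 KiB pages) of the next mapping is refused with -ENOMEM rather than silently expanding up to it.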
- */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - address &= PAGE_MASK; error = security_file_mmap(NULL, 0, 0, 0, address, 1); if (error) return error; - vma_lock_anon_vma(vma); + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ + vma_lock_anon_vma(vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { @@ -1804,6 +1819,22 @@ int expand_downwards(struct vm_area_struct *vma, return error; } +/* enforced gap between the expanding stack and other mappings. */ +unsigned long stack_guard_gap = 256UL<