Merge branch 'rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuil...
index e38e910..331e51a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       struct vm_area_struct *next;
+
+       vma->vm_prev = prev;
        if (prev) {
-               vma->vm_next = prev->vm_next;
+               next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
+                       next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       vma->vm_next = NULL;
+                       next = NULL;
        }
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
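The first hunk turns the kernel's linear VMA list doubly linked: __vma_link_list factors the successor out into next, records vma->vm_prev, and patches the successor's new vm_prev back-pointer. A minimal user-space sketch of that invariant, using a stand-in struct (not the real vm_area_struct) and a hypothetical link_after() helper that takes an explicit list head where the kernel consults the rbtree:

```c
#include <assert.h>
#include <stddef.h>

struct vma {
	struct vma *vm_next;
	struct vma *vm_prev;
};

/* Insert @vma after @prev; prev == NULL means the front of *head. */
static void link_after(struct vma **head, struct vma *vma, struct vma *prev)
{
	struct vma *next = prev ? prev->vm_next : *head;

	vma->vm_prev = prev;
	vma->vm_next = next;
	if (prev)
		prev->vm_next = vma;
	else
		*head = vma;
	if (next)
		next->vm_prev = vma;	/* the back-pointer the patch adds */
}

int main(void)
{
	struct vma a = {0}, b = {0}, c = {0}, *head = NULL;

	link_after(&head, &a, NULL);	/* list: a */
	link_after(&head, &c, &a);	/* list: a <-> c */
	link_after(&head, &b, &a);	/* list: a <-> b <-> c */
	assert(head == &a && a.vm_next == &b && b.vm_prev == &a);
	assert(b.vm_next == &c && c.vm_prev == &b && !c.vm_next);
	return 0;
}
```

Hoisting next out of the two branches is what lets the patch append the shared back-pointer fixup once, after the if/else, instead of duplicating it in each branch.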
@@ -452,12 +458,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                spin_lock(&mapping->i_mmap_lock);
                vma->vm_truncate_count = mapping->truncate_count;
        }
-       anon_vma_lock(vma);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
-       anon_vma_unlock(vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
@@ -485,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
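__vma_unlink gets the mirror-image change: removing a VMA must now rewire the successor's vm_prev to point at the predecessor. Continuing the sketch with the same stand-in types (__vma_unlink is only called with a non-NULL prev, which the sketch assumes too):

```c
struct vma {
	struct vma *vm_next;
	struct vma *vm_prev;
};

/* Unlink @vma, which is assumed to have predecessor @prev. */
static void unlink_after(struct vma *prev, struct vma *vma)
{
	struct vma *next = vma->vm_next;

	prev->vm_next = next;
	if (next)
		next->vm_prev = prev;	/* the fixup this hunk adds */
}
```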
@@ -506,6 +514,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
+       struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
        long adjust_next = 0;
        int remove_next = 0;
@@ -578,6 +587,17 @@ again:                     remove_next = 1 + (end > next->vm_end);
                }
        }
 
+       /*
+        * When changing only vma->vm_end, we don't really need anon_vma
+        * lock. This is a fairly rare case by itself, but the anon_vma
+        * lock may be shared between many sibling processes.  Skipping
+        * the lock for brk adjustments makes a difference sometimes.
+        */
+       if (vma->anon_vma && (insert || importer || start != vma->vm_start)) {
+               anon_vma = vma->anon_vma;
+               anon_vma_lock(anon_vma);
+       }
+
        if (root) {
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, root);
@@ -617,6 +637,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                __insert_vm_struct(mm, insert);
        }
 
+       if (anon_vma)
+               anon_vma_unlock(anon_vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
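The vma_adjust() hunks add a locking rule rather than new structure: the anon_vma lock is taken only when insert, importer, or a moved vm_start shows that something other than vma->vm_end is changing, so pure brk()-style growth skips a lock that many sibling processes may share. A distilled restatement of that predicate, with stand-in types (not kernel code):

```c
#include <stdbool.h>

struct anon_vma;	/* opaque stand-in */

/* Mirror of the new test in vma_adjust(): only adjustments that touch
 * more than vma->vm_end need the (possibly widely shared) lock. */
static bool needs_anon_vma_lock(const struct anon_vma *anon_vma,
				const void *insert, const void *importer,
				unsigned long start, unsigned long vm_start)
{
	return anon_vma && (insert || importer || start != vm_start);
}
```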
@@ -1710,7 +1732,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1721,7 +1743,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (address < PAGE_ALIGN(address+4))
                address = PAGE_ALIGN(address+4);
        else {
-               anon_vma_unlock(vma);
+               vma_unlock_anon_vma(vma);
                return -ENOMEM;
        }
        error = 0;
@@ -1739,7 +1761,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
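The one non-mechanical detail in expand_upwards() survives the rename: comparing the old address against PAGE_ALIGN(address + 4) rejects a growth request that would wrap past the top of the address space, because aligning a wrapped value yields a small result and the < test fails. A user-space illustration, assuming a 4 KiB page and a local copy of the macro:

```c
#include <assert.h>

#define PAGE_SIZE  4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok  = 0x7fff0000UL;
	unsigned long top = ~0UL - 2;	/* 3 bytes below the wrap point */

	/* Normal case: the aligned-up target lies above the old address. */
	assert(ok < PAGE_ALIGN(ok + 4));
	/* Near the top, address + 4 wraps, aligning yields a tiny value,
	 * and the check fails, so expand_upwards() returns -ENOMEM. */
	assert(!(top < PAGE_ALIGN(top + 4)));
	return 0;
}
```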
@@ -1764,7 +1786,7 @@ static int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1786,7 +1808,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }
 
@@ -1903,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1910,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
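detach_vmas_to_be_unmapped() must now also repair back-pointers across the cut: the first detached VMA gets vm_prev = NULL, and the first survivor after the range points back at prev. A sketch of just that list surgery (the kernel loop additionally erases each node from the rbtree and drops map_count); detach_range() and the struct are stand-ins:

```c
#include <stddef.h>

struct vma {
	struct vma *vm_next;
	struct vma *vm_prev;
};

static void detach_range(struct vma **head, struct vma *prev,
			 struct vma *vma, struct vma *tail)
{
	struct vma **insertion_point = prev ? &prev->vm_next : head;
	struct vma *next = tail->vm_next;

	vma->vm_prev = NULL;		/* detached run has no predecessor */
	*insertion_point = next;	/* forward link jumps the gap */
	if (next)
		next->vm_prev = prev;	/* the back-pointer fixup the hunk adds */
	tail->vm_next = NULL;		/* terminate the detached run */
}
```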
@@ -2470,23 +2495,23 @@ static DEFINE_MUTEX(mm_all_locks_mutex);
 
 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
-       if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+               spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->lock. If some other vma in this mm shares
+                * anon_vma->root->lock. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->lock.
+                * anon_vma->root->lock.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
-                                      &anon_vma->head.next))
+                                      &anon_vma->root->head.next))
                        BUG();
        }
 }
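mm_take_all_locks() needs a per-anon_vma "already locked" marker without spending any extra storage; since list heads are pointer-aligned, bit 0 of head.next is always zero and can be borrowed, which is why this hunk only retargets the trick from anon_vma->head.next to anon_vma->root->head.next rather than changing it. A user-space illustration of the bit-borrowing, with plain bit ops standing in for the kernel's test_bit()/__test_and_set_bit():

```c
#include <assert.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

/* Non-atomic stand-ins operating on bit 0 of a pointer-sized word. */
static bool marked(const struct list_head *h)
{
	return (unsigned long)h->next & 1UL;
}

static bool mark(struct list_head *h)	/* returns the previous state */
{
	bool old = marked(h);

	h->next = (struct list_head *)((unsigned long)h->next | 1UL);
	return old;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */

	assert(!marked(&head));	/* aligned pointer => bit 0 is free */
	assert(!mark(&head));	/* first marking: bit was clear */
	assert(marked(&head));	/* anon_vma now counts as locked */
	return 0;
}
```

Going through anon_vma->root in both the lock and unlock paths lines up with the 2.6.36-era rework in which every anon_vma in a process tree shares its root's spinlock, so the marker has to live on the shared root as well.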
@@ -2577,7 +2602,7 @@ out_unlock:
 
 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-       if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change to 0 from under
                 * us because we hold the mm_all_locks_mutex.
@@ -2588,12 +2613,12 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->lock.
+                * anon_vma->root->lock.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
-                                         &anon_vma->head.next))
+                                         &anon_vma->root->head.next))
                        BUG();
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
 }