/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code        <alan@redhat.com>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
 * unless you know what you are doing.
 */
#undef DEBUG_MM_RB

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
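
/*
 * protection_map is indexed by the low four vm_flags bits
 * (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED): entries 0-7 are the
 * private (__P) protections, 8-15 the shared (__S) ones.  For example,
 * a MAP_PRIVATE PROT_READ|PROT_WRITE mapping indexes entry 3, __P011,
 * which on x86 is a read-only pte so that the first write faults and
 * copy-on-write can take place.
 */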

int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio = 50;       /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
atomic_t vm_committed_space = ATOMIC_INIT(0);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(long pages, int cap_sys_admin)
{
        unsigned long free, allowed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;

                free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
                free += atomic_read(&slab_reclaim_pages);

                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
                        free -= free / 32;

                if (free > pages)
                        return 0;

                /*
                 * nr_free_pages() is very expensive on large systems,
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();

                /*
                 * Leave out the reserved pages: they are not available
                 * for anonymous mappings.
                 */
                if (n <= totalreserve_pages)
                        goto error;
                else
                        n -= totalreserve_pages;

                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = (totalram_pages - hugetlb_total_pages())
                * sysctl_overcommit_ratio / 100;
        /*
         * Leave the last 3% for root
         */
        if (!cap_sys_admin)
                allowed -= allowed / 32;
        allowed += total_swap_pages;

        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
        allowed -= current->mm->total_vm / 32;

        /*
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
        if (atomic_read(&vm_committed_space) < (long)allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

EXPORT_SYMBOL(__vm_enough_memory);

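/*
 * Illustrative numbers for the OVERCOMMIT_NEVER path above: with
 * 1048576 pages of RAM (4GiB at 4KiB pages), no hugepages, the
 * default overcommit_ratio of 50 and 262144 pages (1GiB) of swap,
 * a non-root process may commit about
 * 1048576 * 50/100 - 16384 (the 3% kept for root) + 262144 = 770048
 * pages, less 3% of its own total_vm, before -ENOMEM is returned.
 */
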
/*
 * Requires inode->i_mapping->i_mmap_lock
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
{
        if (vma->vm_flags & VM_DENYWRITE)
                atomic_inc(&file->f_dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;

        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
                list_del_init(&vma->shared.vm_set.list);
        else
                vma_prio_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its prio_tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;

        if (file) {
                struct address_space *mapping = file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                __remove_shared_vm_struct(vma, file, mapping);
                spin_unlock(&mapping->i_mmap_lock);
        }
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *next = vma->vm_next;

        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_free(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
}

asmlinkage unsigned long sys_brk(unsigned long brk)
{
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;

        down_write(&mm->mmap_sem);

        if (brk < mm->end_code)
                goto out;

        /*
         * Check against rlimit here. If this check is done later after the
         * test of oldbrk with newbrk then it can escape the test and let the
         * data segment grow beyond its set limit in the case where the limit
         * is not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
        if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
                goto out;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
                goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                if (!do_munmap(mm, newbrk, oldbrk-newbrk))
                        goto set_brk;
                goto out;
        }

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;

        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
                goto out;
set_brk:
        mm->brk = brk;
out:
        retval = mm->brk;
        up_write(&mm->mmap_sem);
        return retval;
}

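/*
 * For example, a C library may implement sbrk() on top of this
 * syscall, growing the heap by 64KiB with something like:
 *
 *      void *cur = sbrk(0);
 *      brk((char *)cur + 65536);
 *
 * Growth beyond RLIMIT_DATA or into an existing mapping leaves
 * mm->brk unchanged; in all cases the syscall returns the resulting
 * break rather than an errno, so callers compare it against what
 * they asked for.
 */
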
#ifdef DEBUG_MM_RB
static int browse_rb(struct rb_root *root)
{
        int i = 0, j;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev)
                        printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
                if (vma->vm_start < pend)
                        printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
                if (vma->vm_start > vma->vm_end)
                        printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
                i++;
                pn = nd;
        }
        j = 0;
        for (nd = pn; nd; nd = rb_prev(nd)) {
                j++;
        }
        if (i != j)
                printk("backwards %d, forwards %d\n", j, i), i = 0;
        return i;
}

void validate_mm(struct mm_struct *mm)
{
        int bug = 0;
        int i = 0;
        struct vm_area_struct *tmp = mm->mmap;
        while (tmp) {
                tmp = tmp->vm_next;
                i++;
        }
        if (i != mm->map_count)
                printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count)
                printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
        BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
#endif

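/*
 * Look up the first vma which ends above addr.  If a vma already
 * contains addr it is returned immediately; otherwise the vma found
 * (NULL if none) is the one a new mapping at addr must be checked
 * against, *pprev is set to the vma preceding addr, and
 * *rb_link/*rb_parent describe where a new vma starting at addr
 * would be linked into the rbtree.
 */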
static struct vm_area_struct *
find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                struct vm_area_struct **pprev, struct rb_node ***rb_link,
                struct rb_node **rb_parent)
{
        struct vm_area_struct *vma;
        struct rb_node **__rb_link, *__rb_parent, *rb_prev;

        __rb_link = &mm->mm_rb.rb_node;
        rb_prev = __rb_parent = NULL;
        vma = NULL;

        while (*__rb_link) {
                struct vm_area_struct *vma_tmp;

                __rb_parent = *__rb_link;
                vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

                if (vma_tmp->vm_end > addr) {
                        vma = vma_tmp;
                        if (vma_tmp->vm_start <= addr)
                                return vma;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
                        __rb_link = &__rb_parent->rb_right;
                }
        }

        *pprev = NULL;
        if (rb_prev)
                *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
        *rb_link = __rb_link;
        *rb_parent = __rb_parent;
        return vma;
}

static inline void
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        if (prev) {
                vma->vm_next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        vma->vm_next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        vma->vm_next = NULL;
        }
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                struct rb_node **rb_link, struct rb_node *rb_parent)
{
        rb_link_node(&vma->vm_rb, rb_parent, rb_link);
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static inline void __vma_link_file(struct vm_area_struct *vma)
{
        struct file *file;

        file = vma->vm_file;
        if (file) {
                struct address_space *mapping = file->f_mapping;

                if (vma->vm_flags & VM_DENYWRITE)
                        atomic_dec(&file->f_dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;

                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                else
                        vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *prev, struct rb_node **rb_link,
        struct rb_node *rb_parent)
{
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
        __anon_vma_link(vma);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct vm_area_struct *prev, struct rb_node **rb_link,
                        struct rb_node *rb_parent)
{
        struct address_space *mapping = NULL;

        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;

        if (mapping) {
                spin_lock(&mapping->i_mmap_lock);
                vma->vm_truncate_count = mapping->truncate_count;
        }
        anon_vma_lock(vma);

        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);

        anon_vma_unlock(vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);

        mm->map_count++;
        validate_mm(mm);
}

/*
 * Helper for vma_adjust in the split_vma insert case:
 * insert vm structure into list and rbtree and anon_vma,
 * but it has already been inserted into prio_tree earlier.
 */
static void
__insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *__vma, *prev;
        struct rb_node **rb_link, *rb_parent;

        __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
        BUG_ON(__vma && __vma->vm_start < vma->vm_end);
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
}

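/*
 * Unlink vma from the mm's vma list and rbtree.  prev must be vma's
 * list predecessor; the per-mm find_vma() cache is repointed at prev
 * if it was caching the vma being removed.
 */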
static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
{
        prev->vm_next = vma->vm_next;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
void vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
        struct file *file = vma->vm_file;
        struct anon_vma *anon_vma = NULL;
        long adjust_next = 0;
        int remove_next = 0;

        if (next && !insert) {
                if (end >= next->vm_end) {
                        /*
                         * vma expands, overlapping all the next, and
                         * perhaps the one after too (mprotect case 6).
                         */
again:                  remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
                        anon_vma = next->anon_vma;
                        importer = vma;
                } else if (end > next->vm_start) {
                        /*
                         * vma expands, overlapping part of the next:
                         * mprotect case 5 shifting the boundary up.
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        anon_vma = next->anon_vma;
                        importer = vma;
                } else if (end < vma->vm_end) {
                        /*
                         * vma shrinks, and !insert tells it's not
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
                        adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
                        anon_vma = next->anon_vma;
                        importer = next;
                }
        }

        if (file) {
                mapping = file->f_mapping;
                if (!(vma->vm_flags & VM_NONLINEAR))
                        root = &mapping->i_mmap;
                spin_lock(&mapping->i_mmap_lock);
                if (importer &&
                    vma->vm_truncate_count != next->vm_truncate_count) {
                        /*
                         * unmap_mapping_range might be in progress:
                         * ensure that the expanding vma is rescanned.
                         */
                        importer->vm_truncate_count = 0;
                }
                if (insert) {
                        insert->vm_truncate_count = vma->vm_truncate_count;
                        /*
                         * Put into prio_tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
                         */
                        __vma_link_file(insert);
                }
        }

        /*
         * When changing only vma->vm_end, we don't really need
         * anon_vma lock: but is that case worth optimizing out?
         */
        if (vma->anon_vma)
                anon_vma = vma->anon_vma;
        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                /*
                 * Easily overlooked: when mprotect shifts the boundary,
                 * make sure the expanding vma has anon_vma set if the
                 * shrinking vma had, to cover any anon pages imported.
                 */
                if (importer && !importer->anon_vma) {
                        importer->anon_vma = anon_vma;
                        __anon_vma_link(importer);
                }
        }

        if (root) {
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, root);
                if (adjust_next)
                        vma_prio_tree_remove(next, root);
        }

        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next << PAGE_SHIFT;
                next->vm_pgoff += adjust_next;
        }

        if (root) {
                if (adjust_next)
                        vma_prio_tree_insert(next, root);
                vma_prio_tree_insert(vma, root);
                flush_dcache_mmap_unlock(mapping);
        }

        if (remove_next) {
                /*
                 * vma_merge has merged next into vma, and needs
                 * us to remove next before dropping the locks.
                 */
                __vma_unlink(mm, next, vma);
                if (file)
                        __remove_shared_vm_struct(next, file, mapping);
                if (next->anon_vma)
                        __anon_vma_merge(vma, next);
        } else if (insert) {
                /*
                 * split_vma has split insert from vma, and needs
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
                __insert_vm_struct(mm, insert);
        }

        if (anon_vma)
                spin_unlock(&anon_vma->lock);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);

        if (remove_next) {
                if (file)
                        fput(file);
                mm->map_count--;
                mpol_free(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
                 * up the code too much to do both in one go.
                 */
                if (remove_next == 2) {
                        next = vma->vm_next;
                        goto again;
                }
        }

        validate_mm(mm);
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
{
        if (vma->vm_flags != vm_flags)
                return 0;
        if (vma->vm_file != file)
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
        return 1;
}

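/*
 * Two anon_vmas are compatible for merging when at most one of them
 * is set, or both point at the same anon_vma.
 */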
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
                                        struct anon_vma *anon_vma2)
{
        return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
        }
        return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
                        return 1;
        }
        return 0;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy)
{
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;

        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
         */
        if (vm_flags & VM_SPECIAL)
                return NULL;

        if (prev)
                next = prev->vm_next;
        else
                next = mm->mmap;
        area = next;
        if (next && next->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;

        /*
         * Can it merge with the predecessor?
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
                        can_vma_merge_after(prev, vm_flags,
                                                anon_vma, file, pgoff)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
                                can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma)) {
                                                        /* cases 1, 6 */
                        vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
                } else                                  /* cases 2, 5, 7 */
                        vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
                return prev;
        }

        /*
         * Can this new request be merged in front of next?
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
                else                                    /* cases 3, 8 */
                        vma_adjust(area, addr, next->vm_end,
                                next->vm_pgoff - pglen, NULL);
                return area;
        }

        return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *near;
        unsigned long vm_flags;

        near = vma->vm_next;
        if (!near)
                goto try_prev;

        /*
         * Since only mprotect tries to remerge vmas, match flags
         * which might be mprotected into each other later on.
         * Neither mlock nor madvise tries to remerge at present,
         * so leave their flags as obstructing a merge.
         */
        vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
        vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

        if (near->anon_vma && vma->vm_end == near->vm_start &&
                        mpol_equal(vma_policy(vma), vma_policy(near)) &&
                        can_vma_merge_before(near, vm_flags,
                                NULL, vma->vm_file, vma->vm_pgoff +
                                ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
                return near->anon_vma;
try_prev:
        /*
         * It is potentially slow to have to call find_vma_prev here.
         * But it's only on the first write fault on the vma, not
         * every time, and we could devise a way to avoid it later
         * (e.g. stash info in next's anon_vma_node when assigning
         * an anon_vma, or when trying vma_merge).  Another time.
         */
        BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
        if (!near)
                goto none;

        vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
        vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

        if (near->anon_vma && near->vm_end == vma->vm_start &&
                        mpol_equal(vma_policy(near), vma_policy(vma)) &&
                        can_vma_merge_after(near, vm_flags,
                                NULL, vma->vm_file, vma->vm_pgoff))
                return near->anon_vma;
none:
        /*
         * There's no absolute need to look only at touching neighbours:
         * we could search further afield for "compatible" anon_vmas.
         * But it would probably just be a waste of time searching,
         * or lead to too many vmas hanging off the same anon_vma.
         * We're trying to allow mprotect remerging later on,
         * not trying to minimize memory used for anon_vmas.
         */
        return NULL;
}

#ifdef CONFIG_PROC_FS
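/*
 * Account pages of a mapping with the given flags against the per-mm
 * statistics (shared_vm, exec_vm, stack_vm, reserved_vm) reported
 * through /proc.  Callers may pass a negative pages count when a
 * mapping shrinks or goes away.
 */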
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                                                struct file *file, long pages)
{
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                        mm->exec_vm += pages;
        } else if (flags & stack_flags)
                mm->stack_vm += pages;
        if (flags & (VM_RESERVED|VM_IO))
                mm->reserved_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * The caller must hold down_write(current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        struct inode *inode;
        unsigned int vm_flags;
        int correct_wcount = 0;
        int error;
        struct rb_node **rb_link, *rb_parent;
        int accountable = 1;
        unsigned long charged = 0, reqprot = prot;

        if (file) {
                if (is_file_hugepages(file))
                        accountable = 0;

                if (!file->f_op || !file->f_op->mmap)
                        return -ENODEV;

                if ((prot & PROT_EXEC) &&
                    (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
                        return -EPERM;
        }
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we don't add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;

        if (!len)
                return -EINVAL;

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len || len > TASK_SIZE)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;

        /* Obtain the address to map to. We verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;

        /* Do simple checking here so the lower-level routines won't have
         * to. We assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
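        /*
         * For example, a plain PROT_READ|PROT_WRITE, MAP_PRIVATE request
         * typically arrives here as VM_READ | VM_WRITE | VM_MAYREAD |
         * VM_MAYWRITE | VM_MAYEXEC (assuming mm->def_flags is empty),
         * before the file- and flag-specific adjustments below.
         */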

        if (flags & MAP_LOCKED) {
                if (!can_do_mlock())
                        return -EPERM;
                vm_flags |= VM_LOCKED;
        }
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }

        inode = file ? file->f_dentry->d_inode : NULL;

        if (file) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure there are no mandatory locks on the file.
                         */
                        if (locks_verify_locked(inode))
                                return -EAGAIN;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        error = security_file_mmap(file, reqprot, prot, flags);
        if (error)
                return error;

        /* Clear old maps */
        error = -ENOMEM;
munmap_back:
        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
        if (vma && vma->vm_start < addr + len) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
        }

        /* Check against address space limit. */
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;

        if (accountable && (!(flags & MAP_NORESERVE) ||
                            sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
                if (vm_flags & VM_SHARED) {
                        /* Check memory availability in shmem_file_setup? */
                        vm_flags |= VM_ACCOUNT;
                } else if (vm_flags & VM_WRITE) {
                        /*
                         * Private writable mapping: check memory availability
                         */
                        charged = len >> PAGE_SHIFT;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        vm_flags |= VM_ACCOUNT;
                }
        }

        /*
         * Can we just expand an old private anonymous mapping?
         * The VM_SHARED test is necessary because shmem_zero_setup
         * will create the file object for a shared anonymous map below.
         */
        if (!file && !(vm_flags & VM_SHARED) &&
            vma_merge(mm, prev, addr, addr + len, vm_flags,
                                        NULL, NULL, pgoff, NULL))
                goto out;

        /*
         * Determine the object being mapped and call the appropriate
         * specific mapper.  The address has already been validated, and
         * any old overlapping maps have been removed from the list above.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma->vm_pgoff = pgoff;

        if (file) {
                error = -EINVAL;
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
                        error = deny_write_access(file);
                        if (error)
                                goto free_vma;
                        correct_wcount = 1;
                }
                vma->vm_file = file;
                get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
        }

        /* Don't make the VMA automatically writable if it's shared, but the
         * backer wishes to know when pages are first written to. */
        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
                vma->vm_page_prot =
                        protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];

        /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
         * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
         * that memory reservation must be checked; but that reservation
         * belongs to shared memory object, not to vma: so now clear it.
         */
        if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
                vma->vm_flags &= ~VM_ACCOUNT;

        /* Can addr have changed??
         *
         * Answer: Yes, several device drivers can do it in their
         *         f_op->mmap method. -DaveM
         */
        addr = vma->vm_start;
        pgoff = vma->vm_pgoff;
        vm_flags = vma->vm_flags;

        if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
                file = vma->vm_file;
                vma_link(mm, vma, prev, rb_link, rb_parent);
                if (correct_wcount)
                        atomic_inc(&inode->i_writecount);
        } else {
                if (file) {
                        if (correct_wcount)
                                atomic_inc(&inode->i_writecount);
                        fput(file);
                }
                mpol_free(vma_policy(vma));
                kmem_cache_free(vm_area_cachep, vma);
        }
out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
        if (flags & MAP_POPULATE) {
                up_write(&mm->mmap_sem);
                sys_remap_file_pages(addr, len, 0,
                                        pgoff, flags & MAP_NONBLOCK);
                down_write(&mm->mmap_sem);
        }
        return addr;

unmap_and_free_vma:
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
        vma->vm_file = NULL;
        fput(file);

        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
        charged = 0;
free_vma:
        kmem_cache_free(vm_area_cachep, vma);
unacct_error:
        if (charged)
                vm_unacct_memory(charged);
        return error;
}

EXPORT_SYMBOL(do_mmap_pgoff);

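/*
 * For example, an anonymous mapping such as
 *
 *      void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * reaches do_mmap_pgoff() with file == NULL and len == 8192, takes
 * the anonymous MAP_PRIVATE branch above, and either extends a
 * neighbouring anonymous vma via vma_merge() or links a freshly
 * allocated vma into the mm.
 */
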
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *      if (ret & ~PAGE_MASK)
 *              error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                addr = TASK_UNMAPPED_BASE;
                                start_addr = addr;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
        }
}
#endif

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
        /*
         * Is this a new hole at the lowest possible address?
         */
        if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
                mm->free_area_cache = addr;
                mm->cached_hole_size = ~0UL;
        }
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base-len;

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
#endif

void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
        /*
         * Is this a new hole at the highest possible address?
         */
        if (addr > mm->free_area_cache)
                mm->free_area_cache = addr;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > mm->mmap_base)
                mm->free_area_cache = mm->mmap_base;
}

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        unsigned long ret;

        if (!(flags & MAP_FIXED)) {
                unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

                get_area = current->mm->get_unmapped_area;
                if (file && file->f_op && file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
                addr = get_area(file, addr, len, pgoff, flags);
                if (IS_ERR_VALUE(addr))
                        return addr;
        }

        if (addr > TASK_SIZE - len)
                return -ENOMEM;
        if (addr & ~PAGE_MASK)
                return -EINVAL;
        if (file && is_file_hugepages(file)) {
                /*
                 * Check if the given range is hugepage aligned, and
                 * can be made suitable for hugepages.
                 */
                ret = prepare_hugepage_range(addr, len);
        } else {
                /*
                 * Ensure that a normal request is not falling in a
                 * reserved hugepage range.  For some archs like IA-64,
                 * there is a separate region for hugepages.
                 */
                ret = is_hugepage_only_range(current->mm, addr, len);
        }
        if (ret)
                return -EINVAL;
        return addr;
}

EXPORT_SYMBOL(get_unmapped_area);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
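/*
 * Note the semantics: the vma returned may not contain addr at all; it
 * is merely the lowest vma ending above it.  E.g. with vmas covering
 * [0x1000,0x2000) and [0x8000,0x9000), find_vma(mm, 0x3000) returns
 * the [0x8000,0x9000) vma, so callers must still check vm_start.
 */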
1387 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
1388 {
1389         struct vm_area_struct *vma = NULL;
1390
1391         if (mm) {
1392                 /* Check the cache first. */
1393                 /* (Cache hit rate is typically around 35%.) */
1394                 vma = mm->mmap_cache;
1395                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1396                         struct rb_node * rb_node;
1397
1398                         rb_node = mm->mm_rb.rb_node;
1399                         vma = NULL;
1400
1401                         while (rb_node) {
1402                                 struct vm_area_struct * vma_tmp;
1403
1404                                 vma_tmp = rb_entry(rb_node,
1405                                                 struct vm_area_struct, vm_rb);
1406
1407                                 if (vma_tmp->vm_end > addr) {
1408                                         vma = vma_tmp;
1409                                         if (vma_tmp->vm_start <= addr)
1410                                                 break;
1411                                         rb_node = rb_node->rb_left;
1412                                 } else
1413                                         rb_node = rb_node->rb_right;
1414                         }
1415                         if (vma)
1416                                 mm->mmap_cache = vma;
1417                 }
1418         }
1419         return vma;
1420 }
1421
1422 EXPORT_SYMBOL(find_vma);
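
/*
 * Editorial usage sketch (hypothetical helper, not in the original
 * file): find_vma() returns the first vma ending above addr, which may
 * still start above addr, so callers that want "is addr mapped?" must
 * check vm_start too.  Assumes mmap_sem is held for read.
 */
static inline int example_addr_is_mapped(struct mm_struct *mm,
                                         unsigned long addr)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        return vma && vma->vm_start <= addr;
}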
1423
1424 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
1425 struct vm_area_struct *
1426 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1427                         struct vm_area_struct **pprev)
1428 {
1429         struct vm_area_struct *vma = NULL, *prev = NULL;
1430         struct rb_node * rb_node;
1431         if (!mm)
1432                 goto out;
1433
1434         /* Guard against addr being lower than the first VMA */
1435         vma = mm->mmap;
1436
1437         /* Go through the RB tree quickly. */
1438         rb_node = mm->mm_rb.rb_node;
1439
1440         while (rb_node) {
1441                 struct vm_area_struct *vma_tmp;
1442                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1443
1444                 if (addr < vma_tmp->vm_end) {
1445                         rb_node = rb_node->rb_left;
1446                 } else {
1447                         prev = vma_tmp;
1448                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1449                                 break;
1450                         rb_node = rb_node->rb_right;
1451                 }
1452         }
1453
1454 out:
1455         *pprev = prev;
1456         return prev ? prev->vm_next : vma;
1457 }
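
/*
 * Editorial sketch (hypothetical helper): find_vma_prev() suits callers
 * that also need the neighbour below addr, e.g. to decide whether
 * [start, end) lies wholly in the hole between prev and the vma that
 * find_vma() would return.  Assumes mmap_sem is held for read.
 */
static inline int example_range_in_hole(struct mm_struct *mm,
                unsigned long start, unsigned long end)
{
        struct vm_area_struct *vma, *prev;

        vma = find_vma_prev(mm, start, &prev);
        /* free iff no vma ends above start, or the next begins at/after end */
        return !vma || end <= vma->vm_start;
}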
1458
1459 /*
1460  * Verify that the stack growth is acceptable and
1461  * update accounting.  This is shared by both the
1462  * grow-up and grow-down cases.
1463  */
1464 static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
1465 {
1466         struct mm_struct *mm = vma->vm_mm;
1467         struct rlimit *rlim = current->signal->rlim;
1468
1469         /* address space limit tests */
1470         if (!may_expand_vm(mm, grow))
1471                 return -ENOMEM;
1472
1473         /* Stack limit test */
1474         if (size > rlim[RLIMIT_STACK].rlim_cur)
1475                 return -ENOMEM;
1476
1477         /* mlock limit tests */
1478         if (vma->vm_flags & VM_LOCKED) {
1479                 unsigned long locked;
1480                 unsigned long limit;
1481                 locked = mm->locked_vm + grow;
1482                 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
1483                 if (locked > limit && !capable(CAP_IPC_LOCK))
1484                         return -ENOMEM;
1485         }
1486
1487         /*
1488          * Overcommit.  This must be the final test, as it will
1489          * update security statistics.
1490          */
1491         if (security_vm_enough_memory(grow))
1492                 return -ENOMEM;
1493
1494         /* Ok, everything looks good - let it rip */
1495         mm->total_vm += grow;
1496         if (vma->vm_flags & VM_LOCKED)
1497                 mm->locked_vm += grow;
1498         vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1499         return 0;
1500 }
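
/*
 * Editorial worked example for the mlock test above (assuming 4 KiB
 * pages, PAGE_SHIFT == 12): with RLIMIT_MEMLOCK.rlim_cur == 64 KiB the
 * limit is 65536 >> 12 == 16 pages, so a VM_LOCKED stack in an mm that
 * already holds 14 locked pages may grow by at most 2 more pages,
 * unless the task has CAP_IPC_LOCK.
 */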
1501
1502 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1503 /*
1504  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1505  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1506  */
1507 #ifndef CONFIG_IA64
1508 static inline
1509 #endif
1510 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1511 {
1512         int error;
1513
1514         if (!(vma->vm_flags & VM_GROWSUP))
1515                 return -EFAULT;
1516
1517         /*
1518          * We must make sure the anon_vma is allocated
1519          * so that the anon_vma locking is not a noop.
1520          */
1521         if (unlikely(anon_vma_prepare(vma)))
1522                 return -ENOMEM;
1523         anon_vma_lock(vma);
1524
1525         /*
1526          * vma->vm_start/vm_end cannot change under us because the caller
1527          * is required to hold the mmap_sem in read mode.  We need the
1528          * anon_vma lock to serialize against concurrent expand_stacks.
1529          */
1530         address += 4 + PAGE_SIZE - 1;
1531         address &= PAGE_MASK;
1532         error = 0;
1533
1534         /* Somebody else might have raced and expanded it already */
1535         if (address > vma->vm_end) {
1536                 unsigned long size, grow;
1537
1538                 size = address - vma->vm_start;
1539                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1540
1541                 error = acct_stack_growth(vma, size, grow);
1542                 if (!error)
1543                         vma->vm_end = address;
1544         }
1545         anon_vma_unlock(vma);
1546         return error;
1547 }
1548 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
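
/*
 * Editorial worked example for the rounding in expand_upwards(), with
 * PAGE_SIZE == 4096: a fault at address 0x2000 gives
 * 0x2000 + 4 + 4095 == 0x3003, which PAGE_MASK truncates to 0x3000, so
 * vm_end moves one whole page past the faulting word.
 */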
1549
1550 #ifdef CONFIG_STACK_GROWSUP
1551 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1552 {
1553         return expand_upwards(vma, address);
1554 }
1555
1556 struct vm_area_struct *
1557 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1558 {
1559         struct vm_area_struct *vma, *prev;
1560
1561         addr &= PAGE_MASK;
1562         vma = find_vma_prev(mm, addr, &prev);
1563         if (vma && (vma->vm_start <= addr))
1564                 return vma;
1565         if (!prev || expand_stack(prev, addr))
1566                 return NULL;
1567         if (prev->vm_flags & VM_LOCKED) {
1568                 make_pages_present(addr, prev->vm_end);
1569         }
1570         return prev;
1571 }
1572 #else
1573 /*
1574  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1575  */
1576 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1577 {
1578         int error;
1579
1580         /*
1581          * We must make sure the anon_vma is allocated
1582          * so that the anon_vma locking is not a noop.
1583          */
1584         if (unlikely(anon_vma_prepare(vma)))
1585                 return -ENOMEM;
1586         anon_vma_lock(vma);
1587
1588         /*
1589          * vma->vm_start/vm_end cannot change under us because the caller
1590          * is required to hold the mmap_sem in read mode.  We need the
1591          * anon_vma lock to serialize against concurrent expand_stacks.
1592          */
1593         address &= PAGE_MASK;
1594         error = 0;
1595
1596         /* Somebody else might have raced and expanded it already */
1597         if (address < vma->vm_start) {
1598                 unsigned long size, grow;
1599
1600                 size = vma->vm_end - address;
1601                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1602
1603                 error = acct_stack_growth(vma, size, grow);
1604                 if (!error) {
1605                         vma->vm_start = address;
1606                         vma->vm_pgoff -= grow;
1607                 }
1608         }
1609         anon_vma_unlock(vma);
1610         return error;
1611 }
1612
1613 struct vm_area_struct *
1614 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1615 {
1616         struct vm_area_struct * vma;
1617         unsigned long start;
1618
1619         addr &= PAGE_MASK;
1620         vma = find_vma(mm, addr);
1621         if (!vma)
1622                 return NULL;
1623         if (vma->vm_start <= addr)
1624                 return vma;
1625         if (!(vma->vm_flags & VM_GROWSDOWN))
1626                 return NULL;
1627         start = vma->vm_start;
1628         if (expand_stack(vma, addr))
1629                 return NULL;
1630         if (vma->vm_flags & VM_LOCKED) {
1631                 make_pages_present(addr, start);
1632         }
1633         return vma;
1634 }
1635 #endif
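
/*
 * Editorial sketch of the typical caller (the fault path), using a
 * hypothetical helper name; mmap_sem is held for read.
 * find_extend_vma() either hands back a vma that now covers addr,
 * growing the stack vma first if necessary, or NULL for a plain bad
 * address.
 */
static inline int example_expand_for_fault(struct mm_struct *mm,
                                           unsigned long addr)
{
        struct vm_area_struct *vma = find_extend_vma(mm, addr);

        if (!vma)
                return -EFAULT; /* a hole no stack vma may grow across */
        return 0;               /* safe to hand to handle_mm_fault() */
}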
1636
1637 /*
1638  * Ok - we have the memory areas we should free on the vma list,
1639  * so release them, and do the vma updates.
1640  *
1641  * Called with the mm semaphore held.
1642  */
1643 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1644 {
1645         /* Update high watermark before we lower total_vm */
1646         update_hiwater_vm(mm);
1647         do {
1648                 long nrpages = vma_pages(vma);
1649
1650                 mm->total_vm -= nrpages;
1651                 if (vma->vm_flags & VM_LOCKED)
1652                         mm->locked_vm -= nrpages;
1653                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1654                 vma = remove_vma(vma);
1655         } while (vma);
1656         validate_mm(mm);
1657 }
1658
1659 /*
1660  * Get rid of page table information in the indicated region.
1661  *
1662  * Called with the mm semaphore held.
1663  */
1664 static void unmap_region(struct mm_struct *mm,
1665                 struct vm_area_struct *vma, struct vm_area_struct *prev,
1666                 unsigned long start, unsigned long end)
1667 {
1668         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1669         struct mmu_gather *tlb;
1670         unsigned long nr_accounted = 0;
1671
1672         lru_add_drain();
1673         tlb = tlb_gather_mmu(mm, 0);
1674         update_hiwater_rss(mm);
1675         unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1676         vm_unacct_memory(nr_accounted);
1677         free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1678                                  next? next->vm_start: 0);
1679         tlb_finish_mmu(tlb, start, end);
1680 }
1681
1682 /*
1683  * Create a list of vmas touched by the unmap, removing them from the mm's
1684  * vma list as we go.
1685  */
1686 static void
1687 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1688         struct vm_area_struct *prev, unsigned long end)
1689 {
1690         struct vm_area_struct **insertion_point;
1691         struct vm_area_struct *tail_vma = NULL;
1692         unsigned long addr;
1693
1694         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1695         do {
1696                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1697                 mm->map_count--;
1698                 tail_vma = vma;
1699                 vma = vma->vm_next;
1700         } while (vma && vma->vm_start < end);
1701         *insertion_point = vma;
1702         tail_vma->vm_next = NULL;
1703         if (mm->unmap_area == arch_unmap_area)
1704                 addr = prev ? prev->vm_end : mm->mmap_base;
1705         else
1706                 addr = vma ? vma->vm_start : mm->mmap_base;
1707         mm->unmap_area(mm, addr);
1708         mm->mmap_cache = NULL;          /* Kill the cache. */
1709 }
1710
1711 /*
1712  * Split a vma into two pieces at address 'addr'; a new vma is allocated
1713  * either for the first part or the tail.
1714  */
1715 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1716               unsigned long addr, int new_below)
1717 {
1718         struct mempolicy *pol;
1719         struct vm_area_struct *new;
1720
1721         if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
1722                 return -EINVAL;
1723
1724         if (mm->map_count >= sysctl_max_map_count)
1725                 return -ENOMEM;
1726
1727         new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1728         if (!new)
1729                 return -ENOMEM;
1730
1731         /* most fields are the same, copy all, and then fixup */
1732         *new = *vma;
1733
1734         if (new_below)
1735                 new->vm_end = addr;
1736         else {
1737                 new->vm_start = addr;
1738                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1739         }
1740
1741         pol = mpol_copy(vma_policy(vma));
1742         if (IS_ERR(pol)) {
1743                 kmem_cache_free(vm_area_cachep, new);
1744                 return PTR_ERR(pol);
1745         }
1746         vma_set_policy(new, pol);
1747
1748         if (new->vm_file)
1749                 get_file(new->vm_file);
1750
1751         if (new->vm_ops && new->vm_ops->open)
1752                 new->vm_ops->open(new);
1753
1754         if (new_below)
1755                 vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1756                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
1757         else
1758                 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1759
1760         return 0;
1761 }
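
/*
 * Editorial note on the new_below convention above: with new_below == 0
 * the freshly allocated vma takes the tail [addr, vm_end) and the old
 * vma shrinks to [vm_start, addr); with new_below != 0 the new vma takes
 * the head [vm_start, addr) and the old vma keeps the tail.  do_munmap()
 * below relies on both forms.
 */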
1762
1763 /* Munmap is split into 2 main parts -- this part, which finds
1764  * what needs doing, and the helpers above, which do the actual
1765  * work on the areas.  This now handles partial unmappings.
1766  * Jeremy Fitzhardinge <jeremy@goop.org>
1767  */
1768 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1769 {
1770         unsigned long end;
1771         struct vm_area_struct *vma, *prev, *last;
1772
1773         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1774                 return -EINVAL;
1775
1776         if ((len = PAGE_ALIGN(len)) == 0)
1777                 return -EINVAL;
1778
1779         /* Find the first overlapping VMA */
1780         vma = find_vma_prev(mm, start, &prev);
1781         if (!vma)
1782                 return 0;
1783         /* we have  start < vma->vm_end  */
1784
1785         /* if it doesn't overlap, there is nothing to do */
1786         end = start + len;
1787         if (vma->vm_start >= end)
1788                 return 0;
1789
1790         /*
1791          * If we need to split any vma, do it now to save pain later.
1792          *
1793          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1794          * unmapped vm_area_struct will remain in use: so lower split_vma
1795          * places tmp vma above, and higher split_vma places tmp vma below.
1796          */
1797         if (start > vma->vm_start) {
1798                 int error = split_vma(mm, vma, start, 0);
1799                 if (error)
1800                         return error;
1801                 prev = vma;
1802         }
1803
1804         /* Does it split the last one? */
1805         last = find_vma(mm, end);
1806         if (last && end > last->vm_start) {
1807                 int error = split_vma(mm, last, end, 1);
1808                 if (error)
1809                         return error;
1810         }
1811         vma = prev? prev->vm_next: mm->mmap;
1812
1813         /*
1814          * Remove the vma's, and unmap the actual pages
1815          */
1816         detach_vmas_to_be_unmapped(mm, vma, prev, end);
1817         unmap_region(mm, vma, prev, start, end);
1818
1819         /* Fix up all other VM information */
1820         remove_vma_list(mm, vma);
1821
1822         return 0;
1823 }
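
/*
 * Editorial worked example: unmapping [0x5000, 0x8000) from a single
 * vma covering [0x4000, 0xa000) first splits at 0x5000 (new_below == 0,
 * so [0x4000, 0x5000) stays in place as prev) and then splits the
 * result at 0x8000 (new_below == 1), leaving exactly [0x5000, 0x8000)
 * to be detached and torn down while both remainders survive.
 */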
1824
1825 EXPORT_SYMBOL(do_munmap);
1826
1827 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1828 {
1829         int ret;
1830         struct mm_struct *mm = current->mm;
1831
1832         profile_munmap(addr);
1833
1834         down_write(&mm->mmap_sem);
1835         ret = do_munmap(mm, addr, len);
1836         up_write(&mm->mmap_sem);
1837         return ret;
1838 }
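
/*
 * Editorial userspace illustration of the syscall above (assumes a
 * 4 KiB page size; fragment only):
 *
 *      char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      munmap(p + 4096, 4096);
 *
 * This drops only the middle page, splitting the original mapping into
 * two vmas via split_vma().
 */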
1839
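/*
 * Editorial note: if down_read_trylock() succeeds then nobody else can
 * be holding mmap_sem for write, which is exactly the caller bug this
 * debugging check is meant to flag.
 */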
1840 static inline void verify_mm_writelocked(struct mm_struct *mm)
1841 {
1842 #ifdef CONFIG_DEBUG_VM
1843         if (unlikely(down_read_trylock(&mm->mmap_sem))) {
1844                 WARN_ON(1);
1845                 up_read(&mm->mmap_sem);
1846         }
1847 #endif
1848 }
1849
1850 /*
1851  *  This is really a simplified "do_mmap".  It only handles
1852  *  anonymous maps.  Eventually we may be able to do some
1853  *  brk-specific accounting here.
1854  */
1855 unsigned long do_brk(unsigned long addr, unsigned long len)
1856 {
1857         struct mm_struct * mm = current->mm;
1858         struct vm_area_struct * vma, * prev;
1859         unsigned long flags;
1860         struct rb_node ** rb_link, * rb_parent;
1861         pgoff_t pgoff = addr >> PAGE_SHIFT;
1862
1863         len = PAGE_ALIGN(len);
1864         if (!len)
1865                 return addr;
1866
1867         if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1868                 return -EINVAL;
1869
1870         /*
1871          * mlock MCL_FUTURE?
1872          */
1873         if (mm->def_flags & VM_LOCKED) {
1874                 unsigned long locked, lock_limit;
1875                 locked = len >> PAGE_SHIFT;
1876                 locked += mm->locked_vm;
1877                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1878                 lock_limit >>= PAGE_SHIFT;
1879                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1880                         return -EAGAIN;
1881         }
1882
1883         /*
1884          * mm->mmap_sem is required to protect against another thread
1885          * changing the mappings in case we sleep.
1886          */
1887         verify_mm_writelocked(mm);
1888
1889         /*
1890          * Clear old maps.  This also does some error checking for us.
1891          */
1892  munmap_back:
1893         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1894         if (vma && vma->vm_start < addr + len) {
1895                 if (do_munmap(mm, addr, len))
1896                         return -ENOMEM;
1897                 goto munmap_back;
1898         }
1899
1900         /* Check against address space limits *after* clearing old maps... */
1901         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1902                 return -ENOMEM;
1903
1904         if (mm->map_count > sysctl_max_map_count)
1905                 return -ENOMEM;
1906
1907         if (security_vm_enough_memory(len >> PAGE_SHIFT))
1908                 return -ENOMEM;
1909
1910         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1911
1912         /* Can we just expand an old private anonymous mapping? */
1913         if (vma_merge(mm, prev, addr, addr + len, flags,
1914                                         NULL, NULL, pgoff, NULL))
1915                 goto out;
1916
1917         /*
1918          * create a vma struct for an anonymous mapping
1919          */
1920         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1921         if (!vma) {
1922                 vm_unacct_memory(len >> PAGE_SHIFT);
1923                 return -ENOMEM;
1924         }
1925
1926         vma->vm_mm = mm;
1927         vma->vm_start = addr;
1928         vma->vm_end = addr + len;
1929         vma->vm_pgoff = pgoff;
1930         vma->vm_flags = flags;
1931         vma->vm_page_prot = protection_map[flags &
1932                                 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
1933         vma_link(mm, vma, prev, rb_link, rb_parent);
1934 out:
1935         mm->total_vm += len >> PAGE_SHIFT;
1936         if (flags & VM_LOCKED) {
1937                 mm->locked_vm += len >> PAGE_SHIFT;
1938                 make_pages_present(addr, addr + len);
1939         }
1940         return addr;
1941 }
1942
1943 EXPORT_SYMBOL(do_brk);
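
/*
 * Editorial sketch of the expected calling convention (this mirrors the
 * shape of sys_brk(); illustrative fragment only):
 *
 *      down_write(&mm->mmap_sem);
 *      ret = do_brk(oldbrk, newbrk - oldbrk);
 *      up_write(&mm->mmap_sem);
 */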
1944
1945 /* Release all mmaps. */
1946 void exit_mmap(struct mm_struct *mm)
1947 {
1948         struct mmu_gather *tlb;
1949         struct vm_area_struct *vma = mm->mmap;
1950         unsigned long nr_accounted = 0;
1951         unsigned long end;
1952
1953         lru_add_drain();
1954         flush_cache_mm(mm);
1955         tlb = tlb_gather_mmu(mm, 1);
1956         /* Don't update_hiwater_rss(mm) here, do_exit already did */
1957         /* Use -1 here to ensure all VMAs in the mm are unmapped */
1958         end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
1959         vm_unacct_memory(nr_accounted);
1960         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
1961         tlb_finish_mmu(tlb, 0, end);
1962
1963         /*
1964          * Walk the list again, actually closing and freeing it,
1965          * with preemption enabled, without holding any MM locks.
1966          */
1967         while (vma)
1968                 vma = remove_vma(vma);
1969
1970         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
1971 }
1972
1973 /* Insert vm structure into process list sorted by address
1974  * and into the inode's i_mmap tree.  If vm_file is non-NULL
1975  * then i_mmap_lock is taken here.
1976  */
1977 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1978 {
1979         struct vm_area_struct * __vma, * prev;
1980         struct rb_node ** rb_link, * rb_parent;
1981
1982         /*
1983          * The vm_pgoff of a purely anonymous vma should be irrelevant
1984          * until its first write fault, when page's anon_vma and index
1985          * are set.  But set vm_pgoff now, to the value it will almost
1986          * certainly end up with (unless mremap moves it elsewhere before
1987          * that first write fault), so /proc/pid/maps tells a consistent story.
1988          *
1989          * By setting it to reflect the virtual start address of the
1990          * vma, merges and splits can happen in a seamless way, just
1991          * using the existing file pgoff checks and manipulations.
1992          * Similarly in do_mmap_pgoff and in do_brk.
1993          */
1994         if (!vma->vm_file) {
1995                 BUG_ON(vma->anon_vma);
1996                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
1997         }
1998         __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
1999         if (__vma && __vma->vm_start < vma->vm_end)
2000                 return -ENOMEM;
2001         if ((vma->vm_flags & VM_ACCOUNT) &&
2002              security_vm_enough_memory(vma_pages(vma)))
2003                 return -ENOMEM;
2004         vma_link(mm, vma, prev, rb_link, rb_parent);
2005         return 0;
2006 }
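
/*
 * Editorial worked example for the vm_pgoff fixup above (PAGE_SHIFT ==
 * 12): an anonymous vma starting at 0x40000000 gets
 * vm_pgoff == 0x40000000 >> 12 == 0x40000, the value a file mapping at
 * that address would naturally carry, so the ordinary pgoff-based merge
 * and split checks apply unchanged.
 */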
2007
2008 /*
2009  * Copy the vma structure to a new location in the same mm,
2010  * prior to moving page table entries, to effect an mremap move.
2011  */
2012 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2013         unsigned long addr, unsigned long len, pgoff_t pgoff)
2014 {
2015         struct vm_area_struct *vma = *vmap;
2016         unsigned long vma_start = vma->vm_start;
2017         struct mm_struct *mm = vma->vm_mm;
2018         struct vm_area_struct *new_vma, *prev;
2019         struct rb_node **rb_link, *rb_parent;
2020         struct mempolicy *pol;
2021
2022         /*
2023          * If anonymous vma has not yet been faulted, update new pgoff
2024          * to match new location, to increase its chance of merging.
2025          */
2026         if (!vma->vm_file && !vma->anon_vma)
2027                 pgoff = addr >> PAGE_SHIFT;
2028
2029         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2030         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2031                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2032         if (new_vma) {
2033                 /*
2034                  * Source vma may have been merged into new_vma
2035                  */
2036                 if (vma_start >= new_vma->vm_start &&
2037                     vma_start < new_vma->vm_end)
2038                         *vmap = new_vma;
2039         } else {
2040                 new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2041                 if (new_vma) {
2042                         *new_vma = *vma;
2043                         pol = mpol_copy(vma_policy(vma));
2044                         if (IS_ERR(pol)) {
2045                                 kmem_cache_free(vm_area_cachep, new_vma);
2046                                 return NULL;
2047                         }
2048                         vma_set_policy(new_vma, pol);
2049                         new_vma->vm_start = addr;
2050                         new_vma->vm_end = addr + len;
2051                         new_vma->vm_pgoff = pgoff;
2052                         if (new_vma->vm_file)
2053                                 get_file(new_vma->vm_file);
2054                         if (new_vma->vm_ops && new_vma->vm_ops->open)
2055                                 new_vma->vm_ops->open(new_vma);
2056                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
2057                 }
2058         }
2059         return new_vma;
2060 }
2061
2062 /*
2063  * Return true if the calling process may expand its vm space by the passed
2064  * number of pages
2065  */
2066 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2067 {
2068         unsigned long cur = mm->total_vm;       /* pages */
2069         unsigned long lim;
2070
2071         lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
2072
2073         if (cur + npages > lim)
2074                 return 0;
2075         return 1;
2076 }
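
/*
 * Editorial worked example (PAGE_SHIFT == 12): an RLIMIT_AS of 1 GiB
 * gives lim == (1 << 30) >> 12 == 262144 pages, so a process whose
 * total_vm is already 262000 pages may expand by at most 144 more.
 */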