mm: larger stack guard gap, between vmas
arch/mips/mm/mmap.c (pandora-kernel.git)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

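/*
 * Address bits that must match between a shared mapping's virtual
 * address and its file offset so that VIPT D-cache aliases land on
 * the same cache line.  The PAGE_SIZE - 1 default imposes nothing
 * beyond page alignment; platforms whose caches can alias are
 * expected to raise this mask during cache setup.
 */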
unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
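
/*
 * Example, assuming the usual 32-bit TASK_SIZE of 0x7fff8000: with the
 * common 8 MiB RLIMIT_STACK the gap clamps up to MIN_GAP, so
 * mmap_base() lands roughly 128 MiB below TASK_SIZE, minus the random
 * offset, and top-down allocation begins just under that point.
 */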

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        if (base + off <= addr)
                return base + off;

        return base - off;
}

#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
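
/*
 * Worked example, assuming 4 KiB pages and shm_align_mask == 0x3fff
 * (a 16 KiB aliasing window): COLOUR_ALIGN(0x2001000, 1) rounds the
 * address up to 0x2004000 and adds the offset colour 0x1000, giving
 * 0x2005000 -- the same colour the page has at file offset 1.
 */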

enum mmap_allocation_direction {UP, DOWN};
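
/*
 * Common body of the bottom-up (UP) and top-down (DOWN) allocators.
 * All free-space checks go through vm_start_gap(), which, per the
 * stack guard gap fix this tree carries, reports a VM_GROWSDOWN vma's
 * start lowered by the guard gap, so a new mapping is never placed
 * flush against a growable stack.
 */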
65
66 static unsigned long arch_get_unmapped_area_common(struct file *filp,
67         unsigned long addr0, unsigned long len, unsigned long pgoff,
68         unsigned long flags, enum mmap_allocation_direction dir)
69 {
70         struct mm_struct *mm = current->mm;
71         struct vm_area_struct *vma;
72         unsigned long addr = addr0;
73         unsigned long vm_start;
74         int do_color_align;
75
76         if (unlikely(len > TASK_SIZE))
77                 return -ENOMEM;
78
79         if (flags & MAP_FIXED) {
80                 /* Even MAP_FIXED mappings must reside within TASK_SIZE */
81                 if (TASK_SIZE - len < addr)
82                         return -EINVAL;
83
84                 /*
85                  * We do not accept a shared mapping if it would violate
86                  * cache aliasing constraints.
87                  */
88                 if ((flags & MAP_SHARED) &&
89                     ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
90                         return -EINVAL;
91                 return addr;
92         }
93
94         do_color_align = 0;
95         if (filp || (flags & MAP_SHARED))
96                 do_color_align = 1;
97
98         /* requesting a specific address */
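        /*
         * The hint is honoured only if the whole range ends below the
         * next vma's guarded start (vm_start_gap()), so a hint can
         * never drop a mapping into a stack's guard area.
         */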
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                        /* At this point:  (!vma || addr < vma->vm_end). */
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vm_start_gap(vma))
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                /*
                 * Check whether free_area_cache is useful to us.  It
                 * remembers where the previous search left off, and
                 * cached_hole_size tracks the largest hole skipped on
                 * the way down there; a request that would fit inside
                 * that hole must restart from mmap_base, or the cached
                 * hint would make us skip it forever.
                 */
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /*
                 * either no address requested, or the mapping can't fit into
                 * the requested address hole
                 */
                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                /* make sure it can fit in the remaining address space */
                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vm_start_gap(vma)) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr - len;
                        }
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        /*
                         * Lookup failure means no vma is above this address,
                         * else if new region fits below vma->vm_start,
                         * return with success:
                         */
                        vma = find_vma(mm, addr);
                        if (vma)
                                vm_start = vm_start_gap(vma);
                        if (likely(!vma || addr + len <= vm_start)) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr;
                        }

                        /* remember the largest hole we saw so far */
                        if (addr + mm->cached_hole_size < vm_start)
                                mm->cached_hole_size = vm_start - addr;

                        /* try just below the current vma->vm_start */
                        addr = vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vm_start));
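
                /*
                 * The loop ends once vm_start - len would wrap below
                 * zero: no hole under the lowest vma can hold the
                 * request, so fall through to the bottom-up path.
                 */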

bottomup:
                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
                /*
                 * Restore the topdown base:
                 */
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, DOWN);
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
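                /*
                 * The shift keeps the offset page aligned; the masks
                 * below then bound the randomisation to just under
                 * 16 MiB on 32-bit and 256 MiB on 64-bit.
                 */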
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

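/*
 * brk randomisation: brk_rnd() yields a page-aligned offset of up to
 * ~8 MiB on 32-bit and ~256 MiB on 64-bit, which arch_randomize_brk()
 * adds on top of the initial brk, falling back to the unrandomised
 * brk if the sum would wrap.
 */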
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}