/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
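
/*
 * Worked example for mmap_base() (illustrative numbers, not from this
 * file): with an 8 MiB RLIMIT_STACK the gap is clamped up to MIN_GAP
 * (128 MiB), so the top-down base becomes PAGE_ALIGN(TASK_SIZE -
 * 128 MiB - rnd).  A very large stack limit is clamped down to MAX_GAP
 * (5/6 of TASK_SIZE) instead, which keeps most of the address space
 * available for top-down mmap().
 */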

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
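
/*
 * Colouring, briefly: on CPUs with a virtually indexed, aliasing D-cache,
 * all mappings of a given file offset must share the low address bits
 * covered by shm_align_mask, otherwise writes through one mapping may not
 * be visible through another.  A purely illustrative example (mask and
 * page size are assumptions here; the real mask is set up by the cache
 * probing code): with 4 KiB pages and shm_align_mask == 0x3fff,
 * COLOUR_ALIGN(addr, 5) returns the next 16 KiB boundary at or above addr
 * plus 0x1000, because (5 << PAGE_SHIFT) & 0x3fff == 0x1000; every mapping
 * of that offset then lands on the same cache colour.
 */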

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	unsigned long vm_start;
	int do_color_align;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}
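
	/*
	 * The MAP_FIXED check above rejects a shared mapping whose address
	 * and file offset disagree in the bits below shm_align_mask, i.e. a
	 * mapping that would sit on the wrong cache colour.  Illustrative
	 * values only: with a 16 KiB aliasing window and 4 KiB pages, an
	 * addr of 0x2000 for pgoff 1 gives (0x2000 - 0x1000) & 0x3fff ==
	 * 0x1000, so the request is refused with -EINVAL.
	 */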

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	if (dir == UP) {
		addr = mm->mmap_base;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
			/* At this point:  (!vma || addr < vma->vm_end). */
			if (TASK_SIZE - len < addr)
				return -ENOMEM;
			if (!vma || addr + len <= vm_start_gap(vma))
				return addr;
			addr = vma->vm_end;
			if (do_color_align)
				addr = COLOUR_ALIGN(addr, pgoff);
		}
	} else {
		/* check if free_area_cache is useful for us */
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/*
		 * either no address requested, or the mapping can't fit into
		 * the requested address hole
		 */
		addr = mm->free_area_cache;
		if (do_color_align) {
			unsigned long base =
				COLOUR_ALIGN_DOWN(addr - len, pgoff);
			addr = base + len;
		}

		/* make sure it can fit in the remaining address space */
		if (likely(addr > len)) {
			vma = find_vma(mm, addr - len);
			if (!vma || addr <= vm_start_gap(vma)) {
				/* cache the address as a hint for next time */
				return mm->free_area_cache = addr - len;
			}
		}

		if (unlikely(mm->mmap_base < len))
			goto bottomup;

		addr = mm->mmap_base - len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);

		do {
			/*
			 * Lookup failure means no vma is above this address,
			 * else if new region fits below vma->vm_start,
			 * return with success:
			 */
			vma = find_vma(mm, addr);
			if (vma)
				vm_start = vm_start_gap(vma);
			if (likely(!vma || addr + len <= vm_start)) {
				/* cache the address as a hint for next time */
				return mm->free_area_cache = addr;
			}

			/* remember the largest hole we saw so far */
			if (addr + mm->cached_hole_size < vm_start)
				mm->cached_hole_size = vm_start - addr;

			/* try just below the current vma->vm_start */
			addr = vm_start - len;
			if (do_color_align)
				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
		} while (likely(len < vm_start));

bottomup:
		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here.  This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
		mm->cached_hole_size = ~0UL;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
		/*
		 * Restore the topdown base:
		 */
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;

		return addr;
	}
}
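
/*
 * Note on the DOWN path above: it follows the same free_area_cache /
 * cached_hole_size scheme as the generic topdown allocator in mm/mmap.c
 * (at least in kernels of this vintage), with cache colouring added.
 * free_area_cache remembers where the last search ended and
 * cached_hole_size the largest gap seen, so repeated mmap() calls do not
 * rescan the whole VMA list from mmap_base every time.
 */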

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
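
/*
 * Layout summary (derived from the masks above; the 4 KiB page size is an
 * assumption, MIPS also supports larger pages): with PF_RANDOMIZE set the
 * mmap base is shifted by a page-aligned random offset of up to roughly
 * 16 MiB on 32-bit tasks and 256 MiB on 64-bit tasks.  Legacy layout then
 * grows mappings up from TASK_UNMAPPED_BASE, while the default layout
 * grows them down from just below the stack gap computed by mmap_base().
 */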

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
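
/*
 * Worked example for brk_rnd() (illustrative, assuming 4 KiB pages): the
 * random value is shifted left by PAGE_SHIFT before being masked, so the
 * result is a page-aligned offset of just under 8 MiB for 32-bit tasks
 * and just under 256 MiB for 64-bit tasks, matching the comment above.
 */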

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}