#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>
struct va_alignment __read_mostly va_align = {
	.flags = -1,
};
/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 *
 * @flags denotes the allocation direction - bottomup or topdown -
 * or vDSO; see call sites below.
 */
unsigned long align_addr(unsigned long addr, struct file *filp,
			 enum align_flags flags)
{
	unsigned long tmp_addr;

	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return addr;

	if (!(current->flags & PF_RANDOMIZE))
		return addr;

	if (!((flags & ALIGN_VDSO) || filp))
		return addr;

	tmp_addr = addr;

	/*
	 * We need an address which is <= the original one only when
	 * in topdown direction.
	 */
	if (!(flags & ALIGN_TOPDOWN))
		tmp_addr += va_align.mask;

	tmp_addr &= ~va_align.mask;

	return tmp_addr;
}
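
/*
 * Worked example (hypothetical numbers): with va_align.mask == 0x7fff,
 * i.e. a 32 KiB aliasing granularity, a bottom-up candidate of
 * 0x20001000 is rounded up: (0x20001000 + 0x7fff) & ~0x7fff ==
 * 0x20008000, while a top-down candidate is simply rounded down:
 * 0x20001000 & ~0x7fff == 0x20000000, so the result never exceeds the
 * original address in the topdown case.
 */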
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
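
/*
 * Usage sketch: __setup() wires the handler above to the kernel
 * command line, so booting with "align_va_addr=32", "align_va_addr=64",
 * "align_va_addr=on" or "align_va_addr=off" selects which mmaps get
 * aligned; the parser accepts an optional leading '=' and rejects
 * unknown values (return 0).
 */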
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
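
/*
 * Userspace sketch (assuming a 4 KiB PAGE_SIZE): a file offset that is
 * not page-aligned fails the check above, e.g.
 * mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 512) returns MAP_FAILED
 * with errno set to EINVAL, while offset 4096 is passed on to
 * sys_mmap_pgoff() as pgoff 1.
 */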
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/*
		 * This is usually used to map code in the small code
		 * model, so it needs to be in the first 31 bits. Limit
		 * it to that. This means we need to move the unmapped
		 * base down for this case. This can give conflicts
		 * with the heap, but we assume that glibc malloc knows
		 * how to fall back to mmap. Give it 1GB of playground
		 * for now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
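
/*
 * Illustration: for a 64-bit task with MAP_32BIT the search window is
 * [0x40000000, 0x80000000), the 1GB of playground mentioned above;
 * with PF_RANDOMIZE set, the start of the window is additionally
 * shifted by up to 32MB (0x02000000).
 */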
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:

	addr = align_addr(addr, filp, 0);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		addr = align_addr(addr, filp, 0);
	}
}
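
/*
 * Example walk (hypothetical layout): with begin == 0x40000000, a
 * single vma covering [0x40000000, 0x40100000) and len == 0x1000, the
 * loop skips to addr == vma->vm_end == 0x40100000, finds no next vma,
 * returns that address, and caches 0x40101000 in mm->free_area_cache
 * for the next search.
 */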
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		unsigned long tmp_addr = align_addr(addr - len, filp,
						    ALIGN_TOPDOWN);

		vma = find_vma(mm, tmp_addr);
		if (!vma || tmp_addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = tmp_addr;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base - len;

	do {
		addr = align_addr(addr, filp, ALIGN_TOPDOWN);

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	if (addr != (unsigned long)-ENOMEM)
		return addr;

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
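
/*
 * Example walk (hypothetical layout): with mm->mmap_base ==
 * 0x7ffff7ff0000 and len == 0x1000, the first candidate is
 * mmap_base - len; if a vma already occupies it, each iteration
 * retries just below that vma's vm_start, and when the walk runs out
 * of room it falls back to the bottom-up allocator above.
 */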