#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>
/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 *
 * @flags denotes the allocation direction - bottomup or topdown -
 * or vDSO; see call sites below.
 */
unsigned long align_addr(unsigned long addr, struct file *filp,
                         enum align_flags flags)
{
        unsigned long tmp_addr;

        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return addr;

        if (!(current->flags & PF_RANDOMIZE))
                return addr;

        if (!((flags & ALIGN_VDSO) || filp))
                return addr;

        tmp_addr = addr;

        /*
         * We need an address which is <= the original one only when
         * allocating in the topdown direction.
         */
        if (!(flags & ALIGN_TOPDOWN))
                tmp_addr += va_align.mask;

        tmp_addr &= ~va_align.mask;

        return tmp_addr;
}
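
/*
 * Illustrative only (the mask value below is an assumption, not taken from
 * this file): with va_align.mask == 0x7fff, i.e. a 32 KiB aliasing granule,
 * a bottom-up request at 0x1234567 becomes
 * (0x1234567 + 0x7fff) & ~0x7fff == 0x1238000, rounded up to the next
 * boundary, while a top-down request is only masked down to 0x1230000,
 * so the result never exceeds the original address.
 */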
static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (*str == '=')
                str++;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                return 0;

        return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
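
/*
 * Usage sketch for the boot parameter handled above: "align_va_addr=32",
 * "align_va_addr=64", "align_va_addr=on" or "align_va_addr=off" on the
 * kernel command line select which mmap flavours get the alignment
 * treatment, matching the strcmp() branches in the parser.
 */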
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}
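
/*
 * Offset handling example (illustrative): with 4 KiB pages, off == 8192 is
 * page-aligned and is forwarded as pgoff == 2, while off == 4608 has bits
 * set in ~PAGE_MASK and the call fails with -EINVAL before sys_mmap_pgoff()
 * is reached.
 */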
static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
                unsigned long new_begin;
                /* This is usually needed to map code in the small code
                   model, so it needs to be in the first 31 bits.  Limit
                   it to that.  This means we need to move the
                   unmapped base down for this case.  This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap.  Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
                        if (new_begin)
                                *begin = new_begin;
                }
        } else {
                *begin = current->mm->mmap_legacy_base;
                *end = TASK_SIZE;
        }
}
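
/*
 * Worked example (illustrative): a 64-bit task passing MAP_32BIT gets the
 * window begin == 0x40000000, end == 0x80000000, the 1GB playground the
 * comment above mentions, and with PF_RANDOMIZE the start may be shifted
 * up by at most 0x02000000 (32 MiB) via randomize_range().
 */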
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr, vm_start;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
            && len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = begin;
        }
        addr = mm->free_area_cache;
        if (addr < begin)
                addr = begin;
        start_addr = addr;

full_search:

        addr = align_addr(addr, filp, 0);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (end - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != begin) {
                                start_addr = addr = begin;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (vma)
                        vm_start = vm_start_gap(vma);
                if (!vma || addr + len <= vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vm_start)
                        mm->cached_hole_size = vm_start - addr;

                addr = vm_start;
                addr = align_addr(addr, filp, 0);
        }
}
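
/*
 * Search-cache sketch for the bottom-up path above: free_area_cache
 * remembers where the last successful search ended so the next call can
 * resume there, and cached_hole_size records the largest hole skipped on
 * the way, so that a later request small enough to fit in it knows a
 * restart from 'begin' is worthwhile.
 */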
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        unsigned long vm_start;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                unsigned long tmp_addr = align_addr(addr - len, filp,
                                                    ALIGN_TOPDOWN);

                vma = find_vma(mm, tmp_addr);
                if (!vma || tmp_addr + len <= vm_start_gap(vma))
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = tmp_addr;
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base - len;

        do {
                addr = align_addr(addr, filp, ALIGN_TOPDOWN);

                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (vma)
                        vm_start = vm_start_gap(vma);
                if (!vma || addr + len <= vm_start)
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = addr;

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vm_start)
                        mm->cached_hole_size = vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vm_start - len;
        } while (len < vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
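
/*
 * Top-down walk summary (descriptive only): starting just below
 * mm->mmap_base, each iteration aligns the candidate, asks find_vma()
 * what lies above it, and either returns the gap or retries just below
 * the blocking vma; when it runs out of room it falls back to the
 * bottom-up allocator with TASK_UNMAPPED_BASE and then restores the
 * top-down base.
 */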