/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
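
/*
 * The PAGE_SIZE - 1 default assumes a cache with no aliasing. On parts
 * whose D-cache ways are larger than a page, platform setup code is
 * expected to raise this mask to the way size minus one, so that shared
 * mappings receive matching cache colours.
 */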

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	/* Round addr up to the mask, then add the file-derived colour. */
	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	/* Prefer the colour slot at or below addr; otherwise step down. */
	if (base + off <= addr)
		return base + off;

	return base - off;
}
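
/*
 * Worked example (illustrative only, assuming 4KB pages and a 16KB
 * D-cache way, i.e. shm_align_mask == 0x3fff): a MAP_SHARED request at
 * addr 0x10000000 with pgoff 1 gives base 0x10000000 and off 0x1000,
 * so COLOUR_ALIGN() returns 0x10001000 -- the address whose low 14 bits
 * match the page's offset colour in the page cache.
 */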

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, vm_start;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* Try the caller's hint first. */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}
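
	/*
	 * Linear first-fit scan. free_area_cache remembers where the last
	 * search ended; cached_hole_size tracks the largest hole seen, so
	 * a request that would fit in a hole we already skipped restarts
	 * from TASK_UNMAPPED_BASE instead of searching ever higher.
	 */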
full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (vma)
			vm_start = vm_start_gap(vma);
		if (likely(!vma || addr + len <= vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vm_start)
			mm->cached_hole_size = vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
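
/*
 * Top-down variant, used when the process address-space layout grows the
 * mmap() area downward from mm->mmap_base. The colouring rules match the
 * bottom-up allocator; only the search direction differs.
 */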
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	unsigned long vm_start;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vm_start_gap(vma)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base - len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (vma)
			vm_start = vm_start_gap(vma);
		if (likely(!vma || addr + len <= vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vm_start)
			mm->cached_hole_size = vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vm_start - len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vm_start));
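
	/*
	 * Loop exit means len >= vm_start: there is no room for the
	 * mapping below the lowest VMA (vm_start - len would wrap), so
	 * give up on the top-down walk and fall back to bottom-up.
	 */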

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	/* Reject accesses below system RAM or beyond high_memory. */
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	/* Any physical range may be mmap()'ed through /dev/mem. */
	return 1;
}