/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 16 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif
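
/*
 * Quick refresher (illustrative; the exact sizes come from the SLICE_*
 * definitions in the mmu headers): the user address space is carved into
 * "slices", each with its own page size. The range below 4GB is covered by
 * SLICE_NUM_LOW small slices of 1 << SLICE_LOW_SHIFT bytes (typically
 * 256MB), tracked by the low_slices bitmap; everything above is covered by
 * SLICE_NUM_HIGH large slices of 1 << SLICE_HIGH_SHIFT bytes (typically
 * 1TB), tracked by high_slices. The helpers below build and compare such
 * bitmaps.
 */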

static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1u << GET_HIGH_SLICE_INDEX(start));

	return ret;
}
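
/*
 * Worked example (assuming the usual 256MB low slices): a range starting
 * at 0x10000000 with len 0x20000000 has end == 0x2fffffff, i.e. low slice
 * indices 1..2, so low_slices == (1 << 3) - (1 << 1) == 0b0110, and
 * high_slices == 0 since the range ends below SLICE_LOW_TOP.
 */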

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1u << i;

	return ret;
}

static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 psizes;

	psizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	psizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.high_slices |= 1u << i;

	return ret;
}
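
/*
 * Each slice's page size is a 4-bit MMU_PAGE_* index packed into the
 * context's low_slices_psize/high_slices_psize words, hence the
 * (psizes >> (i * 4)) & 0xf extraction above. Hypothetical example: if
 * low_slices_psize ends in ...330, slice 0 holds psize 0 and slices 1-2
 * hold psize 3, so slice_mask_for_size(mm, 3) sets bits 1 and 2 of
 * low_slices.
 */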

static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	/* Write the new slice psize bits */
	u64 lpsizes, hpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (mask.high_slices & (1u << i))
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
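
/*
 * Note: slice_convert() only rewrites the psize nibbles and, when SPU
 * support is built in, flushes the SLBs of SPU contexts using this mm so
 * they drop entries set up with the old segment page size. The CPU-side
 * SLB flush is left to the callers that need it, via slice_flush_segments()
 * (see slice_get_unmapped_area() below).
 */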

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long start_addr, addr, vm_start;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		} else
			start_addr = addr = mm->free_area_cache;
	} else
		start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (vma)
			vm_start = vm_start_gap(vma);
		if (!vma || addr + len <= vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			if (use_cache)
				mm->free_area_cache = addr + len;
			return addr;
		}
		if (use_cache && (addr + mm->cached_hole_size) < vm_start)
			mm->cached_hole_size = vm_start - addr;
		addr = vma->vm_end;
	}

	/* Make sure we didn't miss any holes */
	if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}
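
/*
 * Note on use_cache: when set, the search above consults and updates the
 * mm's free_area_cache / cached_hole_size hints. The top-down variant
 * below passes use_cache=0 when it falls back to this bottom-up search,
 * so the fallback does not disturb the cached top-down state, which the
 * caller then resets explicitly.
 */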

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr, vm_start;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
				/* remember the address as a hint for
				 * next time
				 */
				return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (vma)
			vm_start = vm_start_gap(vma);
		if (!vma || (addr + len) <= vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vm_start)
			mm->cached_hole_size = vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, use_cache);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}

#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
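
/*
 * MMU_PAGE_BASE is the kernel's base page size index: 64K on
 * CONFIG_PPC_64K_PAGES kernels, 4K otherwise. slice_get_unmapped_area()
 * below only forces an immediate segment flush (slice_flush_segments())
 * when slices were converted to a page size larger than this base; see
 * the psize > MMU_PAGE_BASE check at the end of that function.
 */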

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 * else if have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * else:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
					  use_cache);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown,
				       use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	u64 psizes;
	int index;

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}

	return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
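
/*
 * Illustrative example: with 256MB low slices, addr == 0x12345678 falls in
 * low slice 1, so the psize returned above is bits 4-7 of
 * low_slices_psize.
 */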

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */

void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
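
/*
 * Note: slice_set_user_psize() only rewrites slices whose nibble still
 * matches the old user psize; slices that were explicitly converted to
 * another size (e.g. via slice_set_range_psize() below) keep their current
 * setting.
 */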

void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned long i, flags;
	u64 *p;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		p = &mm->context.low_slices_psize;
	} else {
		i = GET_HIGH_SLICE_INDEX(address);
		p = &mm->context.high_slices_psize;
	}
	*p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */

int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif

	return !slice_check_fit(mask, available);
}