/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
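
/* sparc64 cannot map anything in the virtual-address "hole" between
 * 0x0000080000000000 and 0xfffff80000000000.  The constants above widen
 * that range by 4GB of guard on either side; the search loops below skip
 * the whole excluded region in one step rather than probing inside it.
 */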

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr, vm_start;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len >= VA_EXCLUDE_START))
                return -ENOMEM;

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        addr = ALIGN(addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (vma)
                        vm_start = vm_start_gap(vma);
                if (likely(!vma || addr + len <= vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vm_start)
                        mm->cached_hole_size = vm_start - addr;

                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}
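
/* Top-down variant, used for 32-bit tasks whose mmap layout grows down
 * from mm->mmap_base: start just below the cached hint and walk toward
 * lower addresses until a large enough HPAGE_SIZE-aligned hole is found.
 */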
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        unsigned long vm_start;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache & HPAGE_MASK;

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vm_start_gap(vma)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = (mm->mmap_base-len) & HPAGE_MASK;

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (vma)
                        vm_start = vm_start_gap(vma);
                if (likely(!vma || addr + len <= vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vm_start)
                        mm->cached_hole_size = vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vm_start - len) & HPAGE_MASK;
        } while (likely(len < vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
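
/* On this sparc64 implementation a huge page is not a single huge PMD
 * or PUD entry: it is backed by (1 << HUGETLB_PAGE_ORDER) consecutive
 * base-page PTEs (with 8KB base pages and 4MB huge pages, 512 sub-ptes).
 * The helpers below therefore allocate and walk the page tables all the
 * way down to the pte level.
 */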
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
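
/* Installing or clearing a huge mapping touches every sub-pte in the
 * range, stepping the physical address in the pte value by PAGE_SIZE
 * each iteration.  context.huge_pte_count tracks how many huge mappings
 * exist so that the huge-page TSB can be sized appropriately.
 */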
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}
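
/* Because huge mappings are made of ordinary ptes, the generic
 * follow_page() path resolves them with a normal page-table walk.  The
 * hooks below only need to report that nothing special lives at the
 * pmd or pud level.
 */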
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

static void context_reload(void *__data)
{
        struct mm_struct *mm = __data;

        if (mm == current->mm)
                load_secondary_context(mm);
}
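
/* Make sure the MM_TSB_HUGE TSB exists before the first huge-page TLB
 * miss in this address space, and publish the new page-size setup to
 * every CPU currently running this mm.
 */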
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
        struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

        if (likely(tp->tsb != NULL))
                return;

        tsb_grow(mm, MM_TSB_HUGE, 0);
        tsb_context_switch(mm);
        smp_tsb_sync(mm);

        /* On UltraSPARC-III+ and later, configure the second half of
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
                unsigned long ctx;

                spin_lock(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
                ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

                if (ctx != mm->context.sparc64_ctx_val) {
                        /* When changing the page size fields, we
                         * must perform a context flush so that no
                         * stale entries match.  This flush must
                         * occur with the original context register
                         * value.
                         */
                        do_flush_tlb_mm(mm);

                        /* Reload the context register of all processors
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
                        on_each_cpu(context_reload, mm, 0);
                }
                spin_unlock(&ctx_alloc_lock);
        }
}