/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
                                                unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct hstate *h = hstate_file(file);
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        if (TASK_SIZE - len < addr)
                return -EINVAL;

        vma = find_vma(mm, ALIGN_HUGEPT(addr));
        if (vma && !(vma->vm_flags & VM_HUGETLB))
                return -EINVAL;

        vma = find_vma(mm, addr);
        if (vma) {
                if (addr + len > vma->vm_start)
                        return -EINVAL;
                if (!(vma->vm_flags & VM_HUGETLB) &&
                    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
                        return -EINVAL;
        }
        return 0;
}

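/*
 * Allocate a pte for a huge page mapping. Huge page ptes live in the
 * second-level page tables; the covering top-level entry is re-stamped
 * with _PAGE_SZHUGE so the whole range it covers is treated as huge.
 */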
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        pte = pte_alloc_map(mm, NULL, pmd, addr);
        pgd->pgd &= ~_PAGE_SZ_MASK;
        pgd->pgd |= _PAGE_SZHUGE;

        return pte;
}

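/*
 * Find the pte for a huge page mapping, walking the page tables
 * without allocating anything.
 */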
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        pte = pte_offset_kernel(pmd, addr);

        return pte;
}

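/* Huge pmds are never shared on metag, so there is nothing to unshare. */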
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

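/* No special huge-address lookup is provided. */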
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

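/* A pmd maps huge pages if its encoded page size exceeds the normal one. */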
int pmd_huge(pmd_t pmd)
{
        return pmd_page_shift(pmd) > PAGE_SHIFT;
}

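/* There are no huge puds on metag. */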
int pud_huge(pud_t pud)
{
        return 0;
}

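/*
 * Nothing special to do here: huge pages are reachable through the
 * normal pte walk.
 */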
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no spare huge ptes if all the huge pages
 * are full size (4MB), so in that case compile out this search.
 */
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
        return 0;
}
#else
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr, addr;
        int after_huge;

        if (mm->context.part_huge) {
                start_addr = mm->context.part_huge;
                after_huge = 1;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                after_huge = 0;
        }
new_search:
        addr = start_addr;

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto new_search;
                        }
                        return 0;
                }
                /* skip ahead if we've aligned right over some vmas */
                if (vma && vma->vm_end <= addr)
                        continue;
                /* space before the next vma? */
                if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
                            <= vma->vm_start)) {
                        unsigned long end = addr + len;
                        if (end & HUGEPT_MASK)
                                mm->context.part_huge = end;
                        else if (addr == mm->context.part_huge)
                                mm->context.part_huge = 0;
                        return addr;
                }
                if (vma && (vma->vm_flags & VM_HUGETLB)) {
                        /* space after a huge vma in 2nd level page table? */
                        if (vma->vm_end & HUGEPT_MASK) {
                                after_huge = 1;
                                /* no need to align to the next PT block */
                                addr = vma->vm_end;
                                continue;
                        }
                }
                after_huge = 0;
                addr = ALIGN_HUGEPT(vma->vm_end);
        }
}
#endif

/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & HUGEPT_MASK;
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

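/*
 * Pick an unmapped area for a huge page mapping: honour MAP_FIXED and
 * address hints when they pass prepare_hugepage_range(), then prefer
 * gaps next to existing hugetlb vmas, and finally fall back to a fresh
 * naturally aligned block.
 */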
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                if (!prepare_hugepage_range(file, addr, len))
                        return addr;
        }

        /*
         * Look for an existing hugetlb vma with space after it (this is
         * to minimise fragmentation caused by huge pages).
         */
        addr = hugetlb_get_unmapped_area_existing(len);
        if (addr)
                return addr;

        /*
         * Find an unmapped naturally aligned set of 4MB blocks that we can use
         * for huge pages.
         */
        return hugetlb_get_unmapped_area_new_pmd(len);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == (1 << HPAGE_SHIFT)) {
                hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
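
/*
 * Usage example: booting with "hugepagesz=4M hugepages=16" selects the
 * 4MB hstate above and has the generic hugetlb code reserve 16 huge
 * pages at boot.
 */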