hugetlb: Try to grow hugetlb pool for MAP_SHARED mappings
mm/hugetlb.c (pandora-kernel.git)
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

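/*
 * Clear or copy a huge page one base page at a time, calling cond_resched()
 * between base pages so that long clears and copies do not hog the CPU.
 */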
static void clear_huge_page(struct page *page, unsigned long addr)
{
        int i;

        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
                clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
}

static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
{
        int i;

        might_sleep();
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

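/*
 * Add a free huge page to its node's free list.
 * Caller must hold hugetlb_lock.
 */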
static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}

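/*
 * Take a free huge page off a free list, preferring nodes allowed by the
 * VMA's memory policy and the current cpuset.  For shared (VM_MAYSHARE)
 * mappings the page is charged against the reserve.
 * Caller must hold hugetlb_lock.
 */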
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
{
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol);
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
                        if (vma && vma->vm_flags & VM_MAYSHARE)
                                resv_huge_pages--;
                        break;
                }
        }
        mpol_free(mpol);        /* unref if mpol !NULL */
        return page;
}

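/*
 * Take a huge page out of the pool entirely and hand it back to the buddy
 * allocator, clearing any lingering page flags first.
 * Caller must hold hugetlb_lock.
 */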
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

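/*
 * Compound page destructor, run when the last reference to a huge page is
 * dropped.  Surplus pages go straight back to the buddy allocator; all
 * others return to the hugetlb free lists.
 */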
static void free_huge_page(struct page *page)
{
        int nid = page_to_nid(page);

        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        if (surplus_huge_pages_node[nid]) {
                update_and_free_page(page);
                surplus_huge_pages--;
                surplus_huge_pages_node[nid]--;
        } else {
                enqueue_huge_page(page);
        }
        spin_unlock(&hugetlb_lock);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
        static int prev_nid;
        int nid = prev_nid;
        int ret = 0;

        VM_BUG_ON(delta != -1 && delta != 1);
        do {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                /* To shrink on this node, there must be a surplus page */
                if (delta < 0 && !surplus_huge_pages_node[nid])
                        continue;
                /* Surplus cannot exceed the total number of pages */
                if (delta > 0 && surplus_huge_pages_node[nid] >=
                                                nr_huge_pages_node[nid])
                        continue;

                surplus_huge_pages += delta;
                surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (nid != prev_nid);

        prev_nid = nid;
        return ret;
}

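/*
 * Allocate a fresh huge page from the buddy allocator and add it to the
 * pool as a persistent page, cycling round-robin over the online nodes.
 * Returns 1 on success, 0 on failure.
 */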
static int alloc_fresh_huge_page(void)
{
        static int prev_nid;
        struct page *page;
        int nid;

        /*
         * Copy static prev_nid to local nid, work on that, then copy it
         * back to prev_nid afterwards: otherwise there's a window in which
         * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
         * But we don't need to use a spin_lock here: it really doesn't
         * matter if occasionally a racer chooses the same nid as we do.
         */
        nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        prev_nid = nid;

        page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
                return 1;
        }
        return 0;
}

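/*
 * Allocate a temporary "surplus" huge page directly from the buddy
 * allocator, over and above the persistent pool size.  The page is counted
 * in the surplus totals so that free_huge_page() returns it to the buddy
 * allocator rather than to the pool.
 */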
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                                                unsigned long address)
{
        struct page *page;

        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                surplus_huge_pages++;
                surplus_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
        }

        return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.  Called with hugetlb_lock held; the lock may be dropped
 * and reacquired while surplus pages are allocated.
 */
static int gather_surplus_pages(int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;

        needed = (resv_huge_pages + delta) - free_huge_pages;
        if (needed <= 0)
                return 0;

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(NULL, 0);
                if (!page) {
                        /*
                         * We were not able to allocate enough pages to
                         * satisfy the entire reservation so we free what
                         * we've allocated so far.
                         */
                        spin_lock(&hugetlb_lock);
                        needed = 0;
                        goto free;
                }

                list_add(&page->lru, &surplus_list);
        }
        allocated += needed;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
        if (needed > 0)
                goto retry;

        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.
         */
        needed += allocated;
        ret = 0;
free:
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                list_del(&page->lru);
                if ((--needed) >= 0)
                        enqueue_huge_page(page);
                else
                        update_and_free_page(page);
        }

        return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 */
void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
        static int nid = -1;
        struct page *page;
        unsigned long nr_pages;

        nr_pages = min(unused_resv_pages, surplus_huge_pages);

        while (nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                if (!surplus_huge_pages_node[nid])
                        continue;

                if (!list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
                        surplus_huge_pages--;
                        surplus_huge_pages_node[nid]--;
                        nr_pages--;
                }
        }
}

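/*
 * Allocate a huge page for the given VMA and fault address.  Shared
 * mappings draw on the reserved pool; private mappings may fall back to a
 * surplus page from the buddy allocator if the pool is exhausted.
 */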
static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr)
{
        struct page *page = NULL;
        int use_reserved_page = vma->vm_flags & VM_MAYSHARE;

        spin_lock(&hugetlb_lock);
        if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
                goto fail;

        page = dequeue_huge_page(vma, addr);
        if (!page)
                goto fail;

        spin_unlock(&hugetlb_lock);
        set_page_refcounted(page);
        return page;

fail:
        spin_unlock(&hugetlb_lock);

        /*
         * Private mappings do not use reserved huge pages so the allocation
         * may have failed due to an undersized hugetlb pool.  Try to grab a
         * surplus huge page from the buddy allocator.
         */
        if (!use_reserved_page)
                page = alloc_buddy_huge_page(vma, addr);

        return page;
}

static int __init hugetlb_init(void)
{
        unsigned long i;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

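/*
 * Parse the "hugepages=" boot parameter.  For example, booting with
 * "hugepages=64" asks hugetlb_init() to preallocate 64 huge pages.
 */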
static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);

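/* Sum a per-node counter over the nodes in the current task's cpuset. */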
static unsigned int cpuset_mems_nr(unsigned int *array)
{
        int node;
        unsigned int nr = 0;

        for_each_node_mask(node, cpuset_current_mems_allowed)
                nr += array[node];

        return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

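/*
 * The persistent pool excludes surplus pages, which are released as soon
 * as they are freed.  set_max_huge_pages() resizes the persistent pool to
 * 'count' pages, first by converting surplus pages and then by allocating
 * or freeing pages, taking care not to break existing reservations.
 */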
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
        unsigned long min_count, ret;

        /*
         * Increase the pool size
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         */
        spin_lock(&hugetlb_lock);
        while (surplus_huge_pages && count > persistent_huge_pages) {
                if (!adjust_pool_surplus(-1))
                        break;
        }

        while (count > persistent_huge_pages) {
                int ret;
                /*
                 * If this allocation races such that we no longer need the
                 * page, free_huge_page will handle it by freeing the page
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
                ret = alloc_fresh_huge_page();
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
        }
        if (count >= persistent_huge_pages)
                goto out;

        /*
         * Decrease the pool size
         * First return free pages to the buddy allocator (being careful
         * to keep enough around to satisfy reservations).  Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         */
        min_count = max(count, resv_huge_pages);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
        }
        while (count < persistent_huge_pages) {
                if (!adjust_pool_surplus(1))
                        break;
        }
out:
        ret = persistent_huge_pages;
        spin_unlock(&hugetlb_lock);
        return ret;
}

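/* Handler for the /proc/sys/vm/nr_hugepages sysctl. */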
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
                        struct file *file, void __user *buffer,
                        size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (hugepages_treat_as_movable)
                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
        else
                htlb_alloc_mask = GFP_HIGHUSER;
        return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "HugePages_Surp:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        resv_huge_pages,
                        surplus_huge_pages,
                        HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        BUG();
        return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                                int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(*ptep));
        if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
        }
}

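/*
 * Copy huge page table entries from a parent mm to a child at fork time.
 * For private copy-on-write mappings the parent's entries are write
 * protected so that both processes fault and copy on the next write.
 */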
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        if (cow)
                                ptep_set_wrprotect(src, addr, src_pte);
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

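/*
 * Tear down the huge page table entries in [start, end), dropping a
 * reference on each mapped page.  The caller must hold the mapping's
 * i_mmap_lock; see unmap_hugepage_range() below.
 */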
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
        struct page *tmp;
        /*
         * A page gathering list, protected by per file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
        }
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        /*
         * It is undesirable to test vma->vm_file as it should be non-null
         * for a valid hugetlb area. However, vm_file will be NULL in the
         * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
         * method fails, do_mmap_pgoff() nullifies vma->vm_file before
         * calling this function to clean up. Since no pte has actually
         * been set up, it is safe to do nothing in this case.
         */
        if (vma->vm_file) {
                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
                __unmap_hugepage_range(vma, start, end);
                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
        }
}

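/*
 * Handle a copy-on-write fault on a huge page: allocate a new huge page,
 * copy the old contents into it and switch the PTE over to the new page.
 * Called with mm->page_table_lock held; the lock is dropped around the copy.
 */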
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
{
        struct page *old_page, *new_page;
        int avoidcopy;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);

        if (!new_page) {
                page_cache_release(old_page);
                return VM_FAULT_OOM;
        }

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return 0;
}

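/*
 * Fault in a huge page that has no PTE yet: look it up in the page cache,
 * or allocate a new one and (for shared mappings) insert it, then install
 * the PTE.  Racing truncation is guarded against with the page lock.
 */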
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                if (idx >= size)
                        goto out;
                if (hugetlb_get_quota(mapping))
                        goto out;
                page = alloc_huge_page(vma, address);
                if (!page) {
                        hugetlb_put_quota(mapping);
                        ret = VM_FAULT_OOM;
                        goto out;
                }
                clear_huge_page(page, address);

                if (vma->vm_flags & VM_SHARED) {
                        int err;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
                                hugetlb_put_quota(mapping);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
                } else
                        lock_page(page);
        }

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = 0;
        if (!pte_none(*ptep))
                goto backout;

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}

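/*
 * Top-level fault handler for hugetlb VMAs: dispatch missing-page faults
 * to hugetlb_no_page() and write faults on read-only PTEs to hugetlb_cow().
 */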
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);

        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = *ptep;
        if (pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
        }

        ret = 0;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

        return ret;
}

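/*
 * The get_user_pages() back end for hugetlb VMAs: walk the huge PTEs for
 * the requested range, faulting pages in as needed, and fill in the
 * pages[] and vmas[] arrays one base page at a time.
 */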
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
                page = pte_page(*pte);
same_page:
                if (pages) {
                        get_page(page);
                        pages[i] = page + pfn_offset;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++pfn_offset;
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
                                pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
                        goto same_page;
                }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}

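/*
 * Change the page protection of every huge PTE in [address, end), used to
 * implement mprotect() on hugetlb mappings.
 */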
void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
                }
        }
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);
}

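/*
 * The hugetlb reservation map: a list of file_region entries hanging off
 * inode->i_mapping->private_list, each covering a reserved range of huge
 * pages in the file, in units of huge pages.
 */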
struct file_region {
        struct list_head link;
        long from;
        long to;
};

static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

static int hugetlb_acct_memory(long delta)
{
        int ret = -ENOMEM;

        spin_lock(&hugetlb_lock);
        /*
         * When cpuset is configured, it breaks the strict hugetlb page
         * reservation as the accounting is done on a global variable. Such
         * a reservation is completely rubbish in the presence of cpuset
         * because the reservation is not checked against page availability
         * for the current cpuset. An application can still be OOM'ed by the
         * kernel if the cpuset it runs in lacks free hugetlb pages.
         * Enforcing strict accounting with cpuset is almost impossible (or
         * too ugly) because cpuset is so fluid that tasks and memory nodes
         * can be dynamically moved between cpusets.
         *
         * The change of semantics for shared hugetlb mappings with cpuset is
         * undesirable. However, in order to preserve some of the semantics,
         * we fall back to checking against the current free page
         * availability as a best attempt, hopefully minimizing the impact
         * of the semantic change that cpuset introduces.
         */
        if (delta > 0) {
                if (gather_surplus_pages(delta) < 0)
                        goto out;

                if (delta > cpuset_mems_nr(free_huge_pages_node))
                        goto out;
        }

        ret = 0;
        resv_huge_pages += delta;
        if (delta < 0)
                return_unused_surplus_pages((unsigned long) -delta);

out:
        spin_unlock(&hugetlb_lock);
        return ret;
}

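/*
 * Reserve huge pages for the range [from, to) of a hugetlbfs inode,
 * recording the range in the inode's reservation map and charging the
 * global reservation counters.
 */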
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
        long ret, chg;

        chg = region_chg(&inode->i_mapping->private_list, from, to);
        if (chg < 0)
                return chg;

        ret = hugetlb_acct_memory(chg);
        if (ret < 0)
                return ret;
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
        hugetlb_acct_memory(freed - chg);
}