mm: hugetlb: fix softlockup when a large number of hugepages are freed.
[pandora-kernel.git] / mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <linux/io.h>
28
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40
41 __initdata LIST_HEAD(huge_boot_pages);
42
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47
48 #define for_each_hstate(h) \
49         for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55
56 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
57 {
58         bool free = (spool->count == 0) && (spool->used_hpages == 0);
59
60         spin_unlock(&spool->lock);
61
62         /* If no pages are used, and no other handles to the subpool
63          * remain, free the subpool. */
64         if (free)
65                 kfree(spool);
66 }
67
68 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
69 {
70         struct hugepage_subpool *spool;
71
72         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
73         if (!spool)
74                 return NULL;
75
76         spin_lock_init(&spool->lock);
77         spool->count = 1;
78         spool->max_hpages = nr_blocks;
79         spool->used_hpages = 0;
80
81         return spool;
82 }
83
84 void hugepage_put_subpool(struct hugepage_subpool *spool)
85 {
86         spin_lock(&spool->lock);
87         BUG_ON(!spool->count);
88         spool->count--;
89         unlock_or_release_subpool(spool);
90 }
91
92 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
93                                       long delta)
94 {
95         int ret = 0;
96
97         if (!spool)
98                 return 0;
99
100         spin_lock(&spool->lock);
101         if ((spool->used_hpages + delta) <= spool->max_hpages) {
102                 spool->used_hpages += delta;
103         } else {
104                 ret = -ENOMEM;
105         }
106         spin_unlock(&spool->lock);
107
108         return ret;
109 }
110
111 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
112                                        long delta)
113 {
114         if (!spool)
115                 return;
116
117         spin_lock(&spool->lock);
118         spool->used_hpages -= delta;
119         /* If hugetlbfs_put_super couldn't free spool due to
120          * an outstanding quota reference, free it now. */
121         unlock_or_release_subpool(spool);
122 }
123
124 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
125 {
126         return HUGETLBFS_SB(inode->i_sb)->spool;
127 }
128
129 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
130 {
131         return subpool_inode(vma->vm_file->f_dentry->d_inode);
132 }
133
134 /*
135  * Region tracking -- allows tracking of reservations and instantiated pages
136  *                    across the pages in a mapping.
137  *
138  * The region data structures are protected by a combination of the mmap_sem
139  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
140  * must either hold the mmap_sem for write, or the mmap_sem for read and
141  * the hugetlb_instantiation mutex:
142  *
143  *      down_write(&mm->mmap_sem);
144  * or
145  *      down_read(&mm->mmap_sem);
146  *      mutex_lock(&hugetlb_instantiation_mutex);
147  */
148 struct file_region {
149         struct list_head link;
150         long from;
151         long to;
152 };
153
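/*
 * Add the range [f, t) to the map of reserved regions, merging it with any
 * regions it overlaps.  region_add() never allocates; as used in this file a
 * caller first calls region_chg() for the same range (which may insert a
 * zero-size placeholder region) and later commits the reservation, e.g. in
 * vma_needs_reservation()/vma_commit_reservation():
 *
 *	chg = region_chg(&reservations->regions, idx, idx + 1);
 *	... instantiate the page ...
 *	region_add(&reservations->regions, idx, idx + 1);
 */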
154 static long region_add(struct list_head *head, long f, long t)
155 {
156         struct file_region *rg, *nrg, *trg;
157
158         /* Locate the region we are either in or before. */
159         list_for_each_entry(rg, head, link)
160                 if (f <= rg->to)
161                         break;
162
163         /* Round our left edge to the current segment if it encloses us. */
164         if (f > rg->from)
165                 f = rg->from;
166
167         /* Check for and consume any regions we now overlap with. */
168         nrg = rg;
169         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
170                 if (&rg->link == head)
171                         break;
172                 if (rg->from > t)
173                         break;
174
175                 /* If this area reaches higher, then extend our area to
176                  * include it completely.  If this is not the first area
177                  * which we intend to reuse, free it. */
178                 if (rg->to > t)
179                         t = rg->to;
180                 if (rg != nrg) {
181                         list_del(&rg->link);
182                         kfree(rg);
183                 }
184         }
185         nrg->from = f;
186         nrg->to = t;
187         return 0;
188 }
189
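/*
 * Return the number of additional huge pages that would have to be reserved
 * to cover the range [f, t), given the regions already recorded.  If the
 * range lies before every existing region, a zero-size placeholder region is
 * inserted so that a later region_add() cannot fail; returns -ENOMEM if that
 * allocation fails.
 */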
190 static long region_chg(struct list_head *head, long f, long t)
191 {
192         struct file_region *rg, *nrg;
193         long chg = 0;
194
195         /* Locate the region we are before or in. */
196         list_for_each_entry(rg, head, link)
197                 if (f <= rg->to)
198                         break;
199
200         /* If we are below the current region then a new region is required.
201          * Subtle, allocate a new region at the position but make it zero
202          * size such that we can guarantee to record the reservation. */
203         if (&rg->link == head || t < rg->from) {
204                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
205                 if (!nrg)
206                         return -ENOMEM;
207                 nrg->from = f;
208                 nrg->to   = f;
209                 INIT_LIST_HEAD(&nrg->link);
210                 list_add(&nrg->link, rg->link.prev);
211
212                 return t - f;
213         }
214
215         /* Round our left edge to the current segment if it encloses us. */
216         if (f > rg->from)
217                 f = rg->from;
218         chg = t - f;
219
220         /* Check for and consume any regions we now overlap with. */
221         list_for_each_entry(rg, rg->link.prev, link) {
222                 if (&rg->link == head)
223                         break;
224                 if (rg->from > t)
225                         return chg;
226
227                 /* We overlap with this area; if it extends further than
228                  * us then we must extend ourselves.  Account for its
229                  * existing reservation. */
230                 if (rg->to > t) {
231                         chg += rg->to - t;
232                         t = rg->to;
233                 }
234                 chg -= rg->to - rg->from;
235         }
236         return chg;
237 }
238
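/*
 * Drop all regions at offsets >= 'end', trimming any region that straddles
 * 'end'.  Returns the number of reserved pages released.
 */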
239 static long region_truncate(struct list_head *head, long end)
240 {
241         struct file_region *rg, *trg;
242         long chg = 0;
243
244         /* Locate the region we are either in or before. */
245         list_for_each_entry(rg, head, link)
246                 if (end <= rg->to)
247                         break;
248         if (&rg->link == head)
249                 return 0;
250
251         /* If we are in the middle of a region then adjust it. */
252         if (end > rg->from) {
253                 chg = rg->to - end;
254                 rg->to = end;
255                 rg = list_entry(rg->link.next, typeof(*rg), link);
256         }
257
258         /* Drop any remaining regions. */
259         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
260                 if (&rg->link == head)
261                         break;
262                 chg += rg->to - rg->from;
263                 list_del(&rg->link);
264                 kfree(rg);
265         }
266         return chg;
267 }
268
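/*
 * Count how many pages in the range [f, t) are covered by existing regions,
 * i.e. how much of the range already carries a reservation.
 */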
269 static long region_count(struct list_head *head, long f, long t)
270 {
271         struct file_region *rg;
272         long chg = 0;
273
274         /* Locate each segment we overlap with, and count that overlap. */
275         list_for_each_entry(rg, head, link) {
276                 long seg_from;
277                 long seg_to;
278
279                 if (rg->to <= f)
280                         continue;
281                 if (rg->from >= t)
282                         break;
283
284                 seg_from = max(rg->from, f);
285                 seg_to = min(rg->to, t);
286
287                 chg += seg_to - seg_from;
288         }
289
290         return chg;
291 }
292
293 /*
294  * Convert the address within this vma to the page offset within
295  * the mapping, in pagecache page units; huge pages here.
296  */
297 static pgoff_t vma_hugecache_offset(struct hstate *h,
298                         struct vm_area_struct *vma, unsigned long address)
299 {
300         return ((address - vma->vm_start) >> huge_page_shift(h)) +
301                         (vma->vm_pgoff >> huge_page_order(h));
302 }
303
304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
305                                      unsigned long address)
306 {
307         return vma_hugecache_offset(hstate_vma(vma), vma, address);
308 }
309
310 /*
311  * Return the size of the pages allocated when backing a VMA. In the majority
312  * of cases this will be the same size as used by the page table entries.
313  */
314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
315 {
316         struct hstate *hstate;
317
318         if (!is_vm_hugetlb_page(vma))
319                 return PAGE_SIZE;
320
321         hstate = hstate_vma(vma);
322
323         return 1UL << (hstate->order + PAGE_SHIFT);
324 }
325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
326
327 /*
328  * Return the page size being used by the MMU to back a VMA. In the majority
329  * of cases, the page size used by the kernel matches the MMU size. On
330  * architectures where it differs, an architecture-specific version of this
331  * function is required.
332  */
333 #ifndef vma_mmu_pagesize
334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
335 {
336         return vma_kernel_pagesize(vma);
337 }
338 #endif
339
340 /*
341  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
342  * bits of the reservation map pointer, which are always clear due to
343  * alignment.
344  */
345 #define HPAGE_RESV_OWNER    (1UL << 0)
346 #define HPAGE_RESV_UNMAPPED (1UL << 1)
347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
348
349 /*
350  * These helpers are used to track how many pages are reserved for
351  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
352  * is guaranteed to have its future faults succeed.
353  *
354  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
355  * the reserve counters are updated with the hugetlb_lock held. It is safe
356  * to reset the VMA at fork() time as it is not in use yet and there is no
357  * chance of the global counters getting corrupted as a result of the values.
358  *
359  * The private mapping reservation is represented in a subtly different
360  * manner to a shared mapping.  A shared mapping has a region map associated
361  * with the underlying file; this region map represents the backing file
362  * pages which have ever had a reservation assigned, and this persists even
363  * after the page is instantiated.  A private mapping has a region map
364  * associated with the original mmap which is attached to all VMAs which
365  * reference it; this region map represents those offsets which have consumed
366  * a reservation, i.e. where pages have been instantiated.
367  */
368 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
369 {
370         return (unsigned long)vma->vm_private_data;
371 }
372
373 static void set_vma_private_data(struct vm_area_struct *vma,
374                                                         unsigned long value)
375 {
376         vma->vm_private_data = (void *)value;
377 }
378
379 struct resv_map {
380         struct kref refs;
381         struct list_head regions;
382 };
383
384 static struct resv_map *resv_map_alloc(void)
385 {
386         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
387         if (!resv_map)
388                 return NULL;
389
390         kref_init(&resv_map->refs);
391         INIT_LIST_HEAD(&resv_map->regions);
392
393         return resv_map;
394 }
395
396 static void resv_map_release(struct kref *ref)
397 {
398         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
399
400         /* Clear out any active regions before we release the map. */
401         region_truncate(&resv_map->regions, 0);
402         kfree(resv_map);
403 }
404
405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
406 {
407         VM_BUG_ON(!is_vm_hugetlb_page(vma));
408         if (!(vma->vm_flags & VM_MAYSHARE))
409                 return (struct resv_map *)(get_vma_private_data(vma) &
410                                                         ~HPAGE_RESV_MASK);
411         return NULL;
412 }
413
414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
415 {
416         VM_BUG_ON(!is_vm_hugetlb_page(vma));
417         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
418
419         set_vma_private_data(vma, (get_vma_private_data(vma) &
420                                 HPAGE_RESV_MASK) | (unsigned long)map);
421 }
422
423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
424 {
425         VM_BUG_ON(!is_vm_hugetlb_page(vma));
426         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
427
428         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
429 }
430
431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
432 {
433         VM_BUG_ON(!is_vm_hugetlb_page(vma));
434
435         return (get_vma_private_data(vma) & flag) != 0;
436 }
437
438 /* Decrement the reserved pages in the hugepage pool by one */
439 static void decrement_hugepage_resv_vma(struct hstate *h,
440                         struct vm_area_struct *vma)
441 {
442         if (vma->vm_flags & VM_NORESERVE)
443                 return;
444
445         if (vma->vm_flags & VM_MAYSHARE) {
446                 /* Shared mappings always use reserves */
447                 h->resv_huge_pages--;
448         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
449                 /*
450                  * Only the process that called mmap() has reserves for
451                  * private mappings.
452                  */
453                 h->resv_huge_pages--;
454         }
455 }
456
457 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
458 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
459 {
460         VM_BUG_ON(!is_vm_hugetlb_page(vma));
461         if (!(vma->vm_flags & VM_MAYSHARE))
462                 vma->vm_private_data = (void *)0;
463 }
464
465 /* Returns true if the VMA has associated reserve pages */
466 static int vma_has_reserves(struct vm_area_struct *vma)
467 {
468         if (vma->vm_flags & VM_MAYSHARE)
469                 return 1;
470         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
471                 return 1;
472         return 0;
473 }
474
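/*
 * Copy a gigantic huge page one base page at a time.  mem_map_next() is used
 * because the struct pages of an order >= MAX_ORDER page are not guaranteed
 * to be contiguous in the mem_map.
 */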
475 static void copy_gigantic_page(struct page *dst, struct page *src)
476 {
477         int i;
478         struct hstate *h = page_hstate(src);
479         struct page *dst_base = dst;
480         struct page *src_base = src;
481
482         for (i = 0; i < pages_per_huge_page(h); ) {
483                 cond_resched();
484                 copy_highpage(dst, src);
485
486                 i++;
487                 dst = mem_map_next(dst, dst_base, i);
488                 src = mem_map_next(src, src_base, i);
489         }
490 }
491
492 void copy_huge_page(struct page *dst, struct page *src)
493 {
494         int i;
495         struct hstate *h = page_hstate(src);
496
497         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
498                 copy_gigantic_page(dst, src);
499                 return;
500         }
501
502         might_sleep();
503         for (i = 0; i < pages_per_huge_page(h); i++) {
504                 cond_resched();
505                 copy_highpage(dst + i, src + i);
506         }
507 }
508
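/* Put a free huge page on its node's free list; caller holds hugetlb_lock. */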
509 static void enqueue_huge_page(struct hstate *h, struct page *page)
510 {
511         int nid = page_to_nid(page);
512         list_add(&page->lru, &h->hugepage_freelists[nid]);
513         h->free_huge_pages++;
514         h->free_huge_pages_node[nid]++;
515 }
516
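/*
 * Take a free huge page off the given node's free list, or return NULL if
 * that node has none.  Called with hugetlb_lock held.
 */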
517 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
518 {
519         struct page *page;
520
521         if (list_empty(&h->hugepage_freelists[nid]))
522                 return NULL;
523         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
524         list_del(&page->lru);
525         set_page_refcounted(page);
526         h->free_huge_pages--;
527         h->free_huge_pages_node[nid]--;
528         return page;
529 }
530
531 static struct page *dequeue_huge_page_vma(struct hstate *h,
532                                 struct vm_area_struct *vma,
533                                 unsigned long address, int avoid_reserve)
534 {
535         struct page *page = NULL;
536         struct mempolicy *mpol;
537         nodemask_t *nodemask;
538         struct zonelist *zonelist;
539         struct zone *zone;
540         struct zoneref *z;
541         unsigned int cpuset_mems_cookie;
542
543 retry_cpuset:
544         cpuset_mems_cookie = get_mems_allowed();
545         zonelist = huge_zonelist(vma, address,
546                                         htlb_alloc_mask, &mpol, &nodemask);
547         /*
548          * A child process with MAP_PRIVATE mappings created by its parent
549          * has no page reserves. This check ensures that reservations are
550          * not "stolen". The child may still get SIGKILLed
551          */
552         if (!vma_has_reserves(vma) &&
553                         h->free_huge_pages - h->resv_huge_pages == 0)
554                 goto err;
555
556         /* If reserves cannot be used, ensure enough pages are in the pool */
557         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
558                 goto err;
559
560         for_each_zone_zonelist_nodemask(zone, z, zonelist,
561                                                 MAX_NR_ZONES - 1, nodemask) {
562                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
563                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
564                         if (page) {
565                                 if (!avoid_reserve)
566                                         decrement_hugepage_resv_vma(h, vma);
567                                 break;
568                         }
569                 }
570         }
571
572         mpol_cond_put(mpol);
573         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
574                 goto retry_cpuset;
575         return page;
576
577 err:
578         mpol_cond_put(mpol);
579         return NULL;
580 }
581
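/*
 * Hand a huge page back to the buddy allocator: drop it from the hstate
 * accounting, clear the page flags hugetlb may have left set on the subpages,
 * reset the compound destructor and free the pages.  Called with hugetlb_lock
 * held and only valid for non-gigantic (order < MAX_ORDER) pages.
 */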
582 static void update_and_free_page(struct hstate *h, struct page *page)
583 {
584         int i;
585
586         VM_BUG_ON(h->order >= MAX_ORDER);
587
588         h->nr_huge_pages--;
589         h->nr_huge_pages_node[page_to_nid(page)]--;
590         for (i = 0; i < pages_per_huge_page(h); i++) {
591                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
592                                 1 << PG_referenced | 1 << PG_dirty |
593                                 1 << PG_active | 1 << PG_reserved |
594                                 1 << PG_private | 1 << PG_writeback);
595         }
596         set_compound_page_dtor(page, NULL);
597         set_page_refcounted(page);
598         arch_release_hugepage(page);
599         __free_pages(page, huge_page_order(h));
600 }
601
602 struct hstate *size_to_hstate(unsigned long size)
603 {
604         struct hstate *h;
605
606         for_each_hstate(h) {
607                 if (huge_page_size(h) == size)
608                         return h;
609         }
610         return NULL;
611 }
612
613 static void free_huge_page(struct page *page)
614 {
615         /*
616          * Can't pass hstate in here because it is called from the
617          * compound page destructor.
618          */
619         struct hstate *h = page_hstate(page);
620         int nid = page_to_nid(page);
621         struct hugepage_subpool *spool =
622                 (struct hugepage_subpool *)page_private(page);
623
624         set_page_private(page, 0);
625         page->mapping = NULL;
626         BUG_ON(page_count(page));
627         BUG_ON(page_mapcount(page));
628         INIT_LIST_HEAD(&page->lru);
629
630         spin_lock(&hugetlb_lock);
631         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
632                 update_and_free_page(h, page);
633                 h->surplus_huge_pages--;
634                 h->surplus_huge_pages_node[nid]--;
635         } else {
636                 enqueue_huge_page(h, page);
637         }
638         spin_unlock(&hugetlb_lock);
639         hugepage_subpool_put_pages(spool, 1);
640 }
641
642 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
643 {
644         set_compound_page_dtor(page, free_huge_page);
645         spin_lock(&hugetlb_lock);
646         h->nr_huge_pages++;
647         h->nr_huge_pages_node[nid]++;
648         spin_unlock(&hugetlb_lock);
649         put_page(page); /* free it into the hugepage allocator */
650 }
651
652 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
653 {
654         int i;
655         int nr_pages = 1 << order;
656         struct page *p = page + 1;
657
658         /* we rely on prep_new_huge_page to set the destructor */
659         set_compound_order(page, order);
660         __SetPageHead(page);
661         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
662                 __SetPageTail(p);
663                 set_page_count(p, 0);
664                 p->first_page = page;
665         }
666 }
667
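/*
 * PageHuge() identifies hugetlbfs pages by checking whether the compound
 * page destructor is free_huge_page().  It works on tail pages too, since it
 * looks at the compound head first.
 */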
668 int PageHuge(struct page *page)
669 {
670         compound_page_dtor *dtor;
671
672         if (!PageCompound(page))
673                 return 0;
674
675         page = compound_head(page);
676         dtor = get_compound_page_dtor(page);
677
678         return dtor == free_huge_page;
679 }
680 EXPORT_SYMBOL_GPL(PageHuge);
681
682 /*
683  * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
684  * normal or transparent huge pages.
685  */
686 int PageHeadHuge(struct page *page_head)
687 {
688         compound_page_dtor *dtor;
689
690         if (!PageHead(page_head))
691                 return 0;
692
693         dtor = get_compound_page_dtor(page_head);
694
695         return dtor == free_huge_page;
696 }
697 EXPORT_SYMBOL_GPL(PageHeadHuge);
698
699 pgoff_t __basepage_index(struct page *page)
700 {
701         struct page *page_head = compound_head(page);
702         pgoff_t index = page_index(page_head);
703         unsigned long compound_idx;
704
705         if (!PageHuge(page_head))
706                 return page_index(page);
707
708         if (compound_order(page_head) >= MAX_ORDER)
709                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
710         else
711                 compound_idx = page - page_head;
712
713         return (index << compound_order(page_head)) + compound_idx;
714 }
715
716 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
717 {
718         struct page *page;
719
720         if (h->order >= MAX_ORDER)
721                 return NULL;
722
723         page = alloc_pages_exact_node(nid,
724                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
725                                                 __GFP_REPEAT|__GFP_NOWARN,
726                 huge_page_order(h));
727         if (page) {
728                 if (arch_prepare_hugepage(page)) {
729                         __free_pages(page, huge_page_order(h));
730                         return NULL;
731                 }
732                 prep_new_huge_page(h, page, nid);
733         }
734
735         return page;
736 }
737
738 /*
739  * common helper functions for hstate_next_node_to_{alloc|free}.
740  * We may have allocated or freed a huge page based on a different
741  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
742  * be outside of *nodes_allowed.  Ensure that we use an allowed
743  * node for alloc or free.
744  */
745 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
746 {
747         nid = next_node(nid, *nodes_allowed);
748         if (nid == MAX_NUMNODES)
749                 nid = first_node(*nodes_allowed);
750         VM_BUG_ON(nid >= MAX_NUMNODES);
751
752         return nid;
753 }
754
755 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
756 {
757         if (!node_isset(nid, *nodes_allowed))
758                 nid = next_node_allowed(nid, nodes_allowed);
759         return nid;
760 }
761
762 /*
763  * returns the previously saved node ["this node"] from which to
764  * allocate a persistent huge page for the pool and advance the
765  * next node from which to allocate, handling wrap at end of node
766  * mask.
767  */
768 static int hstate_next_node_to_alloc(struct hstate *h,
769                                         nodemask_t *nodes_allowed)
770 {
771         int nid;
772
773         VM_BUG_ON(!nodes_allowed);
774
775         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
776         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
777
778         return nid;
779 }
780
781 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
782 {
783         struct page *page;
784         int start_nid;
785         int next_nid;
786         int ret = 0;
787
788         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
789         next_nid = start_nid;
790
791         do {
792                 page = alloc_fresh_huge_page_node(h, next_nid);
793                 if (page) {
794                         ret = 1;
795                         break;
796                 }
797                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
798         } while (next_nid != start_nid);
799
800         if (ret)
801                 count_vm_event(HTLB_BUDDY_PGALLOC);
802         else
803                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
804
805         return ret;
806 }
807
808 /*
809  * helper for free_pool_huge_page() - return the previously saved
810  * node ["this node"] from which to free a huge page.  Advance the
811  * next node id whether or not we find a free huge page to free so
812  * that the next attempt to free addresses the next node.
813  */
814 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
815 {
816         int nid;
817
818         VM_BUG_ON(!nodes_allowed);
819
820         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
821         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
822
823         return nid;
824 }
825
826 /*
827  * Free huge page from pool from next node to free.
828  * Attempt to keep persistent huge pages more or less
829  * balanced over allowed nodes.
830  * Called with hugetlb_lock locked.
831  */
832 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
833                                                          bool acct_surplus)
834 {
835         int start_nid;
836         int next_nid;
837         int ret = 0;
838
839         start_nid = hstate_next_node_to_free(h, nodes_allowed);
840         next_nid = start_nid;
841
842         do {
843                 /*
844                  * If we're returning unused surplus pages, only examine
845                  * nodes with surplus pages.
846                  */
847                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
848                     !list_empty(&h->hugepage_freelists[next_nid])) {
849                         struct page *page =
850                                 list_entry(h->hugepage_freelists[next_nid].next,
851                                           struct page, lru);
852                         list_del(&page->lru);
853                         h->free_huge_pages--;
854                         h->free_huge_pages_node[next_nid]--;
855                         if (acct_surplus) {
856                                 h->surplus_huge_pages--;
857                                 h->surplus_huge_pages_node[next_nid]--;
858                         }
859                         update_and_free_page(h, page);
860                         ret = 1;
861                         break;
862                 }
863                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
864         } while (next_nid != start_nid);
865
866         return ret;
867 }
868
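/*
 * Allocate a surplus huge page straight from the buddy allocator, subject to
 * the nr_overcommit_huge_pages limit.  nid == NUMA_NO_NODE means any node is
 * acceptable.  The surplus counters are bumped speculatively before the
 * allocation and rolled back on failure; see the comment below.
 */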
869 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
870 {
871         struct page *page;
872         unsigned int r_nid;
873
874         if (h->order >= MAX_ORDER)
875                 return NULL;
876
877         /*
878          * Assume we will successfully allocate the surplus page to
879          * prevent racing processes from causing the surplus to exceed
880          * overcommit
881          *
882          * This however introduces a different race, where a process B
883          * tries to grow the static hugepage pool while alloc_pages() is
884          * called by process A. B will only examine the per-node
885          * counters in determining if surplus huge pages can be
886          * converted to normal huge pages in adjust_pool_surplus(). A
887          * won't be able to increment the per-node counter, until the
888          * lock is dropped by B, but B doesn't drop hugetlb_lock until
889          * no more huge pages can be converted from surplus to normal
890          * state (and doesn't try to convert again). Thus, we have a
891          * case where a surplus huge page exists, the pool is grown, and
892          * the surplus huge page still exists after, even though it
893          * should just have been converted to a normal huge page. This
894          * does not leak memory, though, as the hugepage will be freed
895          * once it is out of use. It also does not allow the counters to
896          * go out of whack in adjust_pool_surplus() as we don't modify
897          * the node values until we've gotten the hugepage and only the
898          * per-node value is checked there.
899          */
900         spin_lock(&hugetlb_lock);
901         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
902                 spin_unlock(&hugetlb_lock);
903                 return NULL;
904         } else {
905                 h->nr_huge_pages++;
906                 h->surplus_huge_pages++;
907         }
908         spin_unlock(&hugetlb_lock);
909
910         if (nid == NUMA_NO_NODE)
911                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
912                                    __GFP_REPEAT|__GFP_NOWARN,
913                                    huge_page_order(h));
914         else
915                 page = alloc_pages_exact_node(nid,
916                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
917                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
918
919         if (page && arch_prepare_hugepage(page)) {
920                 __free_pages(page, huge_page_order(h));
921                 return NULL;
922         }
923
924         spin_lock(&hugetlb_lock);
925         if (page) {
926                 r_nid = page_to_nid(page);
927                 set_compound_page_dtor(page, free_huge_page);
928                 /*
929                  * We incremented the global counters already
930                  */
931                 h->nr_huge_pages_node[r_nid]++;
932                 h->surplus_huge_pages_node[r_nid]++;
933                 __count_vm_event(HTLB_BUDDY_PGALLOC);
934         } else {
935                 h->nr_huge_pages--;
936                 h->surplus_huge_pages--;
937                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
938         }
939         spin_unlock(&hugetlb_lock);
940
941         return page;
942 }
943
944 /*
945  * This allocation function is useful in the context where vma is irrelevant.
946  * E.g. soft-offlining uses this function because it only cares about the
947  * physical address of the error page.
948  */
949 struct page *alloc_huge_page_node(struct hstate *h, int nid)
950 {
951         struct page *page;
952
953         spin_lock(&hugetlb_lock);
954         page = dequeue_huge_page_node(h, nid);
955         spin_unlock(&hugetlb_lock);
956
957         if (!page)
958                 page = alloc_buddy_huge_page(h, nid);
959
960         return page;
961 }
962
963 /*
964  * Increase the hugetlb pool such that it can accommodate a reservation
965  * of size 'delta'.
966  */
967 static int gather_surplus_pages(struct hstate *h, int delta)
968 {
969         struct list_head surplus_list;
970         struct page *page, *tmp;
971         int ret, i;
972         int needed, allocated;
973
974         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
975         if (needed <= 0) {
976                 h->resv_huge_pages += delta;
977                 return 0;
978         }
979
980         allocated = 0;
981         INIT_LIST_HEAD(&surplus_list);
982
983         ret = -ENOMEM;
984 retry:
985         spin_unlock(&hugetlb_lock);
986         for (i = 0; i < needed; i++) {
987                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
988                 if (!page)
989                         /*
990                          * We were not able to allocate enough pages to
991                          * satisfy the entire reservation so we free what
992                          * we've allocated so far.
993                          */
994                         goto free;
995
996                 list_add(&page->lru, &surplus_list);
997         }
998         allocated += needed;
999
1000         /*
1001          * After retaking hugetlb_lock, we need to recalculate 'needed'
1002          * because either resv_huge_pages or free_huge_pages may have changed.
1003          */
1004         spin_lock(&hugetlb_lock);
1005         needed = (h->resv_huge_pages + delta) -
1006                         (h->free_huge_pages + allocated);
1007         if (needed > 0)
1008                 goto retry;
1009
1010         /*
1011          * The surplus_list now contains _at_least_ the number of extra pages
1012          * needed to accommodate the reservation.  Add the appropriate number
1013          * of pages to the hugetlb pool and free the extras back to the buddy
1014          * allocator.  Commit the entire reservation here to prevent another
1015          * process from stealing the pages as they are added to the pool but
1016          * before they are reserved.
1017          */
1018         needed += allocated;
1019         h->resv_huge_pages += delta;
1020         ret = 0;
1021
1022         /* Free the needed pages to the hugetlb pool */
1023         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1024                 if ((--needed) < 0)
1025                         break;
1026                 list_del(&page->lru);
1027                 /*
1028                  * This page is now managed by the hugetlb allocator and has
1029                  * no users -- drop the buddy allocator's reference.
1030                  */
1031                 put_page_testzero(page);
1032                 VM_BUG_ON(page_count(page));
1033                 enqueue_huge_page(h, page);
1034         }
1035         spin_unlock(&hugetlb_lock);
1036
1037         /* Free unnecessary surplus pages to the buddy allocator */
1038 free:
1039         if (!list_empty(&surplus_list)) {
1040                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1041                         list_del(&page->lru);
1042                         put_page(page);
1043                 }
1044         }
1045         spin_lock(&hugetlb_lock);
1046
1047         return ret;
1048 }
1049
1050 /*
1051  * When releasing a hugetlb pool reservation, any surplus pages that were
1052  * allocated to satisfy the reservation must be explicitly freed if they were
1053  * never used.
1054  * Called with hugetlb_lock held.
1055  */
1056 static void return_unused_surplus_pages(struct hstate *h,
1057                                         unsigned long unused_resv_pages)
1058 {
1059         unsigned long nr_pages;
1060
1061         /* Uncommit the reservation */
1062         h->resv_huge_pages -= unused_resv_pages;
1063
1064         /* Cannot return gigantic pages currently */
1065         if (h->order >= MAX_ORDER)
1066                 return;
1067
1068         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1069
1070         /*
1071          * We want to release as many surplus pages as possible, spread
1072          * evenly across all nodes with memory. Iterate across these nodes
1073          * until we can no longer free unreserved surplus pages. This occurs
1074          * when the nodes with surplus pages have no free pages.
1075          * free_pool_huge_page() will balance the freed pages across the
1076          * on-line nodes with memory and will handle the hstate accounting.
1077          */
1078         while (nr_pages--) {
1079                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1080                         break;
1081         }
1082 }
1083
1084 /*
1085  * Determine if the huge page at addr within the vma has an associated
1086  * reservation.  Where it does not we will need to logically increase
1087  * reservation and actually increase subpool usage before an allocation
1088  * can occur.  Where any new reservation would be required the
1089  * reservation change is prepared, but not committed.  Once the page
1090  * has been allocated from the subpool and instantiated the change should
1091  * be committed via vma_commit_reservation.  No action is required on
1092  * failure.
1093  */
1094 static long vma_needs_reservation(struct hstate *h,
1095                         struct vm_area_struct *vma, unsigned long addr)
1096 {
1097         struct address_space *mapping = vma->vm_file->f_mapping;
1098         struct inode *inode = mapping->host;
1099
1100         if (vma->vm_flags & VM_MAYSHARE) {
1101                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1102                 return region_chg(&inode->i_mapping->private_list,
1103                                                         idx, idx + 1);
1104
1105         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1106                 return 1;
1107
1108         } else  {
1109                 long err;
1110                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1111                 struct resv_map *reservations = vma_resv_map(vma);
1112
1113                 err = region_chg(&reservations->regions, idx, idx + 1);
1114                 if (err < 0)
1115                         return err;
1116                 return 0;
1117         }
1118 }
1119 static void vma_commit_reservation(struct hstate *h,
1120                         struct vm_area_struct *vma, unsigned long addr)
1121 {
1122         struct address_space *mapping = vma->vm_file->f_mapping;
1123         struct inode *inode = mapping->host;
1124
1125         if (vma->vm_flags & VM_MAYSHARE) {
1126                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1127                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1128
1129         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1130                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1131                 struct resv_map *reservations = vma_resv_map(vma);
1132
1133                 /* Mark this page used in the map. */
1134                 region_add(&reservations->regions, idx, idx + 1);
1135         }
1136 }
1137
1138 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1139                                     unsigned long addr, int avoid_reserve)
1140 {
1141         struct hugepage_subpool *spool = subpool_vma(vma);
1142         struct hstate *h = hstate_vma(vma);
1143         struct page *page;
1144         long chg;
1145
1146         /*
1147          * Processes that did not create the mapping will have no
1148          * reserves and will not have accounted against subpool
1149          * limit. Check that the subpool limit can be made before
1150          * satisfying the allocation. MAP_NORESERVE mappings may also
1151          * need pages and subpool limit allocated if no reserve
1152          * mapping overlaps.
1153          */
1154         chg = vma_needs_reservation(h, vma, addr);
1155         if (chg < 0)
1156                 return ERR_PTR(-VM_FAULT_OOM);
1157         if (chg)
1158                 if (hugepage_subpool_get_pages(spool, chg))
1159                         return ERR_PTR(-VM_FAULT_SIGBUS);
1160
1161         spin_lock(&hugetlb_lock);
1162         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1163         spin_unlock(&hugetlb_lock);
1164
1165         if (!page) {
1166                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1167                 if (!page) {
1168                         hugepage_subpool_put_pages(spool, chg);
1169                         return ERR_PTR(-VM_FAULT_SIGBUS);
1170                 }
1171         }
1172
1173         set_page_private(page, (unsigned long)spool);
1174
1175         vma_commit_reservation(h, vma, addr);
1176
1177         return page;
1178 }
1179
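/*
 * Allocate one boot-time huge page from bootmem, rotating across nodes with
 * memory.  This path is used for gigantic pages (order >= MAX_ORDER) that
 * cannot come from the buddy allocator.  Returns 1 on success, 0 on failure.
 */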
1180 int __weak alloc_bootmem_huge_page(struct hstate *h)
1181 {
1182         struct huge_bootmem_page *m;
1183         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1184
1185         while (nr_nodes) {
1186                 void *addr;
1187
1188                 addr = __alloc_bootmem_node_nopanic(
1189                                 NODE_DATA(hstate_next_node_to_alloc(h,
1190                                                 &node_states[N_HIGH_MEMORY])),
1191                                 huge_page_size(h), huge_page_size(h), 0);
1192
1193                 if (addr) {
1194                         /*
1195                          * Use the beginning of the huge page to store the
1196                          * huge_bootmem_page struct (until gather_bootmem
1197                          * puts them into the mem_map).
1198                          */
1199                         m = addr;
1200                         goto found;
1201                 }
1202                 nr_nodes--;
1203         }
1204         return 0;
1205
1206 found:
1207         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1208         /* Put them into a private list first because mem_map is not up yet */
1209         list_add(&m->list, &huge_boot_pages);
1210         m->hstate = h;
1211         return 1;
1212 }
1213
1214 static void prep_compound_huge_page(struct page *page, int order)
1215 {
1216         if (unlikely(order > (MAX_ORDER - 1)))
1217                 prep_compound_gigantic_page(page, order);
1218         else
1219                 prep_compound_page(page, order);
1220 }
1221
1222 /* Put bootmem huge pages into the standard lists after mem_map is up */
1223 static void __init gather_bootmem_prealloc(void)
1224 {
1225         struct huge_bootmem_page *m;
1226
1227         list_for_each_entry(m, &huge_boot_pages, list) {
1228                 struct hstate *h = m->hstate;
1229                 struct page *page;
1230
1231 #ifdef CONFIG_HIGHMEM
1232                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1233                 free_bootmem_late((unsigned long)m,
1234                                   sizeof(struct huge_bootmem_page));
1235 #else
1236                 page = virt_to_page(m);
1237 #endif
1238                 __ClearPageReserved(page);
1239                 WARN_ON(page_count(page) != 1);
1240                 prep_compound_huge_page(page, h->order);
1241                 prep_new_huge_page(h, page, page_to_nid(page));
1242                 /*
1243                  * If we had gigantic hugepages allocated at boot time, we need
1244                  * to restore the 'stolen' pages to totalram_pages in order to
1245                  * fix confusing memory reports from free(1) and other
1246                  * side-effects, like CommitLimit going negative.
1247                  */
1248                 if (h->order > (MAX_ORDER - 1))
1249                         totalram_pages += 1 << h->order;
1250         }
1251 }
1252
1253 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1254 {
1255         unsigned long i;
1256
1257         for (i = 0; i < h->max_huge_pages; ++i) {
1258                 if (h->order >= MAX_ORDER) {
1259                         if (!alloc_bootmem_huge_page(h))
1260                                 break;
1261                 } else if (!alloc_fresh_huge_page(h,
1262                                          &node_states[N_HIGH_MEMORY]))
1263                         break;
1264         }
1265         h->max_huge_pages = i;
1266 }
1267
1268 static void __init hugetlb_init_hstates(void)
1269 {
1270         struct hstate *h;
1271
1272         for_each_hstate(h) {
1273                 /* oversize hugepages were init'ed in early boot */
1274                 if (h->order < MAX_ORDER)
1275                         hugetlb_hstate_alloc_pages(h);
1276         }
1277 }
1278
1279 static char * __init memfmt(char *buf, unsigned long n)
1280 {
1281         if (n >= (1UL << 30))
1282                 sprintf(buf, "%lu GB", n >> 30);
1283         else if (n >= (1UL << 20))
1284                 sprintf(buf, "%lu MB", n >> 20);
1285         else
1286                 sprintf(buf, "%lu KB", n >> 10);
1287         return buf;
1288 }
1289
1290 static void __init report_hugepages(void)
1291 {
1292         struct hstate *h;
1293
1294         for_each_hstate(h) {
1295                 char buf[32];
1296                 printk(KERN_INFO "HugeTLB registered %s page size, "
1297                                  "pre-allocated %ld pages\n",
1298                         memfmt(buf, huge_page_size(h)),
1299                         h->free_huge_pages);
1300         }
1301 }
1302
1303 #ifdef CONFIG_HIGHMEM
1304 static void try_to_free_low(struct hstate *h, unsigned long count,
1305                                                 nodemask_t *nodes_allowed)
1306 {
1307         int i;
1308
1309         if (h->order >= MAX_ORDER)
1310                 return;
1311
1312         for_each_node_mask(i, *nodes_allowed) {
1313                 struct page *page, *next;
1314                 struct list_head *freel = &h->hugepage_freelists[i];
1315                 list_for_each_entry_safe(page, next, freel, lru) {
1316                         if (count >= h->nr_huge_pages)
1317                                 return;
1318                         if (PageHighMem(page))
1319                                 continue;
1320                         list_del(&page->lru);
1321                         update_and_free_page(h, page);
1322                         h->free_huge_pages--;
1323                         h->free_huge_pages_node[page_to_nid(page)]--;
1324                 }
1325         }
1326 }
1327 #else
1328 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1329                                                 nodemask_t *nodes_allowed)
1330 {
1331 }
1332 #endif
1333
1334 /*
1335  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1336  * balanced by operating on them in a round-robin fashion.
1337  * Returns 1 if an adjustment was made.
1338  */
1339 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1340                                 int delta)
1341 {
1342         int start_nid, next_nid;
1343         int ret = 0;
1344
1345         VM_BUG_ON(delta != -1 && delta != 1);
1346
1347         if (delta < 0)
1348                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1349         else
1350                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1351         next_nid = start_nid;
1352
1353         do {
1354                 int nid = next_nid;
1355                 if (delta < 0)  {
1356                         /*
1357                          * To shrink on this node, there must be a surplus page
1358                          */
1359                         if (!h->surplus_huge_pages_node[nid]) {
1360                                 next_nid = hstate_next_node_to_alloc(h,
1361                                                                 nodes_allowed);
1362                                 continue;
1363                         }
1364                 }
1365                 if (delta > 0) {
1366                         /*
1367                          * Surplus cannot exceed the total number of pages
1368                          */
1369                         if (h->surplus_huge_pages_node[nid] >=
1370                                                 h->nr_huge_pages_node[nid]) {
1371                                 next_nid = hstate_next_node_to_free(h,
1372                                                                 nodes_allowed);
1373                                 continue;
1374                         }
1375                 }
1376
1377                 h->surplus_huge_pages += delta;
1378                 h->surplus_huge_pages_node[nid] += delta;
1379                 ret = 1;
1380                 break;
1381         } while (next_nid != start_nid);
1382
1383         return ret;
1384 }
1385
1386 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1387 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1388                                                 nodemask_t *nodes_allowed)
1389 {
1390         unsigned long min_count, ret;
1391
1392         if (h->order >= MAX_ORDER)
1393                 return h->max_huge_pages;
1394
1395         /*
1396          * Increase the pool size
1397          * First take pages out of surplus state.  Then make up the
1398          * remaining difference by allocating fresh huge pages.
1399          *
1400          * We might race with alloc_buddy_huge_page() here and be unable
1401          * to convert a surplus huge page to a normal huge page. That is
1402          * not critical, though, it just means the overall size of the
1403          * pool might be one hugepage larger than it needs to be, but
1404          * within all the constraints specified by the sysctls.
1405          */
1406         spin_lock(&hugetlb_lock);
1407         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1408                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1409                         break;
1410         }
1411
1412         while (count > persistent_huge_pages(h)) {
1413                 /*
1414                  * If this allocation races such that we no longer need the
1415                  * page, free_huge_page will handle it by freeing the page
1416                  * and reducing the surplus.
1417                  */
1418                 spin_unlock(&hugetlb_lock);
1419                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1420                 spin_lock(&hugetlb_lock);
1421                 if (!ret)
1422                         goto out;
1423
1424                 /* Bail for signals. Probably ctrl-c from user */
1425                 if (signal_pending(current))
1426                         goto out;
1427         }
1428
1429         /*
1430          * Decrease the pool size
1431          * First return free pages to the buddy allocator (being careful
1432          * to keep enough around to satisfy reservations).  Then place
1433          * pages into surplus state as needed so the pool will shrink
1434          * to the desired size as pages become free.
1435          *
1436          * By placing pages into the surplus state independent of the
1437          * overcommit value, we are allowing the surplus pool size to
1438          * exceed overcommit. There are few sane options here. Since
1439          * alloc_buddy_huge_page() is checking the global counter,
1440          * though, we'll note that we're not allowed to exceed surplus
1441          * and won't grow the pool anywhere else. Not until one of the
1442          * sysctls are changed, or the surplus pages go out of use.
1443          */
1444         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1445         min_count = max(count, min_count);
1446         try_to_free_low(h, min_count, nodes_allowed);
1447         while (min_count < persistent_huge_pages(h)) {
1448                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1449                         break;
1450                 cond_resched_lock(&hugetlb_lock);
1451         }
1452         while (count < persistent_huge_pages(h)) {
1453                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1454                         break;
1455         }
1456 out:
1457         ret = persistent_huge_pages(h);
1458         spin_unlock(&hugetlb_lock);
1459         return ret;
1460 }
1461
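/*
 * sysfs interface: each hstate gets a directory of attributes (nr_hugepages,
 * nr_overcommit_hugepages, free_hugepages, ...) under
 * /sys/kernel/mm/hugepages/.
 */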
1462 #define HSTATE_ATTR_RO(_name) \
1463         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1464
1465 #define HSTATE_ATTR(_name) \
1466         static struct kobj_attribute _name##_attr = \
1467                 __ATTR(_name, 0644, _name##_show, _name##_store)
1468
1469 static struct kobject *hugepages_kobj;
1470 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1471
1472 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1473
1474 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1475 {
1476         int i;
1477
1478         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1479                 if (hstate_kobjs[i] == kobj) {
1480                         if (nidp)
1481                                 *nidp = NUMA_NO_NODE;
1482                         return &hstates[i];
1483                 }
1484
1485         return kobj_to_node_hstate(kobj, nidp);
1486 }
1487
1488 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1489                                         struct kobj_attribute *attr, char *buf)
1490 {
1491         struct hstate *h;
1492         unsigned long nr_huge_pages;
1493         int nid;
1494
1495         h = kobj_to_hstate(kobj, &nid);
1496         if (nid == NUMA_NO_NODE)
1497                 nr_huge_pages = h->nr_huge_pages;
1498         else
1499                 nr_huge_pages = h->nr_huge_pages_node[nid];
1500
1501         return sprintf(buf, "%lu\n", nr_huge_pages);
1502 }
1503
1504 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1505                         struct kobject *kobj, struct kobj_attribute *attr,
1506                         const char *buf, size_t len)
1507 {
1508         int err;
1509         int nid;
1510         unsigned long count;
1511         struct hstate *h;
1512         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1513
1514         err = strict_strtoul(buf, 10, &count);
1515         if (err)
1516                 goto out;
1517
1518         h = kobj_to_hstate(kobj, &nid);
1519         if (h->order >= MAX_ORDER) {
1520                 err = -EINVAL;
1521                 goto out;
1522         }
1523
1524         if (nid == NUMA_NO_NODE) {
1525                 /*
1526                  * global hstate attribute
1527                  */
1528                 if (!(obey_mempolicy &&
1529                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1530                         NODEMASK_FREE(nodes_allowed);
1531                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1532                 }
1533         } else if (nodes_allowed) {
1534                 /*
1535                  * per node hstate attribute: adjust count to global,
1536                  * but restrict alloc/free to the specified node.
1537                  */
1538                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1539                 init_nodemask_of_node(nodes_allowed, nid);
1540         } else
1541                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1542
1543         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1544
1545         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1546                 NODEMASK_FREE(nodes_allowed);
1547
1548         return len;
1549 out:
1550         NODEMASK_FREE(nodes_allowed);
1551         return err;
1552 }
1553
1554 static ssize_t nr_hugepages_show(struct kobject *kobj,
1555                                        struct kobj_attribute *attr, char *buf)
1556 {
1557         return nr_hugepages_show_common(kobj, attr, buf);
1558 }
1559
1560 static ssize_t nr_hugepages_store(struct kobject *kobj,
1561                struct kobj_attribute *attr, const char *buf, size_t len)
1562 {
1563         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1564 }
1565 HSTATE_ATTR(nr_hugepages);
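/*
 * Illustrative usage (a sketch, assuming the 2 MB hstate on x86): the
 * attribute is exposed under the "hugepages" kobject created in
 * hugetlb_sysfs_init() below, so the persistent pool can be resized with
 *
 *   echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * The directory name comes from h->name; other page sizes get their own
 * hugepages-<size>kB directory.
 */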
1566
1567 #ifdef CONFIG_NUMA
1568
1569 /*
1570  * hstate attribute for an optional mempolicy-based constraint on persistent
1571  * huge page alloc/free.
1572  */
1573 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1574                                        struct kobj_attribute *attr, char *buf)
1575 {
1576         return nr_hugepages_show_common(kobj, attr, buf);
1577 }
1578
1579 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1580                struct kobj_attribute *attr, const char *buf, size_t len)
1581 {
1582         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1583 }
1584 HSTATE_ATTR(nr_hugepages_mempolicy);
1585 #endif
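/*
 * Illustrative usage (a sketch; node numbers and counts are examples):
 * nr_hugepages_mempolicy behaves like nr_hugepages, but constrains
 * allocation and freeing to the nodes allowed by the writing task's
 * memory policy, e.g.
 *
 *   numactl -m 0 sh -c \
 *      'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * attempts to grow the pool using only node 0.
 */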
1586
1587
1588 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1589                                         struct kobj_attribute *attr, char *buf)
1590 {
1591         struct hstate *h = kobj_to_hstate(kobj, NULL);
1592         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1593 }
1594
1595 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1596                 struct kobj_attribute *attr, const char *buf, size_t count)
1597 {
1598         int err;
1599         unsigned long input;
1600         struct hstate *h = kobj_to_hstate(kobj, NULL);
1601
1602         if (h->order >= MAX_ORDER)
1603                 return -EINVAL;
1604
1605         err = strict_strtoul(buf, 10, &input);
1606         if (err)
1607                 return err;
1608
1609         spin_lock(&hugetlb_lock);
1610         h->nr_overcommit_huge_pages = input;
1611         spin_unlock(&hugetlb_lock);
1612
1613         return count;
1614 }
1615 HSTATE_ATTR(nr_overcommit_hugepages);
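/*
 * Illustrative usage (a sketch, assuming the 2 MB hstate): writing
 *
 *   echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * allows up to 32 surplus huge pages to be allocated from the buddy
 * allocator on demand once the persistent pool is exhausted.
 */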
1616
1617 static ssize_t free_hugepages_show(struct kobject *kobj,
1618                                         struct kobj_attribute *attr, char *buf)
1619 {
1620         struct hstate *h;
1621         unsigned long free_huge_pages;
1622         int nid;
1623
1624         h = kobj_to_hstate(kobj, &nid);
1625         if (nid == NUMA_NO_NODE)
1626                 free_huge_pages = h->free_huge_pages;
1627         else
1628                 free_huge_pages = h->free_huge_pages_node[nid];
1629
1630         return sprintf(buf, "%lu\n", free_huge_pages);
1631 }
1632 HSTATE_ATTR_RO(free_hugepages);
1633
1634 static ssize_t resv_hugepages_show(struct kobject *kobj,
1635                                         struct kobj_attribute *attr, char *buf)
1636 {
1637         struct hstate *h = kobj_to_hstate(kobj, NULL);
1638         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1639 }
1640 HSTATE_ATTR_RO(resv_hugepages);
1641
1642 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1643                                         struct kobj_attribute *attr, char *buf)
1644 {
1645         struct hstate *h;
1646         unsigned long surplus_huge_pages;
1647         int nid;
1648
1649         h = kobj_to_hstate(kobj, &nid);
1650         if (nid == NUMA_NO_NODE)
1651                 surplus_huge_pages = h->surplus_huge_pages;
1652         else
1653                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1654
1655         return sprintf(buf, "%lu\n", surplus_huge_pages);
1656 }
1657 HSTATE_ATTR_RO(surplus_hugepages);
1658
1659 static struct attribute *hstate_attrs[] = {
1660         &nr_hugepages_attr.attr,
1661         &nr_overcommit_hugepages_attr.attr,
1662         &free_hugepages_attr.attr,
1663         &resv_hugepages_attr.attr,
1664         &surplus_hugepages_attr.attr,
1665 #ifdef CONFIG_NUMA
1666         &nr_hugepages_mempolicy_attr.attr,
1667 #endif
1668         NULL,
1669 };
1670
1671 static struct attribute_group hstate_attr_group = {
1672         .attrs = hstate_attrs,
1673 };
1674
1675 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1676                                     struct kobject **hstate_kobjs,
1677                                     struct attribute_group *hstate_attr_group)
1678 {
1679         int retval;
1680         int hi = h - hstates;
1681
1682         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1683         if (!hstate_kobjs[hi])
1684                 return -ENOMEM;
1685
1686         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1687         if (retval)
1688                 kobject_put(hstate_kobjs[hi]);
1689
1690         return retval;
1691 }
1692
1693 static void __init hugetlb_sysfs_init(void)
1694 {
1695         struct hstate *h;
1696         int err;
1697
1698         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1699         if (!hugepages_kobj)
1700                 return;
1701
1702         for_each_hstate(h) {
1703                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1704                                          hstate_kobjs, &hstate_attr_group);
1705                 if (err)
1706                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1707                                                                 h->name);
1708         }
1709 }
1710
1711 #ifdef CONFIG_NUMA
1712
1713 /*
1714  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1715  * with node sysdevs in node_devices[] using a parallel array.  The array
1716  * index of a node sysdev or node_hstate equals the node id.
1717  * This is here to avoid any static dependency of the node sysdev driver, in
1718  * the base kernel, on the hugetlb module.
1719  */
1720 struct node_hstate {
1721         struct kobject          *hugepages_kobj;
1722         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1723 };
1724 struct node_hstate node_hstates[MAX_NUMNODES];
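/*
 * Illustrative layout (a sketch; the node number and size are examples):
 * the per node attributes registered below appear under each node's
 * sysdev, e.g.
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */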
1725
1726 /*
1727  * A subset of global hstate attributes for node sysdevs
1728  */
1729 static struct attribute *per_node_hstate_attrs[] = {
1730         &nr_hugepages_attr.attr,
1731         &free_hugepages_attr.attr,
1732         &surplus_hugepages_attr.attr,
1733         NULL,
1734 };
1735
1736 static struct attribute_group per_node_hstate_attr_group = {
1737         .attrs = per_node_hstate_attrs,
1738 };
1739
1740 /*
1741  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1742  * Returns node id via non-NULL nidp.
1743  */
1744 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1745 {
1746         int nid;
1747
1748         for (nid = 0; nid < nr_node_ids; nid++) {
1749                 struct node_hstate *nhs = &node_hstates[nid];
1750                 int i;
1751                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1752                         if (nhs->hstate_kobjs[i] == kobj) {
1753                                 if (nidp)
1754                                         *nidp = nid;
1755                                 return &hstates[i];
1756                         }
1757         }
1758
1759         BUG();
1760         return NULL;
1761 }
1762
1763 /*
1764  * Unregister hstate attributes from a single node sysdev.
1765  * No-op if no hstate attributes attached.
1766  */
1767 void hugetlb_unregister_node(struct node *node)
1768 {
1769         struct hstate *h;
1770         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1771
1772         if (!nhs->hugepages_kobj)
1773                 return;         /* no hstate attributes */
1774
1775         for_each_hstate(h)
1776                 if (nhs->hstate_kobjs[h - hstates]) {
1777                         kobject_put(nhs->hstate_kobjs[h - hstates]);
1778                         nhs->hstate_kobjs[h - hstates] = NULL;
1779                 }
1780
1781         kobject_put(nhs->hugepages_kobj);
1782         nhs->hugepages_kobj = NULL;
1783 }
1784
1785 /*
1786  * hugetlb module exit:  unregister hstate attributes from node sysdevs
1787  * that have them.
1788  */
1789 static void hugetlb_unregister_all_nodes(void)
1790 {
1791         int nid;
1792
1793         /*
1794          * disable node sysdev registrations.
1795          */
1796         register_hugetlbfs_with_node(NULL, NULL);
1797
1798         /*
1799          * remove hstate attributes from any nodes that have them.
1800          */
1801         for (nid = 0; nid < nr_node_ids; nid++)
1802                 hugetlb_unregister_node(&node_devices[nid]);
1803 }
1804
1805 /*
1806  * Register hstate attributes for a single node sysdev.
1807  * No-op if attributes already registered.
1808  */
1809 void hugetlb_register_node(struct node *node)
1810 {
1811         struct hstate *h;
1812         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1813         int err;
1814
1815         if (nhs->hugepages_kobj)
1816                 return;         /* already allocated */
1817
1818         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1819                                                         &node->sysdev.kobj);
1820         if (!nhs->hugepages_kobj)
1821                 return;
1822
1823         for_each_hstate(h) {
1824                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1825                                                 nhs->hstate_kobjs,
1826                                                 &per_node_hstate_attr_group);
1827                 if (err) {
1828                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1829                                         " for node %d\n",
1830                                                 h->name, node->sysdev.id);
1831                         hugetlb_unregister_node(node);
1832                         break;
1833                 }
1834         }
1835 }
1836
1837 /*
1838  * hugetlb init time:  register hstate attributes for all registered node
1839  * sysdevs of nodes that have memory.  All on-line nodes should have
1840  * registered their associated sysdev by this time.
1841  */
1842 static void hugetlb_register_all_nodes(void)
1843 {
1844         int nid;
1845
1846         for_each_node_state(nid, N_HIGH_MEMORY) {
1847                 struct node *node = &node_devices[nid];
1848                 if (node->sysdev.id == nid)
1849                         hugetlb_register_node(node);
1850         }
1851
1852         /*
1853          * Let the node sysdev driver know we're here so it can
1854          * [un]register hstate attributes on node hotplug.
1855          */
1856         register_hugetlbfs_with_node(hugetlb_register_node,
1857                                      hugetlb_unregister_node);
1858 }
1859 #else   /* !CONFIG_NUMA */
1860
1861 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1862 {
1863         BUG();
1864         if (nidp)
1865                 *nidp = -1;
1866         return NULL;
1867 }
1868
1869 static void hugetlb_unregister_all_nodes(void) { }
1870
1871 static void hugetlb_register_all_nodes(void) { }
1872
1873 #endif
1874
1875 static void __exit hugetlb_exit(void)
1876 {
1877         struct hstate *h;
1878
1879         hugetlb_unregister_all_nodes();
1880
1881         for_each_hstate(h) {
1882                 kobject_put(hstate_kobjs[h - hstates]);
1883         }
1884
1885         kobject_put(hugepages_kobj);
1886 }
1887 module_exit(hugetlb_exit);
1888
1889 static int __init hugetlb_init(void)
1890 {
1891         /* Some platforms decide whether they support huge pages at boot
1892          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1893          * there is no such support.
1894          */
1895         if (HPAGE_SHIFT == 0)
1896                 return 0;
1897
1898         if (!size_to_hstate(default_hstate_size)) {
1899                 default_hstate_size = HPAGE_SIZE;
1900                 if (!size_to_hstate(default_hstate_size))
1901                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1902         }
1903         default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1904         if (default_hstate_max_huge_pages)
1905                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1906
1907         hugetlb_init_hstates();
1908
1909         gather_bootmem_prealloc();
1910
1911         report_hugepages();
1912
1913         hugetlb_sysfs_init();
1914
1915         hugetlb_register_all_nodes();
1916
1917         return 0;
1918 }
1919 module_init(hugetlb_init);
1920
1921 /* Should be called when processing a hugepagesz=... option */
1922 void __init hugetlb_add_hstate(unsigned order)
1923 {
1924         struct hstate *h;
1925         unsigned long i;
1926
1927         if (size_to_hstate(PAGE_SIZE << order)) {
1928                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1929                 return;
1930         }
1931         BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1932         BUG_ON(order == 0);
1933         h = &hstates[max_hstate++];
1934         h->order = order;
1935         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1936         h->nr_huge_pages = 0;
1937         h->free_huge_pages = 0;
1938         for (i = 0; i < MAX_NUMNODES; ++i)
1939                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1940         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1941         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1942         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1943                                         huge_page_size(h)/1024);
1944
1945         parsed_hstate = h;
1946 }
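/*
 * Worked example (assuming x86 with 4 KB base pages): "hugepagesz=2M"
 * gives order = 21 - 12 = 9, so huge_page_size(h) is 2 MB and the hstate
 * is named "hugepages-2048kB"; "hugepagesz=1G", where the architecture
 * supports it, gives order 18 and "hugepages-1048576kB".
 */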
1947
1948 static int __init hugetlb_nrpages_setup(char *s)
1949 {
1950         unsigned long *mhp;
1951         static unsigned long *last_mhp;
1952
1953         /*
1954          * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1955          * so this hugepages= parameter goes to the "default hstate".
1956          */
1957         if (!max_hstate)
1958                 mhp = &default_hstate_max_huge_pages;
1959         else
1960                 mhp = &parsed_hstate->max_huge_pages;
1961
1962         if (mhp == last_mhp) {
1963                 printk(KERN_WARNING "hugepages= specified twice without "
1964                         "interleaving hugepagesz=, ignoring\n");
1965                 return 1;
1966         }
1967
1968         if (sscanf(s, "%lu", mhp) <= 0)
1969                 *mhp = 0;
1970
1971         /*
1972          * Global state is always initialized later in hugetlb_init.
1973          * But we need to allocate gigantic hstates (order >= MAX_ORDER)
1974          * here early so they can still use the bootmem allocator.
1975          */
1976         if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1977                 hugetlb_hstate_alloc_pages(parsed_hstate);
1978
1979         last_mhp = mhp;
1980
1981         return 1;
1982 }
1983 __setup("hugepages=", hugetlb_nrpages_setup);
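/*
 * Boot command line sketch (sizes are illustrative and architecture
 * dependent): each hugepages= applies to the hstate named by the most
 * recent hugepagesz=, and a hugepages= seen before any hugepagesz=
 * applies to the default hstate, e.g.
 *
 *   hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 */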
1984
1985 static int __init hugetlb_default_setup(char *s)
1986 {
1987         default_hstate_size = memparse(s, &s);
1988         return 1;
1989 }
1990 __setup("default_hugepagesz=", hugetlb_default_setup);
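/*
 * Example (a sketch, on architectures that support the size):
 * "default_hugepagesz=1G" selects the 1 GB hstate as default_hstate,
 * which is the one reported by hugetlb_report_meminfo() below.
 */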
1991
1992 static unsigned int cpuset_mems_nr(unsigned int *array)
1993 {
1994         int node;
1995         unsigned int nr = 0;
1996
1997         for_each_node_mask(node, cpuset_current_mems_allowed)
1998                 nr += array[node];
1999
2000         return nr;
2001 }
2002
2003 #ifdef CONFIG_SYSCTL
2004 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2005                          struct ctl_table *table, int write,
2006                          void __user *buffer, size_t *length, loff_t *ppos)
2007 {
2008         struct hstate *h = &default_hstate;
2009         unsigned long tmp;
2010         int ret;
2011
2012         tmp = h->max_huge_pages;
2013
2014         if (write && h->order >= MAX_ORDER)
2015                 return -EINVAL;
2016
2017         table->data = &tmp;
2018         table->maxlen = sizeof(unsigned long);
2019         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2020         if (ret)
2021                 goto out;
2022
2023         if (write) {
2024                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2025                                                 GFP_KERNEL | __GFP_NORETRY);
2026                 if (!(obey_mempolicy &&
2027                                init_nodemask_of_mempolicy(nodes_allowed))) {
2028                         NODEMASK_FREE(nodes_allowed);
2029                         nodes_allowed = &node_states[N_HIGH_MEMORY];
2030                 }
2031                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2032
2033                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
2034                         NODEMASK_FREE(nodes_allowed);
2035         }
2036 out:
2037         return ret;
2038 }
2039
2040 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2041                           void __user *buffer, size_t *length, loff_t *ppos)
2042 {
2043
2044         return hugetlb_sysctl_handler_common(false, table, write,
2045                                                         buffer, length, ppos);
2046 }
2047
2048 #ifdef CONFIG_NUMA
2049 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2050                           void __user *buffer, size_t *length, loff_t *ppos)
2051 {
2052         return hugetlb_sysctl_handler_common(true, table, write,
2053                                                         buffer, length, ppos);
2054 }
2055 #endif /* CONFIG_NUMA */
2056
2057 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2058                         void __user *buffer,
2059                         size_t *length, loff_t *ppos)
2060 {
2061         proc_dointvec(table, write, buffer, length, ppos);
2062         if (hugepages_treat_as_movable)
2063                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2064         else
2065                 htlb_alloc_mask = GFP_HIGHUSER;
2066         return 0;
2067 }
2068
2069 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2070                         void __user *buffer,
2071                         size_t *length, loff_t *ppos)
2072 {
2073         struct hstate *h = &default_hstate;
2074         unsigned long tmp;
2075         int ret;
2076
2077         tmp = h->nr_overcommit_huge_pages;
2078
2079         if (write && h->order >= MAX_ORDER)
2080                 return -EINVAL;
2081
2082         table->data = &tmp;
2083         table->maxlen = sizeof(unsigned long);
2084         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2085         if (ret)
2086                 goto out;
2087
2088         if (write) {
2089                 spin_lock(&hugetlb_lock);
2090                 h->nr_overcommit_huge_pages = tmp;
2091                 spin_unlock(&hugetlb_lock);
2092         }
2093 out:
2094         return ret;
2095 }
2096
2097 #endif /* CONFIG_SYSCTL */
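/*
 * Illustrative sysctl usage (a sketch; the handlers above are normally
 * wired to these proc paths from kernel/sysctl.c):
 *
 *   echo 1024 > /proc/sys/vm/nr_hugepages
 *   echo 256  > /proc/sys/vm/nr_overcommit_hugepages
 *   echo 1    > /proc/sys/vm/hugepages_treat_as_movable
 */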
2098
2099 void hugetlb_report_meminfo(struct seq_file *m)
2100 {
2101         struct hstate *h = &default_hstate;
2102         seq_printf(m,
2103                         "HugePages_Total:   %5lu\n"
2104                         "HugePages_Free:    %5lu\n"
2105                         "HugePages_Rsvd:    %5lu\n"
2106                         "HugePages_Surp:    %5lu\n"
2107                         "Hugepagesize:   %8lu kB\n",
2108                         h->nr_huge_pages,
2109                         h->free_huge_pages,
2110                         h->resv_huge_pages,
2111                         h->surplus_huge_pages,
2112                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2113 }
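/*
 * Illustrative /proc/meminfo output from the format above (the numbers
 * are examples for a 2 MB default hstate):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       32
 *   HugePages_Rsvd:        8
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */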
2114
2115 int hugetlb_report_node_meminfo(int nid, char *buf)
2116 {
2117         struct hstate *h = &default_hstate;
2118         return sprintf(buf,
2119                 "Node %d HugePages_Total: %5u\n"
2120                 "Node %d HugePages_Free:  %5u\n"
2121                 "Node %d HugePages_Surp:  %5u\n",
2122                 nid, h->nr_huge_pages_node[nid],
2123                 nid, h->free_huge_pages_node[nid],
2124                 nid, h->surplus_huge_pages_node[nid]);
2125 }
2126
2127 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2128 unsigned long hugetlb_total_pages(void)
2129 {
2130         struct hstate *h;
2131         unsigned long nr_total_pages = 0;
2132
2133         for_each_hstate(h)
2134                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2135         return nr_total_pages;
2136 }
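/*
 * Worked example: with 2 MB huge pages and a 4 KB PAGE_SIZE,
 * pages_per_huge_page() is 512, so 64 allocated huge pages contribute
 * 64 * 512 = 32768 to the total returned here.
 */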
2137
2138 static int hugetlb_acct_memory(struct hstate *h, long delta)
2139 {
2140         int ret = -ENOMEM;
2141
2142         spin_lock(&hugetlb_lock);
2143         /*
2144          * When cpuset is configured, it breaks the strict hugetlb page
2145          * reservation as the accounting is done on a global variable. Such
2146          * reservation is completely rubbish in the presence of cpuset because
2147          * the reservation is not checked against page availability for the
2148          * current cpuset. An application can still potentially be OOM-killed
2149          * by the kernel for lack of free htlb pages in the cpuset the task is in.
2150          * Attempting to enforce strict accounting with cpuset is almost
2151          * impossible (or too ugly) because cpusets are too fluid: tasks and
2152          * memory nodes can be dynamically moved between cpusets.
2153          *
2154          * The change of semantics for shared hugetlb mapping with cpuset is
2155          * undesirable. However, in order to preserve some of the semantics,
2156          * we fall back to checking against current free page availability as
2157          * a best attempt, hopefully minimizing the impact of the semantic
2158          * change that cpuset introduces.
2159          */
2160         if (delta > 0) {
2161                 if (gather_surplus_pages(h, delta) < 0)
2162                         goto out;
2163
2164                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2165                         return_unused_surplus_pages(h, delta);
2166                         goto out;
2167                 }
2168         }
2169
2170         ret = 0;
2171         if (delta < 0)
2172                 return_unused_surplus_pages(h, (unsigned long) -delta);
2173
2174 out:
2175         spin_unlock(&hugetlb_lock);
2176         return ret;
2177 }
2178
2179 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2180 {
2181         struct resv_map *reservations = vma_resv_map(vma);
2182
2183         /*
2184          * This new VMA should share its sibling's reservation map if present.
2185          * The VMA will only ever have a valid reservation map pointer where
2186          * it is being copied for another still existing VMA.  As that VMA
2187          * has a reference to the reservation map it cannot disappear until
2188          * after this open call completes.  It is therefore safe to take a
2189          * new reference here without additional locking.
2190          */
2191         if (reservations)
2192                 kref_get(&reservations->refs);
2193 }
2194
2195 static void resv_map_put(struct vm_area_struct *vma)
2196 {
2197         struct resv_map *reservations = vma_resv_map(vma);
2198
2199         if (!reservations)
2200                 return;
2201         kref_put(&reservations->refs, resv_map_release);
2202 }
2203
2204 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2205 {
2206         struct hstate *h = hstate_vma(vma);
2207         struct resv_map *reservations = vma_resv_map(vma);
2208         struct hugepage_subpool *spool = subpool_vma(vma);
2209         unsigned long reserve;
2210         unsigned long start;
2211         unsigned long end;
2212
2213         if (reservations) {
2214                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2215                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2216
2217                 reserve = (end - start) -
2218                         region_count(&reservations->regions, start, end);
2219
2220                 resv_map_put(vma);
2221
2222                 if (reserve) {
2223                         hugetlb_acct_memory(h, -reserve);
2224                         hugepage_subpool_put_pages(spool, reserve);
2225                 }
2226         }
2227 }
2228
2229 /*
2230  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2231  * handle_mm_fault() to try to instantiate regular-sized pages in the
2232  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2233  * this far.
2234  */
2235 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2236 {
2237         BUG();
2238         return 0;
2239 }
2240
2241 const struct vm_operations_struct hugetlb_vm_ops = {
2242         .fault = hugetlb_vm_op_fault,
2243         .open = hugetlb_vm_op_open,
2244         .close = hugetlb_vm_op_close,
2245 };
2246
2247 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2248                                 int writable)
2249 {
2250         pte_t entry;
2251
2252         if (writable) {
2253                 entry =
2254                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2255         } else {
2256                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2257         }
2258         entry = pte_mkyoung(entry);
2259         entry = pte_mkhuge(entry);
2260
2261         return entry;
2262 }
2263
2264 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2265                                    unsigned long address, pte_t *ptep)
2266 {
2267         pte_t entry;
2268
2269         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2270         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2271                 update_mmu_cache(vma, address, ptep);
2272 }
2273
2274
2275 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2276                             struct vm_area_struct *vma)
2277 {
2278         pte_t *src_pte, *dst_pte, entry;
2279         struct page *ptepage;
2280         unsigned long addr;
2281         int cow;
2282         struct hstate *h = hstate_vma(vma);
2283         unsigned long sz = huge_page_size(h);
2284
2285         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2286
2287         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2288                 src_pte = huge_pte_offset(src, addr);
2289                 if (!src_pte)
2290                         continue;
2291                 dst_pte = huge_pte_alloc(dst, addr, sz);
2292                 if (!dst_pte)
2293                         goto nomem;
2294
2295                 /* If the pagetables are shared, don't copy or take references */
2296                 if (dst_pte == src_pte)
2297                         continue;
2298
2299                 spin_lock(&dst->page_table_lock);
2300                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2301                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2302                         if (cow)
2303                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2304                         entry = huge_ptep_get(src_pte);
2305                         ptepage = pte_page(entry);
2306                         get_page(ptepage);
2307                         page_dup_rmap(ptepage);
2308                         set_huge_pte_at(dst, addr, dst_pte, entry);
2309                 }
2310                 spin_unlock(&src->page_table_lock);
2311                 spin_unlock(&dst->page_table_lock);
2312         }
2313         return 0;
2314
2315 nomem:
2316         return -ENOMEM;
2317 }
2318
2319 static int is_hugetlb_entry_migration(pte_t pte)
2320 {
2321         swp_entry_t swp;
2322
2323         if (huge_pte_none(pte) || pte_present(pte))
2324                 return 0;
2325         swp = pte_to_swp_entry(pte);
2326         if (non_swap_entry(swp) && is_migration_entry(swp))
2327                 return 1;
2328         else
2329                 return 0;
2330 }
2331
2332 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2333 {
2334         swp_entry_t swp;
2335
2336         if (huge_pte_none(pte) || pte_present(pte))
2337                 return 0;
2338         swp = pte_to_swp_entry(pte);
2339         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2340                 return 1;
2341         else
2342                 return 0;
2343 }
2344
2345 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2346                             unsigned long end, struct page *ref_page)
2347 {
2348         struct mm_struct *mm = vma->vm_mm;
2349         unsigned long address;
2350         pte_t *ptep;
2351         pte_t pte;
2352         struct page *page;
2353         struct page *tmp;
2354         struct hstate *h = hstate_vma(vma);
2355         unsigned long sz = huge_page_size(h);
2356
2357         /*
2358          * A page gathering list, protected by the per-file i_mmap_mutex. The
2359          * lock is used to avoid list corruption from concurrent unmapping
2360          * of the same page, since we are using page->lru.
2361          */
2362         LIST_HEAD(page_list);
2363
2364         WARN_ON(!is_vm_hugetlb_page(vma));
2365         BUG_ON(start & ~huge_page_mask(h));
2366         BUG_ON(end & ~huge_page_mask(h));
2367
2368         mmu_notifier_invalidate_range_start(mm, start, end);
2369         spin_lock(&mm->page_table_lock);
2370         for (address = start; address < end; address += sz) {
2371                 ptep = huge_pte_offset(mm, address);
2372                 if (!ptep)
2373                         continue;
2374
2375                 if (huge_pmd_unshare(mm, &address, ptep))
2376                         continue;
2377
2378                 /*
2379                  * If a reference page is supplied, it is because a specific
2380                  * page is being unmapped, not a range. Ensure the page we
2381                  * are about to unmap is the actual page of interest.
2382                  */
2383                 if (ref_page) {
2384                         pte = huge_ptep_get(ptep);
2385                         if (huge_pte_none(pte))
2386                                 continue;
2387                         page = pte_page(pte);
2388                         if (page != ref_page)
2389                                 continue;
2390
2391                         /*
2392                          * Mark the VMA as having unmapped its page so that
2393                          * future faults in this VMA will fail rather than
2394                          * looking like data was lost
2395                          */
2396                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2397                 }
2398
2399                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2400                 if (huge_pte_none(pte))
2401                         continue;
2402
2403                 /*
2404                  * An HWPoisoned hugepage is already unmapped and its reference dropped
2405                  */
2406                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2407                         continue;
2408
2409                 page = pte_page(pte);
2410                 if (pte_dirty(pte))
2411                         set_page_dirty(page);
2412                 list_add(&page->lru, &page_list);
2413         }
2414         spin_unlock(&mm->page_table_lock);
2415         flush_tlb_range(vma, start, end);
2416         mmu_notifier_invalidate_range_end(mm, start, end);
2417         list_for_each_entry_safe(page, tmp, &page_list, lru) {
2418                 page_remove_rmap(page);
2419                 list_del(&page->lru);
2420                 put_page(page);
2421         }
2422 }
2423
2424 void __unmap_hugepage_range_final(struct vm_area_struct *vma,
2425                           unsigned long start, unsigned long end,
2426                           struct page *ref_page)
2427 {
2428         __unmap_hugepage_range(vma, start, end, ref_page);
2429
2430         /*
2431          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2432          * test will fail on a vma being torn down, and not grab a page table
2433          * on its way out.  We're lucky that the flag has such an appropriate
2434          * name, and can in fact be safely cleared here. We could clear it
2435          * before the __unmap_hugepage_range above, but all that's necessary
2436          * is to clear it before releasing the i_mmap_mutex. This works
2437          * because in the context this is called, the VMA is about to be
2438          * destroyed and the i_mmap_mutex is held.
2439          */
2440         vma->vm_flags &= ~VM_MAYSHARE;
2441 }
2442
2443 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2444                           unsigned long end, struct page *ref_page)
2445 {
2446         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2447         __unmap_hugepage_range(vma, start, end, ref_page);
2448         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2449 }
2450
2451 /*
2452  * This is called when the original mapper fails to COW a MAP_PRIVATE
2453  * mapping it owns the reserve page for. The intention is to unmap the page
2454  * from other VMAs and let the children be SIGKILLed if they are faulting the
2455  * same region.
2456  */
2457 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2458                                 struct page *page, unsigned long address)
2459 {
2460         struct hstate *h = hstate_vma(vma);
2461         struct vm_area_struct *iter_vma;
2462         struct address_space *mapping;
2463         struct prio_tree_iter iter;
2464         pgoff_t pgoff;
2465
2466         /*
2467          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2468          * from page cache lookup which is in HPAGE_SIZE units.
2469          */
2470         address = address & huge_page_mask(h);
2471         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2472                         vma->vm_pgoff;
2473         mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2474
2475         /*
2476          * Take the mapping lock for the duration of the table walk. As
2477          * this mapping should be shared between all the VMAs,
2478          * __unmap_hugepage_range() is called as the lock is already held.
2479          */
2480         mutex_lock(&mapping->i_mmap_mutex);
2481         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2482                 /* Do not unmap the current VMA */
2483                 if (iter_vma == vma)
2484                         continue;
2485
2486                 /*
2487                  * Unmap the page from other VMAs without their own reserves.
2488                  * They get marked to be SIGKILLed if they fault in these
2489                  * areas. This is because a future no-page fault on this VMA
2490                  * could insert a zeroed page instead of the data existing
2491                  * from the time of fork. This would look like data corruption
2492                  */
2493                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2494                         __unmap_hugepage_range(iter_vma,
2495                                 address, address + huge_page_size(h),
2496                                 page);
2497         }
2498         mutex_unlock(&mapping->i_mmap_mutex);
2499
2500         return 1;
2501 }
2502
2503 /*
2504  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2505  */
2506 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2507                         unsigned long address, pte_t *ptep, pte_t pte,
2508                         struct page *pagecache_page)
2509 {
2510         struct hstate *h = hstate_vma(vma);
2511         struct page *old_page, *new_page;
2512         int avoidcopy;
2513         int outside_reserve = 0;
2514
2515         old_page = pte_page(pte);
2516
2517 retry_avoidcopy:
2518         /* If no-one else is actually using this page, avoid the copy
2519          * and just make the page writable */
2520         avoidcopy = (page_mapcount(old_page) == 1);
2521         if (avoidcopy) {
2522                 if (PageAnon(old_page))
2523                         page_move_anon_rmap(old_page, vma, address);
2524                 set_huge_ptep_writable(vma, address, ptep);
2525                 return 0;
2526         }
2527
2528         /*
2529          * If the process that created a MAP_PRIVATE mapping is about to
2530          * perform a COW due to a shared page count, attempt to satisfy
2531          * the allocation without using the existing reserves. The pagecache
2532          * page is used to determine if the reserve at this address was
2533          * consumed or not. If reserves were used, a partial faulted mapping
2534          * at the time of fork() could consume its reserves on COW instead
2535          * of the full address range.
2536          */
2537         if (!(vma->vm_flags & VM_MAYSHARE) &&
2538                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2539                         old_page != pagecache_page)
2540                 outside_reserve = 1;
2541
2542         page_cache_get(old_page);
2543
2544         /* Drop page_table_lock as buddy allocator may be called */
2545         spin_unlock(&mm->page_table_lock);
2546         new_page = alloc_huge_page(vma, address, outside_reserve);
2547
2548         if (IS_ERR(new_page)) {
2549                 page_cache_release(old_page);
2550
2551                 /*
2552                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2553                  * it is due to references held by a child and an insufficient
2554                  * huge page pool. To guarantee the original mapper's
2555                  * reliability, unmap the page from child processes. The child
2556                  * may get SIGKILLed if it later faults.
2557                  */
2558                 if (outside_reserve) {
2559                         BUG_ON(huge_pte_none(pte));
2560                         if (unmap_ref_private(mm, vma, old_page, address)) {
2561                                 BUG_ON(huge_pte_none(pte));
2562                                 spin_lock(&mm->page_table_lock);
2563                                 goto retry_avoidcopy;
2564                         }
2565                         WARN_ON_ONCE(1);
2566                 }
2567
2568                 /* Caller expects lock to be held */
2569                 spin_lock(&mm->page_table_lock);
2570                 return -PTR_ERR(new_page);
2571         }
2572
2573         /*
2574          * When the original hugepage is a shared one, it does not have
2575          * an anon_vma prepared.
2576          */
2577         if (unlikely(anon_vma_prepare(vma))) {
2578                 page_cache_release(new_page);
2579                 page_cache_release(old_page);
2580                 /* Caller expects lock to be held */
2581                 spin_lock(&mm->page_table_lock);
2582                 return VM_FAULT_OOM;
2583         }
2584
2585         copy_user_huge_page(new_page, old_page, address, vma,
2586                             pages_per_huge_page(h));
2587         __SetPageUptodate(new_page);
2588
2589         /*
2590          * Retake the page_table_lock to check for racing updates
2591          * before the page tables are altered
2592          */
2593         spin_lock(&mm->page_table_lock);
2594         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2595         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2596                 /* Break COW */
2597                 mmu_notifier_invalidate_range_start(mm,
2598                         address & huge_page_mask(h),
2599                         (address & huge_page_mask(h)) + huge_page_size(h));
2600                 huge_ptep_clear_flush(vma, address, ptep);
2601                 set_huge_pte_at(mm, address, ptep,
2602                                 make_huge_pte(vma, new_page, 1));
2603                 page_remove_rmap(old_page);
2604                 hugepage_add_new_anon_rmap(new_page, vma, address);
2605                 /* Make the old page be freed below */
2606                 new_page = old_page;
2607                 mmu_notifier_invalidate_range_end(mm,
2608                         address & huge_page_mask(h),
2609                         (address & huge_page_mask(h)) + huge_page_size(h));
2610         }
2611         page_cache_release(new_page);
2612         page_cache_release(old_page);
2613         return 0;
2614 }
2615
2616 /* Return the pagecache page at a given address within a VMA */
2617 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2618                         struct vm_area_struct *vma, unsigned long address)
2619 {
2620         struct address_space *mapping;
2621         pgoff_t idx;
2622
2623         mapping = vma->vm_file->f_mapping;
2624         idx = vma_hugecache_offset(h, vma, address);
2625
2626         return find_lock_page(mapping, idx);
2627 }
2628
2629 /*
2630  * Return whether there is a pagecache page to back the given address within the VMA.
2631  * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page.
2632  */
2633 static bool hugetlbfs_pagecache_present(struct hstate *h,
2634                         struct vm_area_struct *vma, unsigned long address)
2635 {
2636         struct address_space *mapping;
2637         pgoff_t idx;
2638         struct page *page;
2639
2640         mapping = vma->vm_file->f_mapping;
2641         idx = vma_hugecache_offset(h, vma, address);
2642
2643         page = find_get_page(mapping, idx);
2644         if (page)
2645                 put_page(page);
2646         return page != NULL;
2647 }
2648
2649 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2650                         unsigned long address, pte_t *ptep, unsigned int flags)
2651 {
2652         struct hstate *h = hstate_vma(vma);
2653         int ret = VM_FAULT_SIGBUS;
2654         pgoff_t idx;
2655         unsigned long size;
2656         struct page *page;
2657         struct address_space *mapping;
2658         pte_t new_pte;
2659
2660         /*
2661          * Currently, we are forced to kill the process in the event the
2662          * original mapper has unmapped pages from the child due to a failed
2663          * COW. Warn that such a situation has occurred, as it may not be obvious.
2664          */
2665         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2666                 printk(KERN_WARNING
2667                         "PID %d killed due to inadequate hugepage pool\n",
2668                         current->pid);
2669                 return ret;
2670         }
2671
2672         mapping = vma->vm_file->f_mapping;
2673         idx = vma_hugecache_offset(h, vma, address);
2674
2675         /*
2676          * Use page lock to guard against racing truncation
2677          * before we get page_table_lock.
2678          */
2679 retry:
2680         page = find_lock_page(mapping, idx);
2681         if (!page) {
2682                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2683                 if (idx >= size)
2684                         goto out;
2685                 page = alloc_huge_page(vma, address, 0);
2686                 if (IS_ERR(page)) {
2687                         ret = -PTR_ERR(page);
2688                         goto out;
2689                 }
2690                 clear_huge_page(page, address, pages_per_huge_page(h));
2691                 __SetPageUptodate(page);
2692
2693                 if (vma->vm_flags & VM_MAYSHARE) {
2694                         int err;
2695                         struct inode *inode = mapping->host;
2696
2697                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2698                         if (err) {
2699                                 put_page(page);
2700                                 if (err == -EEXIST)
2701                                         goto retry;
2702                                 goto out;
2703                         }
2704
2705                         spin_lock(&inode->i_lock);
2706                         inode->i_blocks += blocks_per_huge_page(h);
2707                         spin_unlock(&inode->i_lock);
2708                         page_dup_rmap(page);
2709                 } else {
2710                         lock_page(page);
2711                         if (unlikely(anon_vma_prepare(vma))) {
2712                                 ret = VM_FAULT_OOM;
2713                                 goto backout_unlocked;
2714                         }
2715                         hugepage_add_new_anon_rmap(page, vma, address);
2716                 }
2717         } else {
2718                 /*
2719                  * If a memory error occurs between mmap() and fault, some processes
2720                  * may not have a hwpoisoned swap entry for the errored virtual address,
2721                  * so we need to block the hugepage fault with a PG_hwpoison bit check.
2722                  */
2723                 if (unlikely(PageHWPoison(page))) {
2724                         ret = VM_FAULT_HWPOISON |
2725                               VM_FAULT_SET_HINDEX(h - hstates);
2726                         goto backout_unlocked;
2727                 }
2728                 page_dup_rmap(page);
2729         }
2730
2731         /*
2732          * If we are going to COW a private mapping later, we examine the
2733          * pending reservations for this page now. This will ensure that
2734          * any allocations necessary to record that reservation occur outside
2735          * the spinlock.
2736          */
2737         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2738                 if (vma_needs_reservation(h, vma, address) < 0) {
2739                         ret = VM_FAULT_OOM;
2740                         goto backout_unlocked;
2741                 }
2742
2743         spin_lock(&mm->page_table_lock);
2744         size = i_size_read(mapping->host) >> huge_page_shift(h);
2745         if (idx >= size)
2746                 goto backout;
2747
2748         ret = 0;
2749         if (!huge_pte_none(huge_ptep_get(ptep)))
2750                 goto backout;
2751
2752         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2753                                 && (vma->vm_flags & VM_SHARED)));
2754         set_huge_pte_at(mm, address, ptep, new_pte);
2755
2756         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2757                 /* Optimization, do the COW without a second fault */
2758                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2759         }
2760
2761         spin_unlock(&mm->page_table_lock);
2762         unlock_page(page);
2763 out:
2764         return ret;
2765
2766 backout:
2767         spin_unlock(&mm->page_table_lock);
2768 backout_unlocked:
2769         unlock_page(page);
2770         put_page(page);
2771         goto out;
2772 }
2773
2774 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2775                         unsigned long address, unsigned int flags)
2776 {
2777         pte_t *ptep;
2778         pte_t entry;
2779         int ret;
2780         struct page *page = NULL;
2781         struct page *pagecache_page = NULL;
2782         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2783         struct hstate *h = hstate_vma(vma);
2784
2785         ptep = huge_pte_offset(mm, address);
2786         if (ptep) {
2787                 entry = huge_ptep_get(ptep);
2788                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2789                         migration_entry_wait_huge(mm, ptep);
2790                         return 0;
2791                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2792                         return VM_FAULT_HWPOISON_LARGE |
2793                                VM_FAULT_SET_HINDEX(h - hstates);
2794         }
2795
2796         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2797         if (!ptep)
2798                 return VM_FAULT_OOM;
2799
2800         /*
2801          * Serialize hugepage allocation and instantiation, so that we don't
2802          * get spurious allocation failures if two CPUs race to instantiate
2803          * the same page in the page cache.
2804          */
2805         mutex_lock(&hugetlb_instantiation_mutex);
2806         entry = huge_ptep_get(ptep);
2807         if (huge_pte_none(entry)) {
2808                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2809                 goto out_mutex;
2810         }
2811
2812         ret = 0;
2813
2814         /*
2815          * If we are going to COW the mapping later, we examine the pending
2816          * reservations for this page now. This will ensure that any
2817          * allocations necessary to record that reservation occur outside the
2818          * spinlock. For private mappings, we also lookup the pagecache
2819          * page now as it is used to determine if a reservation has been
2820          * consumed.
2821          */
2822         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2823                 if (vma_needs_reservation(h, vma, address) < 0) {
2824                         ret = VM_FAULT_OOM;
2825                         goto out_mutex;
2826                 }
2827
2828                 if (!(vma->vm_flags & VM_MAYSHARE))
2829                         pagecache_page = hugetlbfs_pagecache_page(h,
2830                                                                 vma, address);
2831         }
2832
2833         /*
2834  * hugetlb_cow() requires the page locks of pte_page(entry) and
2835  * pagecache_page, so here we need to take the former one
2836  * when page != pagecache_page or !pagecache_page.
2837  * Note that the locking order is always pagecache_page -> page,
2838  * so there is no deadlock concern.
2839          */
2840         page = pte_page(entry);
2841         get_page(page);
2842         if (page != pagecache_page)
2843                 lock_page(page);
2844
2845         spin_lock(&mm->page_table_lock);
2846         /* Check for a racing update before calling hugetlb_cow */
2847         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2848                 goto out_page_table_lock;
2849
2850
2851         if (flags & FAULT_FLAG_WRITE) {
2852                 if (!pte_write(entry)) {
2853                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2854                                                         pagecache_page);
2855                         goto out_page_table_lock;
2856                 }
2857                 entry = pte_mkdirty(entry);
2858         }
2859         entry = pte_mkyoung(entry);
2860         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2861                                                 flags & FAULT_FLAG_WRITE))
2862                 update_mmu_cache(vma, address, ptep);
2863
2864 out_page_table_lock:
2865         spin_unlock(&mm->page_table_lock);
2866
2867         if (pagecache_page) {
2868                 unlock_page(pagecache_page);
2869                 put_page(pagecache_page);
2870         }
2871         if (page != pagecache_page)
2872                 unlock_page(page);
2873         put_page(page);
2874
2875 out_mutex:
2876         mutex_unlock(&hugetlb_instantiation_mutex);
2877
2878         return ret;
2879 }
2880
2881 /* Can be overridden by architectures */
2882 __attribute__((weak)) struct page *
2883 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2884                pud_t *pud, int write)
2885 {
2886         BUG();
2887         return NULL;
2888 }
2889
2890 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2891                         struct page **pages, struct vm_area_struct **vmas,
2892                         unsigned long *position, int *length, int i,
2893                         unsigned int flags)
2894 {
2895         unsigned long pfn_offset;
2896         unsigned long vaddr = *position;
2897         int remainder = *length;
2898         struct hstate *h = hstate_vma(vma);
2899
2900         spin_lock(&mm->page_table_lock);
2901         while (vaddr < vma->vm_end && remainder) {
2902                 pte_t *pte;
2903                 int absent;
2904                 struct page *page;
2905
2906                 /*
2907                  * Some archs (sparc64, sh*) have multiple pte_t entries for
2908                  * each hugepage.  We have to make sure we get the
2909                  * first, for the page indexing below to work.
2910                  */
2911                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2912                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2913
2914                 /*
2915                  * When coredumping, it suits get_dump_page if we just return
2916                  * an error where there's an empty slot with no huge pagecache
2917                  * to back it.  This way, we avoid allocating a hugepage, and
2918                  * the sparse dumpfile avoids allocating disk blocks, but its
2919                  * huge holes still show up with zeroes where they need to be.
2920                  */
2921                 if (absent && (flags & FOLL_DUMP) &&
2922                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2923                         remainder = 0;
2924                         break;
2925                 }
2926
2927                 /*
2928                  * We need to call hugetlb_fault() both for hugepages under migration
2929                  * (in which case hugetlb_fault() waits for the migration) and for
2930                  * hwpoisoned hugepages (in which case we need to prevent the
2931                  * caller from accessing them). To do this, we use is_swap_pte()
2932                  * here instead of is_hugetlb_entry_migration() and
2933                  * is_hugetlb_entry_hwpoisoned(), because it simply covers
2934                  * both cases and because we can't follow correct pages
2935                  * directly from any kind of swap entry.
2936                  */
2937                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
2938                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2939                         int ret;
2940
2941                         spin_unlock(&mm->page_table_lock);
2942                         ret = hugetlb_fault(mm, vma, vaddr,
2943                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2944                         spin_lock(&mm->page_table_lock);
2945                         if (!(ret & VM_FAULT_ERROR))
2946                                 continue;
2947
2948                         remainder = 0;
2949                         break;
2950                 }
2951
2952                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2953                 page = pte_page(huge_ptep_get(pte));
2954 same_page:
2955                 if (pages) {
2956                         pages[i] = mem_map_offset(page, pfn_offset);
2957                         get_page(pages[i]);
2958                 }
2959
2960                 if (vmas)
2961                         vmas[i] = vma;
2962
2963                 vaddr += PAGE_SIZE;
2964                 ++pfn_offset;
2965                 --remainder;
2966                 ++i;
2967                 if (vaddr < vma->vm_end && remainder &&
2968                                 pfn_offset < pages_per_huge_page(h)) {
2969                         /*
2970                          * We use pfn_offset to avoid touching the pageframes
2971                          * of this compound page.
2972                          */
2973                         goto same_page;
2974                 }
2975         }
2976         spin_unlock(&mm->page_table_lock);
2977         *length = remainder;
2978         *position = vaddr;
2979
2980         return i ? i : -EFAULT;
2981 }
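/*
 * A minimal, illustrative sketch of how the routine above (follow_hugetlb_page()
 * in mainline) is typically consumed by the GUP core: the caller passes an
 * in/out position and length and advances by the number of page-array slots
 * that were filled.  gup_one_huge_range() and its parameter names are
 * hypothetical and exist only for this sketch; they are not part of this file.
 */
static int gup_one_huge_range(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, int nr_pages,
                              struct page **pages, unsigned int gup_flags)
{
        unsigned long pos = start;
        int remaining = nr_pages;
        int filled;

        /* Fill pages[0..filled) for the hugetlb portion of the range. */
        filled = follow_hugetlb_page(mm, vma, pages, NULL, &pos, &remaining,
                                     0, gup_flags);
        if (filled < 0)
                return filled;          /* -EFAULT: nothing could be mapped */

        /* On return, pos/remaining describe what is still left to fault in. */
        return filled;
}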
2982
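/*
 * Change the protection on every huge PTE mapped in [address, end): each
 * populated entry is cleared and re-installed with the new protection,
 * while entries whose shared page table was just detached by
 * huge_pmd_unshare() are skipped.  This is the hugetlb counterpart of the
 * generic mprotect() page-table walk; see the comment at the bottom of the
 * function for why the TLB flush must happen before i_mmap_mutex is dropped.
 */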
2983 void hugetlb_change_protection(struct vm_area_struct *vma,
2984                 unsigned long address, unsigned long end, pgprot_t newprot)
2985 {
2986         struct mm_struct *mm = vma->vm_mm;
2987         unsigned long start = address;
2988         pte_t *ptep;
2989         pte_t pte;
2990         struct hstate *h = hstate_vma(vma);
2991
2992         BUG_ON(address >= end);
2993         flush_cache_range(vma, address, end);
2994
2995         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2996         spin_lock(&mm->page_table_lock);
2997         for (; address < end; address += huge_page_size(h)) {
2998                 ptep = huge_pte_offset(mm, address);
2999                 if (!ptep)
3000                         continue;
3001                 if (huge_pmd_unshare(mm, &address, ptep))
3002                         continue;
3003                 if (!huge_pte_none(huge_ptep_get(ptep))) {
3004                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3005                         pte = pte_mkhuge(pte_modify(pte, newprot));
3006                         set_huge_pte_at(mm, address, ptep, pte);
3007                 }
3008         }
3009         spin_unlock(&mm->page_table_lock);
3010         /*
3011          * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3012          * may have cleared our pud entry and done put_page on the page table:
3013          * once we release i_mmap_mutex, another task can do the final put_page
3014          * and that page table be reused and filled with junk.
3015          */
3016         flush_tlb_range(vma, start, end);
3017         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3018 }
3019
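/*
 * Reserve huge pages for the range [from, to) of a hugetlbfs file, with
 * 'from' and 'to' expressed in units of huge pages.  The reservation is
 * charged against the inode's subpool and the global reserve; shared
 * mappings record it in the inode's region list, while private mappings
 * get a per-VMA resv_map and are marked HPAGE_RESV_OWNER.
 */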
3020 int hugetlb_reserve_pages(struct inode *inode,
3021                                         long from, long to,
3022                                         struct vm_area_struct *vma,
3023                                         vm_flags_t vm_flags)
3024 {
3025         long ret, chg;
3026         struct hstate *h = hstate_inode(inode);
3027         struct hugepage_subpool *spool = subpool_inode(inode);
3028
3029         /*
3030          * Only apply hugepage reservation if asked. At fault time, an
3031          * attempt will be made for VM_NORESERVE mappings to allocate a
3032          * page without using reserves.
3033          */
3034         if (vm_flags & VM_NORESERVE)
3035                 return 0;
3036
3037         /*
3038          * Shared mappings base their reservation on the number of pages that
3039          * are already allocated on behalf of the file. Private mappings need
3040          * to reserve the full area even if read-only as mprotect() may be
3041          * to reserve the full area even if read-only, as mprotect() may be
3042          * called to make the mapping read-write. Assume !vma is a shm mapping.
3043         if (!vma || vma->vm_flags & VM_MAYSHARE)
3044                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3045         else {
3046                 struct resv_map *resv_map = resv_map_alloc();
3047                 if (!resv_map)
3048                         return -ENOMEM;
3049
3050                 chg = to - from;
3051
3052                 set_vma_resv_map(vma, resv_map);
3053                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3054         }
3055
3056         if (chg < 0) {
3057                 ret = chg;
3058                 goto out_err;
3059         }
3060
3061         /* There must be enough pages in the subpool for the mapping */
3062         if (hugepage_subpool_get_pages(spool, chg)) {
3063                 ret = -ENOSPC;
3064                 goto out_err;
3065         }
3066
3067         /*
3068          * Check that enough hugepages are available for the reservation.
3069          * Hand the pages back to the subpool if there are not.
3070          */
3071         ret = hugetlb_acct_memory(h, chg);
3072         if (ret < 0) {
3073                 hugepage_subpool_put_pages(spool, chg);
3074                 goto out_err;
3075         }
3076
3077         /*
3078          * Account for the reservations made. Shared mappings record regions
3079          * that have reservations as they are shared by multiple VMAs.
3080          * When the last VMA disappears, the region map says how much
3081          * the reservation was and the page cache tells how much of
3082          * the reservation was consumed. Private mappings are per-VMA and
3083          * only the consumed reservations are tracked. When the VMA
3084          * disappears, the original reservation is the VMA size and the
3085          * consumed reservations are stored in the map. Hence, nothing
3086          * else has to be done for private mappings here
3087          */
3088         if (!vma || vma->vm_flags & VM_MAYSHARE)
3089                 region_add(&inode->i_mapping->private_list, from, to);
3090         return 0;
3091 out_err:
3092         if (vma)
3093                 resv_map_put(vma);
3094         return ret;
3095 }
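/*
 * A rough sketch of the mmap-time caller, loosely modelled on the hugetlbfs
 * f_op->mmap handler.  example_hugetlbfs_mmap() is a hypothetical name and
 * the real handler's alignment and size checks are omitted; only the unit
 * conversion into huge pages and the reservation call are illustrated.
 */
static int example_hugetlbfs_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct hstate *h = hstate_inode(inode);
        unsigned long len = vma->vm_end - vma->vm_start;
        long from = vma->vm_pgoff >> huge_page_order(h);
        long to = from + (len >> huge_page_shift(h));

        /* Reserve [from, to) huge pages up front; fail the mmap if we cannot. */
        if (hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags))
                return -ENOMEM;

        return 0;
}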
3096
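/*
 * Release reservations when a hugetlbfs inode is truncated or evicted:
 * 'offset' is the first huge page index dropped from the region map and
 * 'freed' is the number of huge pages actually freed from the page cache.
 * The remainder (chg - freed) is handed back to the subpool and to the
 * global reserve.
 */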
3097 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3098 {
3099         struct hstate *h = hstate_inode(inode);
3100         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3101         struct hugepage_subpool *spool = subpool_inode(inode);
3102
3103         spin_lock(&inode->i_lock);
3104         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3105         spin_unlock(&inode->i_lock);
3106
3107         hugepage_subpool_put_pages(spool, (chg - freed));
3108         hugetlb_acct_memory(h, -(chg - freed));
3109 }
3110
3111 #ifdef CONFIG_MEMORY_FAILURE
3112
3113 /* Must be called with hugetlb_lock held. */
3114 static int is_hugepage_on_freelist(struct page *hpage)
3115 {
3116         struct page *page;
3117         struct page *tmp;
3118         struct hstate *h = page_hstate(hpage);
3119         int nid = page_to_nid(hpage);
3120
3121         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3122                 if (page == hpage)
3123                         return 1;
3124         return 0;
3125 }
3126
3127 /*
3128  * This function is called from the memory-failure code.
3129  * It assumes the caller holds the page lock of the head page.
3130  */
3131 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3132 {
3133         struct hstate *h = page_hstate(hpage);
3134         int nid = page_to_nid(hpage);
3135         int ret = -EBUSY;
3136
3137         spin_lock(&hugetlb_lock);
3138         if (is_hugepage_on_freelist(hpage)) {
3139                 list_del(&hpage->lru);
3140                 set_page_refcounted(hpage);
3141                 h->free_huge_pages--;
3142                 h->free_huge_pages_node[nid]--;
3143                 ret = 0;
3144         }
3145         spin_unlock(&hugetlb_lock);
3146         return ret;
3147 }
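/*
 * The memory-failure code uses the helper above to pull a hwpoisoned huge
 * page off the free list so it can never be handed out again; on success
 * the page is given a reference and the pool counters are decremented.
 */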
3148 #endif