cpuset: mm: reduce large amounts of memory barrier related damage v3
[pandora-kernel.git] / mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <linux/io.h>
28
29 #include <linux/hugetlb.h>
30 #include <linux/node.h>
31 #include "internal.h"
32
33 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36
37 static int max_hstate;
38 unsigned int default_hstate_idx;
39 struct hstate hstates[HUGE_MAX_HSTATE];
40
41 __initdata LIST_HEAD(huge_boot_pages);
42
43 /* for command line parsing */
44 static struct hstate * __initdata parsed_hstate;
45 static unsigned long __initdata default_hstate_max_huge_pages;
46 static unsigned long __initdata default_hstate_size;
47
48 #define for_each_hstate(h) \
49         for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
50
51 /*
52  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
53  */
54 static DEFINE_SPINLOCK(hugetlb_lock);
55
56 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
57 {
58         bool free = (spool->count == 0) && (spool->used_hpages == 0);
59
60         spin_unlock(&spool->lock);
61
62         /* If no pages are used, and no other handles to the subpool
63          * remain, free the subpool the subpool remain */
64         if (free)
65                 kfree(spool);
66 }
67
68 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
69 {
70         struct hugepage_subpool *spool;
71
72         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
73         if (!spool)
74                 return NULL;
75
76         spin_lock_init(&spool->lock);
77         spool->count = 1;
78         spool->max_hpages = nr_blocks;
79         spool->used_hpages = 0;
80
81         return spool;
82 }
83
84 void hugepage_put_subpool(struct hugepage_subpool *spool)
85 {
86         spin_lock(&spool->lock);
87         BUG_ON(!spool->count);
88         spool->count--;
89         unlock_or_release_subpool(spool);
90 }
91
92 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
93                                       long delta)
94 {
95         int ret = 0;
96
97         if (!spool)
98                 return 0;
99
100         spin_lock(&spool->lock);
101         if ((spool->used_hpages + delta) <= spool->max_hpages) {
102                 spool->used_hpages += delta;
103         } else {
104                 ret = -ENOMEM;
105         }
106         spin_unlock(&spool->lock);
107
108         return ret;
109 }
110
111 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
112                                        long delta)
113 {
114         if (!spool)
115                 return;
116
117         spin_lock(&spool->lock);
118         spool->used_hpages -= delta;
119         /* If hugetlbfs_put_super couldn't free spool due to
120         * an outstanding quota reference, free it now. */
121         unlock_or_release_subpool(spool);
122 }
123
124 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
125 {
126         return HUGETLBFS_SB(inode->i_sb)->spool;
127 }
128
129 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
130 {
131         return subpool_inode(vma->vm_file->f_dentry->d_inode);
132 }
133
134 /*
135  * Region tracking -- allows tracking of reservations and instantiated pages
136  *                    across the pages in a mapping.
137  *
138  * The region data structures are protected by a combination of the mmap_sem
139  * and the hugetlb_instantion_mutex.  To access or modify a region the caller
140  * must either hold the mmap_sem for write, or the mmap_sem for read and
141  * the hugetlb_instantiation mutex:
142  *
143  *      down_write(&mm->mmap_sem);
144  * or
145  *      down_read(&mm->mmap_sem);
146  *      mutex_lock(&hugetlb_instantiation_mutex);
147  */
148 struct file_region {
149         struct list_head link;
150         long from;
151         long to;
152 };
153
154 static long region_add(struct list_head *head, long f, long t)
155 {
156         struct file_region *rg, *nrg, *trg;
157
158         /* Locate the region we are either in or before. */
159         list_for_each_entry(rg, head, link)
160                 if (f <= rg->to)
161                         break;
162
163         /* Round our left edge to the current segment if it encloses us. */
164         if (f > rg->from)
165                 f = rg->from;
166
167         /* Check for and consume any regions we now overlap with. */
168         nrg = rg;
169         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
170                 if (&rg->link == head)
171                         break;
172                 if (rg->from > t)
173                         break;
174
175                 /* If this area reaches higher then extend our area to
176                  * include it completely.  If this is not the first area
177                  * which we intend to reuse, free it. */
178                 if (rg->to > t)
179                         t = rg->to;
180                 if (rg != nrg) {
181                         list_del(&rg->link);
182                         kfree(rg);
183                 }
184         }
185         nrg->from = f;
186         nrg->to = t;
187         return 0;
188 }
189
190 static long region_chg(struct list_head *head, long f, long t)
191 {
192         struct file_region *rg, *nrg;
193         long chg = 0;
194
195         /* Locate the region we are before or in. */
196         list_for_each_entry(rg, head, link)
197                 if (f <= rg->to)
198                         break;
199
200         /* If we are below the current region then a new region is required.
201          * Subtle, allocate a new region at the position but make it zero
202          * size such that we can guarantee to record the reservation. */
203         if (&rg->link == head || t < rg->from) {
204                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
205                 if (!nrg)
206                         return -ENOMEM;
207                 nrg->from = f;
208                 nrg->to   = f;
209                 INIT_LIST_HEAD(&nrg->link);
210                 list_add(&nrg->link, rg->link.prev);
211
212                 return t - f;
213         }
214
215         /* Round our left edge to the current segment if it encloses us. */
216         if (f > rg->from)
217                 f = rg->from;
218         chg = t - f;
219
220         /* Check for and consume any regions we now overlap with. */
221         list_for_each_entry(rg, rg->link.prev, link) {
222                 if (&rg->link == head)
223                         break;
224                 if (rg->from > t)
225                         return chg;
226
227                 /* We overlap with this area, if it extends further than
228                  * us then we must extend ourselves.  Account for its
229                  * existing reservation. */
230                 if (rg->to > t) {
231                         chg += rg->to - t;
232                         t = rg->to;
233                 }
234                 chg -= rg->to - rg->from;
235         }
236         return chg;
237 }
238
239 static long region_truncate(struct list_head *head, long end)
240 {
241         struct file_region *rg, *trg;
242         long chg = 0;
243
244         /* Locate the region we are either in or before. */
245         list_for_each_entry(rg, head, link)
246                 if (end <= rg->to)
247                         break;
248         if (&rg->link == head)
249                 return 0;
250
251         /* If we are in the middle of a region then adjust it. */
252         if (end > rg->from) {
253                 chg = rg->to - end;
254                 rg->to = end;
255                 rg = list_entry(rg->link.next, typeof(*rg), link);
256         }
257
258         /* Drop any remaining regions. */
259         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
260                 if (&rg->link == head)
261                         break;
262                 chg += rg->to - rg->from;
263                 list_del(&rg->link);
264                 kfree(rg);
265         }
266         return chg;
267 }
268
269 static long region_count(struct list_head *head, long f, long t)
270 {
271         struct file_region *rg;
272         long chg = 0;
273
274         /* Locate each segment we overlap with, and count that overlap. */
275         list_for_each_entry(rg, head, link) {
276                 int seg_from;
277                 int seg_to;
278
279                 if (rg->to <= f)
280                         continue;
281                 if (rg->from >= t)
282                         break;
283
284                 seg_from = max(rg->from, f);
285                 seg_to = min(rg->to, t);
286
287                 chg += seg_to - seg_from;
288         }
289
290         return chg;
291 }
292
293 /*
294  * Convert the address within this vma to the page offset within
295  * the mapping, in pagecache page units; huge pages here.
296  */
297 static pgoff_t vma_hugecache_offset(struct hstate *h,
298                         struct vm_area_struct *vma, unsigned long address)
299 {
300         return ((address - vma->vm_start) >> huge_page_shift(h)) +
301                         (vma->vm_pgoff >> huge_page_order(h));
302 }
303
304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
305                                      unsigned long address)
306 {
307         return vma_hugecache_offset(hstate_vma(vma), vma, address);
308 }
309
310 /*
311  * Return the size of the pages allocated when backing a VMA. In the majority
312  * cases this will be same size as used by the page table entries.
313  */
314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
315 {
316         struct hstate *hstate;
317
318         if (!is_vm_hugetlb_page(vma))
319                 return PAGE_SIZE;
320
321         hstate = hstate_vma(vma);
322
323         return 1UL << (hstate->order + PAGE_SHIFT);
324 }
325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
326
327 /*
328  * Return the page size being used by the MMU to back a VMA. In the majority
329  * of cases, the page size used by the kernel matches the MMU size. On
330  * architectures where it differs, an architecture-specific version of this
331  * function is required.
332  */
333 #ifndef vma_mmu_pagesize
334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
335 {
336         return vma_kernel_pagesize(vma);
337 }
338 #endif
339
340 /*
341  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
342  * bits of the reservation map pointer, which are always clear due to
343  * alignment.
344  */
345 #define HPAGE_RESV_OWNER    (1UL << 0)
346 #define HPAGE_RESV_UNMAPPED (1UL << 1)
347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
348
349 /*
350  * These helpers are used to track how many pages are reserved for
351  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
352  * is guaranteed to have their future faults succeed.
353  *
354  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
355  * the reserve counters are updated with the hugetlb_lock held. It is safe
356  * to reset the VMA at fork() time as it is not in use yet and there is no
357  * chance of the global counters getting corrupted as a result of the values.
358  *
359  * The private mapping reservation is represented in a subtly different
360  * manner to a shared mapping.  A shared mapping has a region map associated
361  * with the underlying file, this region map represents the backing file
362  * pages which have ever had a reservation assigned which this persists even
363  * after the page is instantiated.  A private mapping has a region map
364  * associated with the original mmap which is attached to all VMAs which
365  * reference it, this region map represents those offsets which have consumed
366  * reservation ie. where pages have been instantiated.
367  */
368 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
369 {
370         return (unsigned long)vma->vm_private_data;
371 }
372
373 static void set_vma_private_data(struct vm_area_struct *vma,
374                                                         unsigned long value)
375 {
376         vma->vm_private_data = (void *)value;
377 }
378
379 struct resv_map {
380         struct kref refs;
381         struct list_head regions;
382 };
383
384 static struct resv_map *resv_map_alloc(void)
385 {
386         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
387         if (!resv_map)
388                 return NULL;
389
390         kref_init(&resv_map->refs);
391         INIT_LIST_HEAD(&resv_map->regions);
392
393         return resv_map;
394 }
395
396 static void resv_map_release(struct kref *ref)
397 {
398         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
399
400         /* Clear out any active regions before we release the map. */
401         region_truncate(&resv_map->regions, 0);
402         kfree(resv_map);
403 }
404
405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
406 {
407         VM_BUG_ON(!is_vm_hugetlb_page(vma));
408         if (!(vma->vm_flags & VM_MAYSHARE))
409                 return (struct resv_map *)(get_vma_private_data(vma) &
410                                                         ~HPAGE_RESV_MASK);
411         return NULL;
412 }
413
414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
415 {
416         VM_BUG_ON(!is_vm_hugetlb_page(vma));
417         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
418
419         set_vma_private_data(vma, (get_vma_private_data(vma) &
420                                 HPAGE_RESV_MASK) | (unsigned long)map);
421 }
422
423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
424 {
425         VM_BUG_ON(!is_vm_hugetlb_page(vma));
426         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
427
428         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
429 }
430
431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
432 {
433         VM_BUG_ON(!is_vm_hugetlb_page(vma));
434
435         return (get_vma_private_data(vma) & flag) != 0;
436 }
437
438 /* Decrement the reserved pages in the hugepage pool by one */
439 static void decrement_hugepage_resv_vma(struct hstate *h,
440                         struct vm_area_struct *vma)
441 {
442         if (vma->vm_flags & VM_NORESERVE)
443                 return;
444
445         if (vma->vm_flags & VM_MAYSHARE) {
446                 /* Shared mappings always use reserves */
447                 h->resv_huge_pages--;
448         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
449                 /*
450                  * Only the process that called mmap() has reserves for
451                  * private mappings.
452                  */
453                 h->resv_huge_pages--;
454         }
455 }
456
457 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
458 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
459 {
460         VM_BUG_ON(!is_vm_hugetlb_page(vma));
461         if (!(vma->vm_flags & VM_MAYSHARE))
462                 vma->vm_private_data = (void *)0;
463 }
464
465 /* Returns true if the VMA has associated reserve pages */
466 static int vma_has_reserves(struct vm_area_struct *vma)
467 {
468         if (vma->vm_flags & VM_MAYSHARE)
469                 return 1;
470         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
471                 return 1;
472         return 0;
473 }
474
475 static void copy_gigantic_page(struct page *dst, struct page *src)
476 {
477         int i;
478         struct hstate *h = page_hstate(src);
479         struct page *dst_base = dst;
480         struct page *src_base = src;
481
482         for (i = 0; i < pages_per_huge_page(h); ) {
483                 cond_resched();
484                 copy_highpage(dst, src);
485
486                 i++;
487                 dst = mem_map_next(dst, dst_base, i);
488                 src = mem_map_next(src, src_base, i);
489         }
490 }
491
492 void copy_huge_page(struct page *dst, struct page *src)
493 {
494         int i;
495         struct hstate *h = page_hstate(src);
496
497         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
498                 copy_gigantic_page(dst, src);
499                 return;
500         }
501
502         might_sleep();
503         for (i = 0; i < pages_per_huge_page(h); i++) {
504                 cond_resched();
505                 copy_highpage(dst + i, src + i);
506         }
507 }
508
509 static void enqueue_huge_page(struct hstate *h, struct page *page)
510 {
511         int nid = page_to_nid(page);
512         list_add(&page->lru, &h->hugepage_freelists[nid]);
513         h->free_huge_pages++;
514         h->free_huge_pages_node[nid]++;
515 }
516
517 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
518 {
519         struct page *page;
520
521         if (list_empty(&h->hugepage_freelists[nid]))
522                 return NULL;
523         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
524         list_del(&page->lru);
525         set_page_refcounted(page);
526         h->free_huge_pages--;
527         h->free_huge_pages_node[nid]--;
528         return page;
529 }
530
531 static struct page *dequeue_huge_page_vma(struct hstate *h,
532                                 struct vm_area_struct *vma,
533                                 unsigned long address, int avoid_reserve)
534 {
535         struct page *page;
536         struct mempolicy *mpol;
537         nodemask_t *nodemask;
538         struct zonelist *zonelist;
539         struct zone *zone;
540         struct zoneref *z;
541         unsigned int cpuset_mems_cookie;
542
543 retry_cpuset:
544         cpuset_mems_cookie = get_mems_allowed();
545         zonelist = huge_zonelist(vma, address,
546                                         htlb_alloc_mask, &mpol, &nodemask);
547         /*
548          * A child process with MAP_PRIVATE mappings created by their parent
549          * have no page reserves. This check ensures that reservations are
550          * not "stolen". The child may still get SIGKILLed
551          */
552         if (!vma_has_reserves(vma) &&
553                         h->free_huge_pages - h->resv_huge_pages == 0)
554                 goto err;
555
556         /* If reserves cannot be used, ensure enough pages are in the pool */
557         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
558                 goto err;
559
560         for_each_zone_zonelist_nodemask(zone, z, zonelist,
561                                                 MAX_NR_ZONES - 1, nodemask) {
562                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
563                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
564                         if (page) {
565                                 if (!avoid_reserve)
566                                         decrement_hugepage_resv_vma(h, vma);
567                                 break;
568                         }
569                 }
570         }
571
572         mpol_cond_put(mpol);
573         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
574                 goto retry_cpuset;
575         return page;
576
577 err:
578         mpol_cond_put(mpol);
579         return NULL;
580 }
581
582 static void update_and_free_page(struct hstate *h, struct page *page)
583 {
584         int i;
585
586         VM_BUG_ON(h->order >= MAX_ORDER);
587
588         h->nr_huge_pages--;
589         h->nr_huge_pages_node[page_to_nid(page)]--;
590         for (i = 0; i < pages_per_huge_page(h); i++) {
591                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
592                                 1 << PG_referenced | 1 << PG_dirty |
593                                 1 << PG_active | 1 << PG_reserved |
594                                 1 << PG_private | 1 << PG_writeback);
595         }
596         set_compound_page_dtor(page, NULL);
597         set_page_refcounted(page);
598         arch_release_hugepage(page);
599         __free_pages(page, huge_page_order(h));
600 }
601
602 struct hstate *size_to_hstate(unsigned long size)
603 {
604         struct hstate *h;
605
606         for_each_hstate(h) {
607                 if (huge_page_size(h) == size)
608                         return h;
609         }
610         return NULL;
611 }
612
613 static void free_huge_page(struct page *page)
614 {
615         /*
616          * Can't pass hstate in here because it is called from the
617          * compound page destructor.
618          */
619         struct hstate *h = page_hstate(page);
620         int nid = page_to_nid(page);
621         struct hugepage_subpool *spool =
622                 (struct hugepage_subpool *)page_private(page);
623
624         set_page_private(page, 0);
625         page->mapping = NULL;
626         BUG_ON(page_count(page));
627         BUG_ON(page_mapcount(page));
628         INIT_LIST_HEAD(&page->lru);
629
630         spin_lock(&hugetlb_lock);
631         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
632                 update_and_free_page(h, page);
633                 h->surplus_huge_pages--;
634                 h->surplus_huge_pages_node[nid]--;
635         } else {
636                 enqueue_huge_page(h, page);
637         }
638         spin_unlock(&hugetlb_lock);
639         hugepage_subpool_put_pages(spool, 1);
640 }
641
642 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
643 {
644         set_compound_page_dtor(page, free_huge_page);
645         spin_lock(&hugetlb_lock);
646         h->nr_huge_pages++;
647         h->nr_huge_pages_node[nid]++;
648         spin_unlock(&hugetlb_lock);
649         put_page(page); /* free it into the hugepage allocator */
650 }
651
652 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
653 {
654         int i;
655         int nr_pages = 1 << order;
656         struct page *p = page + 1;
657
658         /* we rely on prep_new_huge_page to set the destructor */
659         set_compound_order(page, order);
660         __SetPageHead(page);
661         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
662                 __SetPageTail(p);
663                 set_page_count(p, 0);
664                 p->first_page = page;
665         }
666 }
667
668 int PageHuge(struct page *page)
669 {
670         compound_page_dtor *dtor;
671
672         if (!PageCompound(page))
673                 return 0;
674
675         page = compound_head(page);
676         dtor = get_compound_page_dtor(page);
677
678         return dtor == free_huge_page;
679 }
680 EXPORT_SYMBOL_GPL(PageHuge);
681
682 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
683 {
684         struct page *page;
685
686         if (h->order >= MAX_ORDER)
687                 return NULL;
688
689         page = alloc_pages_exact_node(nid,
690                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
691                                                 __GFP_REPEAT|__GFP_NOWARN,
692                 huge_page_order(h));
693         if (page) {
694                 if (arch_prepare_hugepage(page)) {
695                         __free_pages(page, huge_page_order(h));
696                         return NULL;
697                 }
698                 prep_new_huge_page(h, page, nid);
699         }
700
701         return page;
702 }
703
704 /*
705  * common helper functions for hstate_next_node_to_{alloc|free}.
706  * We may have allocated or freed a huge page based on a different
707  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
708  * be outside of *nodes_allowed.  Ensure that we use an allowed
709  * node for alloc or free.
710  */
711 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
712 {
713         nid = next_node(nid, *nodes_allowed);
714         if (nid == MAX_NUMNODES)
715                 nid = first_node(*nodes_allowed);
716         VM_BUG_ON(nid >= MAX_NUMNODES);
717
718         return nid;
719 }
720
721 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
722 {
723         if (!node_isset(nid, *nodes_allowed))
724                 nid = next_node_allowed(nid, nodes_allowed);
725         return nid;
726 }
727
728 /*
729  * returns the previously saved node ["this node"] from which to
730  * allocate a persistent huge page for the pool and advance the
731  * next node from which to allocate, handling wrap at end of node
732  * mask.
733  */
734 static int hstate_next_node_to_alloc(struct hstate *h,
735                                         nodemask_t *nodes_allowed)
736 {
737         int nid;
738
739         VM_BUG_ON(!nodes_allowed);
740
741         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
742         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
743
744         return nid;
745 }
746
747 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
748 {
749         struct page *page;
750         int start_nid;
751         int next_nid;
752         int ret = 0;
753
754         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
755         next_nid = start_nid;
756
757         do {
758                 page = alloc_fresh_huge_page_node(h, next_nid);
759                 if (page) {
760                         ret = 1;
761                         break;
762                 }
763                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
764         } while (next_nid != start_nid);
765
766         if (ret)
767                 count_vm_event(HTLB_BUDDY_PGALLOC);
768         else
769                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
770
771         return ret;
772 }
773
774 /*
775  * helper for free_pool_huge_page() - return the previously saved
776  * node ["this node"] from which to free a huge page.  Advance the
777  * next node id whether or not we find a free huge page to free so
778  * that the next attempt to free addresses the next node.
779  */
780 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
781 {
782         int nid;
783
784         VM_BUG_ON(!nodes_allowed);
785
786         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
787         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
788
789         return nid;
790 }
791
792 /*
793  * Free huge page from pool from next node to free.
794  * Attempt to keep persistent huge pages more or less
795  * balanced over allowed nodes.
796  * Called with hugetlb_lock locked.
797  */
798 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
799                                                          bool acct_surplus)
800 {
801         int start_nid;
802         int next_nid;
803         int ret = 0;
804
805         start_nid = hstate_next_node_to_free(h, nodes_allowed);
806         next_nid = start_nid;
807
808         do {
809                 /*
810                  * If we're returning unused surplus pages, only examine
811                  * nodes with surplus pages.
812                  */
813                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
814                     !list_empty(&h->hugepage_freelists[next_nid])) {
815                         struct page *page =
816                                 list_entry(h->hugepage_freelists[next_nid].next,
817                                           struct page, lru);
818                         list_del(&page->lru);
819                         h->free_huge_pages--;
820                         h->free_huge_pages_node[next_nid]--;
821                         if (acct_surplus) {
822                                 h->surplus_huge_pages--;
823                                 h->surplus_huge_pages_node[next_nid]--;
824                         }
825                         update_and_free_page(h, page);
826                         ret = 1;
827                         break;
828                 }
829                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
830         } while (next_nid != start_nid);
831
832         return ret;
833 }
834
835 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
836 {
837         struct page *page;
838         unsigned int r_nid;
839
840         if (h->order >= MAX_ORDER)
841                 return NULL;
842
843         /*
844          * Assume we will successfully allocate the surplus page to
845          * prevent racing processes from causing the surplus to exceed
846          * overcommit
847          *
848          * This however introduces a different race, where a process B
849          * tries to grow the static hugepage pool while alloc_pages() is
850          * called by process A. B will only examine the per-node
851          * counters in determining if surplus huge pages can be
852          * converted to normal huge pages in adjust_pool_surplus(). A
853          * won't be able to increment the per-node counter, until the
854          * lock is dropped by B, but B doesn't drop hugetlb_lock until
855          * no more huge pages can be converted from surplus to normal
856          * state (and doesn't try to convert again). Thus, we have a
857          * case where a surplus huge page exists, the pool is grown, and
858          * the surplus huge page still exists after, even though it
859          * should just have been converted to a normal huge page. This
860          * does not leak memory, though, as the hugepage will be freed
861          * once it is out of use. It also does not allow the counters to
862          * go out of whack in adjust_pool_surplus() as we don't modify
863          * the node values until we've gotten the hugepage and only the
864          * per-node value is checked there.
865          */
866         spin_lock(&hugetlb_lock);
867         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
868                 spin_unlock(&hugetlb_lock);
869                 return NULL;
870         } else {
871                 h->nr_huge_pages++;
872                 h->surplus_huge_pages++;
873         }
874         spin_unlock(&hugetlb_lock);
875
876         if (nid == NUMA_NO_NODE)
877                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
878                                    __GFP_REPEAT|__GFP_NOWARN,
879                                    huge_page_order(h));
880         else
881                 page = alloc_pages_exact_node(nid,
882                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
883                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
884
885         if (page && arch_prepare_hugepage(page)) {
886                 __free_pages(page, huge_page_order(h));
887                 return NULL;
888         }
889
890         spin_lock(&hugetlb_lock);
891         if (page) {
892                 r_nid = page_to_nid(page);
893                 set_compound_page_dtor(page, free_huge_page);
894                 /*
895                  * We incremented the global counters already
896                  */
897                 h->nr_huge_pages_node[r_nid]++;
898                 h->surplus_huge_pages_node[r_nid]++;
899                 __count_vm_event(HTLB_BUDDY_PGALLOC);
900         } else {
901                 h->nr_huge_pages--;
902                 h->surplus_huge_pages--;
903                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
904         }
905         spin_unlock(&hugetlb_lock);
906
907         return page;
908 }
909
910 /*
911  * This allocation function is useful in the context where vma is irrelevant.
912  * E.g. soft-offlining uses this function because it only cares physical
913  * address of error page.
914  */
915 struct page *alloc_huge_page_node(struct hstate *h, int nid)
916 {
917         struct page *page;
918
919         spin_lock(&hugetlb_lock);
920         page = dequeue_huge_page_node(h, nid);
921         spin_unlock(&hugetlb_lock);
922
923         if (!page)
924                 page = alloc_buddy_huge_page(h, nid);
925
926         return page;
927 }
928
929 /*
930  * Increase the hugetlb pool such that it can accommodate a reservation
931  * of size 'delta'.
932  */
933 static int gather_surplus_pages(struct hstate *h, int delta)
934 {
935         struct list_head surplus_list;
936         struct page *page, *tmp;
937         int ret, i;
938         int needed, allocated;
939
940         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
941         if (needed <= 0) {
942                 h->resv_huge_pages += delta;
943                 return 0;
944         }
945
946         allocated = 0;
947         INIT_LIST_HEAD(&surplus_list);
948
949         ret = -ENOMEM;
950 retry:
951         spin_unlock(&hugetlb_lock);
952         for (i = 0; i < needed; i++) {
953                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
954                 if (!page)
955                         /*
956                          * We were not able to allocate enough pages to
957                          * satisfy the entire reservation so we free what
958                          * we've allocated so far.
959                          */
960                         goto free;
961
962                 list_add(&page->lru, &surplus_list);
963         }
964         allocated += needed;
965
966         /*
967          * After retaking hugetlb_lock, we need to recalculate 'needed'
968          * because either resv_huge_pages or free_huge_pages may have changed.
969          */
970         spin_lock(&hugetlb_lock);
971         needed = (h->resv_huge_pages + delta) -
972                         (h->free_huge_pages + allocated);
973         if (needed > 0)
974                 goto retry;
975
976         /*
977          * The surplus_list now contains _at_least_ the number of extra pages
978          * needed to accommodate the reservation.  Add the appropriate number
979          * of pages to the hugetlb pool and free the extras back to the buddy
980          * allocator.  Commit the entire reservation here to prevent another
981          * process from stealing the pages as they are added to the pool but
982          * before they are reserved.
983          */
984         needed += allocated;
985         h->resv_huge_pages += delta;
986         ret = 0;
987
988         /* Free the needed pages to the hugetlb pool */
989         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
990                 if ((--needed) < 0)
991                         break;
992                 list_del(&page->lru);
993                 /*
994                  * This page is now managed by the hugetlb allocator and has
995                  * no users -- drop the buddy allocator's reference.
996                  */
997                 put_page_testzero(page);
998                 VM_BUG_ON(page_count(page));
999                 enqueue_huge_page(h, page);
1000         }
1001         spin_unlock(&hugetlb_lock);
1002
1003         /* Free unnecessary surplus pages to the buddy allocator */
1004 free:
1005         if (!list_empty(&surplus_list)) {
1006                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1007                         list_del(&page->lru);
1008                         put_page(page);
1009                 }
1010         }
1011         spin_lock(&hugetlb_lock);
1012
1013         return ret;
1014 }
1015
1016 /*
1017  * When releasing a hugetlb pool reservation, any surplus pages that were
1018  * allocated to satisfy the reservation must be explicitly freed if they were
1019  * never used.
1020  * Called with hugetlb_lock held.
1021  */
1022 static void return_unused_surplus_pages(struct hstate *h,
1023                                         unsigned long unused_resv_pages)
1024 {
1025         unsigned long nr_pages;
1026
1027         /* Uncommit the reservation */
1028         h->resv_huge_pages -= unused_resv_pages;
1029
1030         /* Cannot return gigantic pages currently */
1031         if (h->order >= MAX_ORDER)
1032                 return;
1033
1034         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1035
1036         /*
1037          * We want to release as many surplus pages as possible, spread
1038          * evenly across all nodes with memory. Iterate across these nodes
1039          * until we can no longer free unreserved surplus pages. This occurs
1040          * when the nodes with surplus pages have no free pages.
1041          * free_pool_huge_page() will balance the the freed pages across the
1042          * on-line nodes with memory and will handle the hstate accounting.
1043          */
1044         while (nr_pages--) {
1045                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1046                         break;
1047         }
1048 }
1049
1050 /*
1051  * Determine if the huge page at addr within the vma has an associated
1052  * reservation.  Where it does not we will need to logically increase
1053  * reservation and actually increase subpool usage before an allocation
1054  * can occur.  Where any new reservation would be required the
1055  * reservation change is prepared, but not committed.  Once the page
1056  * has been allocated from the subpool and instantiated the change should
1057  * be committed via vma_commit_reservation.  No action is required on
1058  * failure.
1059  */
1060 static long vma_needs_reservation(struct hstate *h,
1061                         struct vm_area_struct *vma, unsigned long addr)
1062 {
1063         struct address_space *mapping = vma->vm_file->f_mapping;
1064         struct inode *inode = mapping->host;
1065
1066         if (vma->vm_flags & VM_MAYSHARE) {
1067                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1068                 return region_chg(&inode->i_mapping->private_list,
1069                                                         idx, idx + 1);
1070
1071         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1072                 return 1;
1073
1074         } else  {
1075                 long err;
1076                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1077                 struct resv_map *reservations = vma_resv_map(vma);
1078
1079                 err = region_chg(&reservations->regions, idx, idx + 1);
1080                 if (err < 0)
1081                         return err;
1082                 return 0;
1083         }
1084 }
1085 static void vma_commit_reservation(struct hstate *h,
1086                         struct vm_area_struct *vma, unsigned long addr)
1087 {
1088         struct address_space *mapping = vma->vm_file->f_mapping;
1089         struct inode *inode = mapping->host;
1090
1091         if (vma->vm_flags & VM_MAYSHARE) {
1092                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1093                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1094
1095         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1096                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1097                 struct resv_map *reservations = vma_resv_map(vma);
1098
1099                 /* Mark this page used in the map. */
1100                 region_add(&reservations->regions, idx, idx + 1);
1101         }
1102 }
1103
1104 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1105                                     unsigned long addr, int avoid_reserve)
1106 {
1107         struct hugepage_subpool *spool = subpool_vma(vma);
1108         struct hstate *h = hstate_vma(vma);
1109         struct page *page;
1110         long chg;
1111
1112         /*
1113          * Processes that did not create the mapping will have no
1114          * reserves and will not have accounted against subpool
1115          * limit. Check that the subpool limit can be made before
1116          * satisfying the allocation MAP_NORESERVE mappings may also
1117          * need pages and subpool limit allocated allocated if no reserve
1118          * mapping overlaps.
1119          */
1120         chg = vma_needs_reservation(h, vma, addr);
1121         if (chg < 0)
1122                 return ERR_PTR(-VM_FAULT_OOM);
1123         if (chg)
1124                 if (hugepage_subpool_get_pages(spool, chg))
1125                         return ERR_PTR(-VM_FAULT_SIGBUS);
1126
1127         spin_lock(&hugetlb_lock);
1128         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1129         spin_unlock(&hugetlb_lock);
1130
1131         if (!page) {
1132                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1133                 if (!page) {
1134                         hugepage_subpool_put_pages(spool, chg);
1135                         return ERR_PTR(-VM_FAULT_SIGBUS);
1136                 }
1137         }
1138
1139         set_page_private(page, (unsigned long)spool);
1140
1141         vma_commit_reservation(h, vma, addr);
1142
1143         return page;
1144 }
1145
1146 int __weak alloc_bootmem_huge_page(struct hstate *h)
1147 {
1148         struct huge_bootmem_page *m;
1149         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1150
1151         while (nr_nodes) {
1152                 void *addr;
1153
1154                 addr = __alloc_bootmem_node_nopanic(
1155                                 NODE_DATA(hstate_next_node_to_alloc(h,
1156                                                 &node_states[N_HIGH_MEMORY])),
1157                                 huge_page_size(h), huge_page_size(h), 0);
1158
1159                 if (addr) {
1160                         /*
1161                          * Use the beginning of the huge page to store the
1162                          * huge_bootmem_page struct (until gather_bootmem
1163                          * puts them into the mem_map).
1164                          */
1165                         m = addr;
1166                         goto found;
1167                 }
1168                 nr_nodes--;
1169         }
1170         return 0;
1171
1172 found:
1173         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1174         /* Put them into a private list first because mem_map is not up yet */
1175         list_add(&m->list, &huge_boot_pages);
1176         m->hstate = h;
1177         return 1;
1178 }
1179
1180 static void prep_compound_huge_page(struct page *page, int order)
1181 {
1182         if (unlikely(order > (MAX_ORDER - 1)))
1183                 prep_compound_gigantic_page(page, order);
1184         else
1185                 prep_compound_page(page, order);
1186 }
1187
1188 /* Put bootmem huge pages into the standard lists after mem_map is up */
1189 static void __init gather_bootmem_prealloc(void)
1190 {
1191         struct huge_bootmem_page *m;
1192
1193         list_for_each_entry(m, &huge_boot_pages, list) {
1194                 struct hstate *h = m->hstate;
1195                 struct page *page;
1196
1197 #ifdef CONFIG_HIGHMEM
1198                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1199                 free_bootmem_late((unsigned long)m,
1200                                   sizeof(struct huge_bootmem_page));
1201 #else
1202                 page = virt_to_page(m);
1203 #endif
1204                 __ClearPageReserved(page);
1205                 WARN_ON(page_count(page) != 1);
1206                 prep_compound_huge_page(page, h->order);
1207                 prep_new_huge_page(h, page, page_to_nid(page));
1208                 /*
1209                  * If we had gigantic hugepages allocated at boot time, we need
1210                  * to restore the 'stolen' pages to totalram_pages in order to
1211                  * fix confusing memory reports from free(1) and another
1212                  * side-effects, like CommitLimit going negative.
1213                  */
1214                 if (h->order > (MAX_ORDER - 1))
1215                         totalram_pages += 1 << h->order;
1216         }
1217 }
1218
1219 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1220 {
1221         unsigned long i;
1222
1223         for (i = 0; i < h->max_huge_pages; ++i) {
1224                 if (h->order >= MAX_ORDER) {
1225                         if (!alloc_bootmem_huge_page(h))
1226                                 break;
1227                 } else if (!alloc_fresh_huge_page(h,
1228                                          &node_states[N_HIGH_MEMORY]))
1229                         break;
1230         }
1231         h->max_huge_pages = i;
1232 }
1233
1234 static void __init hugetlb_init_hstates(void)
1235 {
1236         struct hstate *h;
1237
1238         for_each_hstate(h) {
1239                 /* oversize hugepages were init'ed in early boot */
1240                 if (h->order < MAX_ORDER)
1241                         hugetlb_hstate_alloc_pages(h);
1242         }
1243 }
1244
1245 static char * __init memfmt(char *buf, unsigned long n)
1246 {
1247         if (n >= (1UL << 30))
1248                 sprintf(buf, "%lu GB", n >> 30);
1249         else if (n >= (1UL << 20))
1250                 sprintf(buf, "%lu MB", n >> 20);
1251         else
1252                 sprintf(buf, "%lu KB", n >> 10);
1253         return buf;
1254 }
1255
1256 static void __init report_hugepages(void)
1257 {
1258         struct hstate *h;
1259
1260         for_each_hstate(h) {
1261                 char buf[32];
1262                 printk(KERN_INFO "HugeTLB registered %s page size, "
1263                                  "pre-allocated %ld pages\n",
1264                         memfmt(buf, huge_page_size(h)),
1265                         h->free_huge_pages);
1266         }
1267 }
1268
1269 #ifdef CONFIG_HIGHMEM
1270 static void try_to_free_low(struct hstate *h, unsigned long count,
1271                                                 nodemask_t *nodes_allowed)
1272 {
1273         int i;
1274
1275         if (h->order >= MAX_ORDER)
1276                 return;
1277
1278         for_each_node_mask(i, *nodes_allowed) {
1279                 struct page *page, *next;
1280                 struct list_head *freel = &h->hugepage_freelists[i];
1281                 list_for_each_entry_safe(page, next, freel, lru) {
1282                         if (count >= h->nr_huge_pages)
1283                                 return;
1284                         if (PageHighMem(page))
1285                                 continue;
1286                         list_del(&page->lru);
1287                         update_and_free_page(h, page);
1288                         h->free_huge_pages--;
1289                         h->free_huge_pages_node[page_to_nid(page)]--;
1290                 }
1291         }
1292 }
1293 #else
1294 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1295                                                 nodemask_t *nodes_allowed)
1296 {
1297 }
1298 #endif
1299
1300 /*
1301  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1302  * balanced by operating on them in a round-robin fashion.
1303  * Returns 1 if an adjustment was made.
1304  */
1305 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1306                                 int delta)
1307 {
1308         int start_nid, next_nid;
1309         int ret = 0;
1310
1311         VM_BUG_ON(delta != -1 && delta != 1);
1312
1313         if (delta < 0)
1314                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1315         else
1316                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1317         next_nid = start_nid;
1318
1319         do {
1320                 int nid = next_nid;
1321                 if (delta < 0)  {
1322                         /*
1323                          * To shrink on this node, there must be a surplus page
1324                          */
1325                         if (!h->surplus_huge_pages_node[nid]) {
1326                                 next_nid = hstate_next_node_to_alloc(h,
1327                                                                 nodes_allowed);
1328                                 continue;
1329                         }
1330                 }
1331                 if (delta > 0) {
1332                         /*
1333                          * Surplus cannot exceed the total number of pages
1334                          */
1335                         if (h->surplus_huge_pages_node[nid] >=
1336                                                 h->nr_huge_pages_node[nid]) {
1337                                 next_nid = hstate_next_node_to_free(h,
1338                                                                 nodes_allowed);
1339                                 continue;
1340                         }
1341                 }
1342
1343                 h->surplus_huge_pages += delta;
1344                 h->surplus_huge_pages_node[nid] += delta;
1345                 ret = 1;
1346                 break;
1347         } while (next_nid != start_nid);
1348
1349         return ret;
1350 }
1351
1352 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1353 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1354                                                 nodemask_t *nodes_allowed)
1355 {
1356         unsigned long min_count, ret;
1357
1358         if (h->order >= MAX_ORDER)
1359                 return h->max_huge_pages;
1360
1361         /*
1362          * Increase the pool size
1363          * First take pages out of surplus state.  Then make up the
1364          * remaining difference by allocating fresh huge pages.
1365          *
1366          * We might race with alloc_buddy_huge_page() here and be unable
1367          * to convert a surplus huge page to a normal huge page. That is
1368          * not critical, though, it just means the overall size of the
1369          * pool might be one hugepage larger than it needs to be, but
1370          * within all the constraints specified by the sysctls.
1371          */
1372         spin_lock(&hugetlb_lock);
1373         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1374                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1375                         break;
1376         }
1377
1378         while (count > persistent_huge_pages(h)) {
1379                 /*
1380                  * If this allocation races such that we no longer need the
1381                  * page, free_huge_page will handle it by freeing the page
1382                  * and reducing the surplus.
1383                  */
1384                 spin_unlock(&hugetlb_lock);
1385                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1386                 spin_lock(&hugetlb_lock);
1387                 if (!ret)
1388                         goto out;
1389
1390                 /* Bail for signals. Probably ctrl-c from user */
1391                 if (signal_pending(current))
1392                         goto out;
1393         }
1394
1395         /*
1396          * Decrease the pool size
1397          * First return free pages to the buddy allocator (being careful
1398          * to keep enough around to satisfy reservations).  Then place
1399          * pages into surplus state as needed so the pool will shrink
1400          * to the desired size as pages become free.
1401          *
1402          * By placing pages into the surplus state independent of the
1403          * overcommit value, we are allowing the surplus pool size to
1404          * exceed overcommit. There are few sane options here. Since
1405          * alloc_buddy_huge_page() is checking the global counter,
1406          * though, we'll note that we're not allowed to exceed surplus
1407          * and won't grow the pool anywhere else. Not until one of the
1408          * sysctls are changed, or the surplus pages go out of use.
1409          */
1410         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1411         min_count = max(count, min_count);
1412         try_to_free_low(h, min_count, nodes_allowed);
1413         while (min_count < persistent_huge_pages(h)) {
1414                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1415                         break;
1416         }
1417         while (count < persistent_huge_pages(h)) {
1418                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1419                         break;
1420         }
1421 out:
1422         ret = persistent_huge_pages(h);
1423         spin_unlock(&hugetlb_lock);
1424         return ret;
1425 }
1426
1427 #define HSTATE_ATTR_RO(_name) \
1428         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1429
1430 #define HSTATE_ATTR(_name) \
1431         static struct kobj_attribute _name##_attr = \
1432                 __ATTR(_name, 0644, _name##_show, _name##_store)
1433
1434 static struct kobject *hugepages_kobj;
1435 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1436
1437 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1438
1439 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1440 {
1441         int i;
1442
1443         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1444                 if (hstate_kobjs[i] == kobj) {
1445                         if (nidp)
1446                                 *nidp = NUMA_NO_NODE;
1447                         return &hstates[i];
1448                 }
1449
1450         return kobj_to_node_hstate(kobj, nidp);
1451 }
1452
1453 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1454                                         struct kobj_attribute *attr, char *buf)
1455 {
1456         struct hstate *h;
1457         unsigned long nr_huge_pages;
1458         int nid;
1459
1460         h = kobj_to_hstate(kobj, &nid);
1461         if (nid == NUMA_NO_NODE)
1462                 nr_huge_pages = h->nr_huge_pages;
1463         else
1464                 nr_huge_pages = h->nr_huge_pages_node[nid];
1465
1466         return sprintf(buf, "%lu\n", nr_huge_pages);
1467 }
1468
1469 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1470                         struct kobject *kobj, struct kobj_attribute *attr,
1471                         const char *buf, size_t len)
1472 {
1473         int err;
1474         int nid;
1475         unsigned long count;
1476         struct hstate *h;
1477         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1478
1479         err = strict_strtoul(buf, 10, &count);
1480         if (err)
1481                 goto out;
1482
1483         h = kobj_to_hstate(kobj, &nid);
1484         if (h->order >= MAX_ORDER) {
1485                 err = -EINVAL;
1486                 goto out;
1487         }
1488
1489         if (nid == NUMA_NO_NODE) {
1490                 /*
1491                  * global hstate attribute
1492                  */
1493                 if (!(obey_mempolicy &&
1494                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1495                         NODEMASK_FREE(nodes_allowed);
1496                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1497                 }
1498         } else if (nodes_allowed) {
1499                 /*
1500                  * per node hstate attribute: adjust count to global,
1501                  * but restrict alloc/free to the specified node.
1502                  */
1503                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1504                 init_nodemask_of_node(nodes_allowed, nid);
1505         } else
1506                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1507
1508         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1509
1510         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1511                 NODEMASK_FREE(nodes_allowed);
1512
1513         return len;
1514 out:
1515         NODEMASK_FREE(nodes_allowed);
1516         return err;
1517 }
1518
1519 static ssize_t nr_hugepages_show(struct kobject *kobj,
1520                                        struct kobj_attribute *attr, char *buf)
1521 {
1522         return nr_hugepages_show_common(kobj, attr, buf);
1523 }
1524
1525 static ssize_t nr_hugepages_store(struct kobject *kobj,
1526                struct kobj_attribute *attr, const char *buf, size_t len)
1527 {
1528         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1529 }
1530 HSTATE_ATTR(nr_hugepages);
1531
1532 #ifdef CONFIG_NUMA
1533
1534 /*
1535  * hstate attribute for optionally mempolicy-based constraint on persistent
1536  * huge page alloc/free.
1537  */
1538 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1539                                        struct kobj_attribute *attr, char *buf)
1540 {
1541         return nr_hugepages_show_common(kobj, attr, buf);
1542 }
1543
1544 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1545                struct kobj_attribute *attr, const char *buf, size_t len)
1546 {
1547         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1548 }
1549 HSTATE_ATTR(nr_hugepages_mempolicy);
1550 #endif
1551
1552
1553 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1554                                         struct kobj_attribute *attr, char *buf)
1555 {
1556         struct hstate *h = kobj_to_hstate(kobj, NULL);
1557         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1558 }
1559
1560 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1561                 struct kobj_attribute *attr, const char *buf, size_t count)
1562 {
1563         int err;
1564         unsigned long input;
1565         struct hstate *h = kobj_to_hstate(kobj, NULL);
1566
1567         if (h->order >= MAX_ORDER)
1568                 return -EINVAL;
1569
1570         err = strict_strtoul(buf, 10, &input);
1571         if (err)
1572                 return err;
1573
1574         spin_lock(&hugetlb_lock);
1575         h->nr_overcommit_huge_pages = input;
1576         spin_unlock(&hugetlb_lock);
1577
1578         return count;
1579 }
1580 HSTATE_ATTR(nr_overcommit_hugepages);
1581
1582 static ssize_t free_hugepages_show(struct kobject *kobj,
1583                                         struct kobj_attribute *attr, char *buf)
1584 {
1585         struct hstate *h;
1586         unsigned long free_huge_pages;
1587         int nid;
1588
1589         h = kobj_to_hstate(kobj, &nid);
1590         if (nid == NUMA_NO_NODE)
1591                 free_huge_pages = h->free_huge_pages;
1592         else
1593                 free_huge_pages = h->free_huge_pages_node[nid];
1594
1595         return sprintf(buf, "%lu\n", free_huge_pages);
1596 }
1597 HSTATE_ATTR_RO(free_hugepages);
1598
1599 static ssize_t resv_hugepages_show(struct kobject *kobj,
1600                                         struct kobj_attribute *attr, char *buf)
1601 {
1602         struct hstate *h = kobj_to_hstate(kobj, NULL);
1603         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1604 }
1605 HSTATE_ATTR_RO(resv_hugepages);
1606
1607 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1608                                         struct kobj_attribute *attr, char *buf)
1609 {
1610         struct hstate *h;
1611         unsigned long surplus_huge_pages;
1612         int nid;
1613
1614         h = kobj_to_hstate(kobj, &nid);
1615         if (nid == NUMA_NO_NODE)
1616                 surplus_huge_pages = h->surplus_huge_pages;
1617         else
1618                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1619
1620         return sprintf(buf, "%lu\n", surplus_huge_pages);
1621 }
1622 HSTATE_ATTR_RO(surplus_hugepages);
1623
1624 static struct attribute *hstate_attrs[] = {
1625         &nr_hugepages_attr.attr,
1626         &nr_overcommit_hugepages_attr.attr,
1627         &free_hugepages_attr.attr,
1628         &resv_hugepages_attr.attr,
1629         &surplus_hugepages_attr.attr,
1630 #ifdef CONFIG_NUMA
1631         &nr_hugepages_mempolicy_attr.attr,
1632 #endif
1633         NULL,
1634 };
1635
1636 static struct attribute_group hstate_attr_group = {
1637         .attrs = hstate_attrs,
1638 };
1639
1640 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1641                                     struct kobject **hstate_kobjs,
1642                                     struct attribute_group *hstate_attr_group)
1643 {
1644         int retval;
1645         int hi = h - hstates;
1646
1647         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1648         if (!hstate_kobjs[hi])
1649                 return -ENOMEM;
1650
1651         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1652         if (retval)
1653                 kobject_put(hstate_kobjs[hi]);
1654
1655         return retval;
1656 }
1657
1658 static void __init hugetlb_sysfs_init(void)
1659 {
1660         struct hstate *h;
1661         int err;
1662
1663         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1664         if (!hugepages_kobj)
1665                 return;
1666
1667         for_each_hstate(h) {
1668                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1669                                          hstate_kobjs, &hstate_attr_group);
1670                 if (err)
1671                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1672                                                                 h->name);
1673         }
1674 }
1675
1676 #ifdef CONFIG_NUMA
1677
1678 /*
1679  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1680  * with node sysdevs in node_devices[] using a parallel array.  The array
1681  * index of a node sysdev or _hstate == node id.
1682  * This is here to avoid any static dependency of the node sysdev driver, in
1683  * the base kernel, on the hugetlb module.
1684  */
1685 struct node_hstate {
1686         struct kobject          *hugepages_kobj;
1687         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1688 };
1689 struct node_hstate node_hstates[MAX_NUMNODES];
1690
1691 /*
1692  * A subset of global hstate attributes for node sysdevs
1693  */
1694 static struct attribute *per_node_hstate_attrs[] = {
1695         &nr_hugepages_attr.attr,
1696         &free_hugepages_attr.attr,
1697         &surplus_hugepages_attr.attr,
1698         NULL,
1699 };
1700
1701 static struct attribute_group per_node_hstate_attr_group = {
1702         .attrs = per_node_hstate_attrs,
1703 };
1704
1705 /*
1706  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1707  * Returns node id via non-NULL nidp.
1708  */
1709 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1710 {
1711         int nid;
1712
1713         for (nid = 0; nid < nr_node_ids; nid++) {
1714                 struct node_hstate *nhs = &node_hstates[nid];
1715                 int i;
1716                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1717                         if (nhs->hstate_kobjs[i] == kobj) {
1718                                 if (nidp)
1719                                         *nidp = nid;
1720                                 return &hstates[i];
1721                         }
1722         }
1723
1724         BUG();
1725         return NULL;
1726 }
1727
1728 /*
1729  * Unregister hstate attributes from a single node sysdev.
1730  * No-op if no hstate attributes attached.
1731  */
1732 void hugetlb_unregister_node(struct node *node)
1733 {
1734         struct hstate *h;
1735         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1736
1737         if (!nhs->hugepages_kobj)
1738                 return;         /* no hstate attributes */
1739
1740         for_each_hstate(h)
1741                 if (nhs->hstate_kobjs[h - hstates]) {
1742                         kobject_put(nhs->hstate_kobjs[h - hstates]);
1743                         nhs->hstate_kobjs[h - hstates] = NULL;
1744                 }
1745
1746         kobject_put(nhs->hugepages_kobj);
1747         nhs->hugepages_kobj = NULL;
1748 }
1749
1750 /*
1751  * hugetlb module exit:  unregister hstate attributes from node sysdevs
1752  * that have them.
1753  */
1754 static void hugetlb_unregister_all_nodes(void)
1755 {
1756         int nid;
1757
1758         /*
1759          * disable node sysdev registrations.
1760          */
1761         register_hugetlbfs_with_node(NULL, NULL);
1762
1763         /*
1764          * remove hstate attributes from any nodes that have them.
1765          */
1766         for (nid = 0; nid < nr_node_ids; nid++)
1767                 hugetlb_unregister_node(&node_devices[nid]);
1768 }
1769
1770 /*
1771  * Register hstate attributes for a single node sysdev.
1772  * No-op if attributes already registered.
1773  */
1774 void hugetlb_register_node(struct node *node)
1775 {
1776         struct hstate *h;
1777         struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1778         int err;
1779
1780         if (nhs->hugepages_kobj)
1781                 return;         /* already allocated */
1782
1783         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1784                                                         &node->sysdev.kobj);
1785         if (!nhs->hugepages_kobj)
1786                 return;
1787
1788         for_each_hstate(h) {
1789                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1790                                                 nhs->hstate_kobjs,
1791                                                 &per_node_hstate_attr_group);
1792                 if (err) {
1793                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1794                                         " for node %d\n",
1795                                                 h->name, node->sysdev.id);
1796                         hugetlb_unregister_node(node);
1797                         break;
1798                 }
1799         }
1800 }
1801
1802 /*
1803  * hugetlb init time:  register hstate attributes for all registered node
1804  * sysdevs of nodes that have memory.  All on-line nodes should have
1805  * registered their associated sysdev by this time.
1806  */
1807 static void hugetlb_register_all_nodes(void)
1808 {
1809         int nid;
1810
1811         for_each_node_state(nid, N_HIGH_MEMORY) {
1812                 struct node *node = &node_devices[nid];
1813                 if (node->sysdev.id == nid)
1814                         hugetlb_register_node(node);
1815         }
1816
1817         /*
1818          * Let the node sysdev driver know we're here so it can
1819          * [un]register hstate attributes on node hotplug.
1820          */
1821         register_hugetlbfs_with_node(hugetlb_register_node,
1822                                      hugetlb_unregister_node);
1823 }
1824 #else   /* !CONFIG_NUMA */
1825
1826 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1827 {
1828         BUG();
1829         if (nidp)
1830                 *nidp = -1;
1831         return NULL;
1832 }
1833
1834 static void hugetlb_unregister_all_nodes(void) { }
1835
1836 static void hugetlb_register_all_nodes(void) { }
1837
1838 #endif
1839
1840 static void __exit hugetlb_exit(void)
1841 {
1842         struct hstate *h;
1843
1844         hugetlb_unregister_all_nodes();
1845
1846         for_each_hstate(h) {
1847                 kobject_put(hstate_kobjs[h - hstates]);
1848         }
1849
1850         kobject_put(hugepages_kobj);
1851 }
1852 module_exit(hugetlb_exit);
1853
1854 static int __init hugetlb_init(void)
1855 {
1856         /* Some platform decide whether they support huge pages at boot
1857          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1858          * there is no such support
1859          */
1860         if (HPAGE_SHIFT == 0)
1861                 return 0;
1862
1863         if (!size_to_hstate(default_hstate_size)) {
1864                 default_hstate_size = HPAGE_SIZE;
1865                 if (!size_to_hstate(default_hstate_size))
1866                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1867         }
1868         default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1869         if (default_hstate_max_huge_pages)
1870                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1871
1872         hugetlb_init_hstates();
1873
1874         gather_bootmem_prealloc();
1875
1876         report_hugepages();
1877
1878         hugetlb_sysfs_init();
1879
1880         hugetlb_register_all_nodes();
1881
1882         return 0;
1883 }
1884 module_init(hugetlb_init);
1885
1886 /* Should be called on processing a hugepagesz=... option */
1887 void __init hugetlb_add_hstate(unsigned order)
1888 {
1889         struct hstate *h;
1890         unsigned long i;
1891
1892         if (size_to_hstate(PAGE_SIZE << order)) {
1893                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1894                 return;
1895         }
1896         BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1897         BUG_ON(order == 0);
1898         h = &hstates[max_hstate++];
1899         h->order = order;
1900         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1901         h->nr_huge_pages = 0;
1902         h->free_huge_pages = 0;
1903         for (i = 0; i < MAX_NUMNODES; ++i)
1904                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1905         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1906         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1907         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1908                                         huge_page_size(h)/1024);
1909
1910         parsed_hstate = h;
1911 }
1912
1913 static int __init hugetlb_nrpages_setup(char *s)
1914 {
1915         unsigned long *mhp;
1916         static unsigned long *last_mhp;
1917
1918         /*
1919          * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1920          * so this hugepages= parameter goes to the "default hstate".
1921          */
1922         if (!max_hstate)
1923                 mhp = &default_hstate_max_huge_pages;
1924         else
1925                 mhp = &parsed_hstate->max_huge_pages;
1926
1927         if (mhp == last_mhp) {
1928                 printk(KERN_WARNING "hugepages= specified twice without "
1929                         "interleaving hugepagesz=, ignoring\n");
1930                 return 1;
1931         }
1932
1933         if (sscanf(s, "%lu", mhp) <= 0)
1934                 *mhp = 0;
1935
1936         /*
1937          * Global state is always initialized later in hugetlb_init.
1938          * But we need to allocate >= MAX_ORDER hstates here early to still
1939          * use the bootmem allocator.
1940          */
1941         if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1942                 hugetlb_hstate_alloc_pages(parsed_hstate);
1943
1944         last_mhp = mhp;
1945
1946         return 1;
1947 }
1948 __setup("hugepages=", hugetlb_nrpages_setup);
1949
1950 static int __init hugetlb_default_setup(char *s)
1951 {
1952         default_hstate_size = memparse(s, &s);
1953         return 1;
1954 }
1955 __setup("default_hugepagesz=", hugetlb_default_setup);
1956
1957 static unsigned int cpuset_mems_nr(unsigned int *array)
1958 {
1959         int node;
1960         unsigned int nr = 0;
1961
1962         for_each_node_mask(node, cpuset_current_mems_allowed)
1963                 nr += array[node];
1964
1965         return nr;
1966 }
1967
1968 #ifdef CONFIG_SYSCTL
1969 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1970                          struct ctl_table *table, int write,
1971                          void __user *buffer, size_t *length, loff_t *ppos)
1972 {
1973         struct hstate *h = &default_hstate;
1974         unsigned long tmp;
1975         int ret;
1976
1977         tmp = h->max_huge_pages;
1978
1979         if (write && h->order >= MAX_ORDER)
1980                 return -EINVAL;
1981
1982         table->data = &tmp;
1983         table->maxlen = sizeof(unsigned long);
1984         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1985         if (ret)
1986                 goto out;
1987
1988         if (write) {
1989                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1990                                                 GFP_KERNEL | __GFP_NORETRY);
1991                 if (!(obey_mempolicy &&
1992                                init_nodemask_of_mempolicy(nodes_allowed))) {
1993                         NODEMASK_FREE(nodes_allowed);
1994                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1995                 }
1996                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1997
1998                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1999                         NODEMASK_FREE(nodes_allowed);
2000         }
2001 out:
2002         return ret;
2003 }
2004
2005 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2006                           void __user *buffer, size_t *length, loff_t *ppos)
2007 {
2008
2009         return hugetlb_sysctl_handler_common(false, table, write,
2010                                                         buffer, length, ppos);
2011 }
2012
2013 #ifdef CONFIG_NUMA
2014 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2015                           void __user *buffer, size_t *length, loff_t *ppos)
2016 {
2017         return hugetlb_sysctl_handler_common(true, table, write,
2018                                                         buffer, length, ppos);
2019 }
2020 #endif /* CONFIG_NUMA */
2021
2022 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2023                         void __user *buffer,
2024                         size_t *length, loff_t *ppos)
2025 {
2026         proc_dointvec(table, write, buffer, length, ppos);
2027         if (hugepages_treat_as_movable)
2028                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2029         else
2030                 htlb_alloc_mask = GFP_HIGHUSER;
2031         return 0;
2032 }
2033
2034 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2035                         void __user *buffer,
2036                         size_t *length, loff_t *ppos)
2037 {
2038         struct hstate *h = &default_hstate;
2039         unsigned long tmp;
2040         int ret;
2041
2042         tmp = h->nr_overcommit_huge_pages;
2043
2044         if (write && h->order >= MAX_ORDER)
2045                 return -EINVAL;
2046
2047         table->data = &tmp;
2048         table->maxlen = sizeof(unsigned long);
2049         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2050         if (ret)
2051                 goto out;
2052
2053         if (write) {
2054                 spin_lock(&hugetlb_lock);
2055                 h->nr_overcommit_huge_pages = tmp;
2056                 spin_unlock(&hugetlb_lock);
2057         }
2058 out:
2059         return ret;
2060 }
2061
2062 #endif /* CONFIG_SYSCTL */
2063
2064 void hugetlb_report_meminfo(struct seq_file *m)
2065 {
2066         struct hstate *h = &default_hstate;
2067         seq_printf(m,
2068                         "HugePages_Total:   %5lu\n"
2069                         "HugePages_Free:    %5lu\n"
2070                         "HugePages_Rsvd:    %5lu\n"
2071                         "HugePages_Surp:    %5lu\n"
2072                         "Hugepagesize:   %8lu kB\n",
2073                         h->nr_huge_pages,
2074                         h->free_huge_pages,
2075                         h->resv_huge_pages,
2076                         h->surplus_huge_pages,
2077                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2078 }
2079
2080 int hugetlb_report_node_meminfo(int nid, char *buf)
2081 {
2082         struct hstate *h = &default_hstate;
2083         return sprintf(buf,
2084                 "Node %d HugePages_Total: %5u\n"
2085                 "Node %d HugePages_Free:  %5u\n"
2086                 "Node %d HugePages_Surp:  %5u\n",
2087                 nid, h->nr_huge_pages_node[nid],
2088                 nid, h->free_huge_pages_node[nid],
2089                 nid, h->surplus_huge_pages_node[nid]);
2090 }
2091
2092 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2093 unsigned long hugetlb_total_pages(void)
2094 {
2095         struct hstate *h = &default_hstate;
2096         return h->nr_huge_pages * pages_per_huge_page(h);
2097 }
2098
2099 static int hugetlb_acct_memory(struct hstate *h, long delta)
2100 {
2101         int ret = -ENOMEM;
2102
2103         spin_lock(&hugetlb_lock);
2104         /*
2105          * When cpuset is configured, it breaks the strict hugetlb page
2106          * reservation as the accounting is done on a global variable. Such
2107          * reservation is completely rubbish in the presence of cpuset because
2108          * the reservation is not checked against page availability for the
2109          * current cpuset. Application can still potentially OOM'ed by kernel
2110          * with lack of free htlb page in cpuset that the task is in.
2111          * Attempt to enforce strict accounting with cpuset is almost
2112          * impossible (or too ugly) because cpuset is too fluid that
2113          * task or memory node can be dynamically moved between cpusets.
2114          *
2115          * The change of semantics for shared hugetlb mapping with cpuset is
2116          * undesirable. However, in order to preserve some of the semantics,
2117          * we fall back to check against current free page availability as
2118          * a best attempt and hopefully to minimize the impact of changing
2119          * semantics that cpuset has.
2120          */
2121         if (delta > 0) {
2122                 if (gather_surplus_pages(h, delta) < 0)
2123                         goto out;
2124
2125                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2126                         return_unused_surplus_pages(h, delta);
2127                         goto out;
2128                 }
2129         }
2130
2131         ret = 0;
2132         if (delta < 0)
2133                 return_unused_surplus_pages(h, (unsigned long) -delta);
2134
2135 out:
2136         spin_unlock(&hugetlb_lock);
2137         return ret;
2138 }
2139
2140 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2141 {
2142         struct resv_map *reservations = vma_resv_map(vma);
2143
2144         /*
2145          * This new VMA should share its siblings reservation map if present.
2146          * The VMA will only ever have a valid reservation map pointer where
2147          * it is being copied for another still existing VMA.  As that VMA
2148          * has a reference to the reservation map it cannot disappear until
2149          * after this open call completes.  It is therefore safe to take a
2150          * new reference here without additional locking.
2151          */
2152         if (reservations)
2153                 kref_get(&reservations->refs);
2154 }
2155
2156 static void resv_map_put(struct vm_area_struct *vma)
2157 {
2158         struct resv_map *reservations = vma_resv_map(vma);
2159
2160         if (!reservations)
2161                 return;
2162         kref_put(&reservations->refs, resv_map_release);
2163 }
2164
2165 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2166 {
2167         struct hstate *h = hstate_vma(vma);
2168         struct resv_map *reservations = vma_resv_map(vma);
2169         struct hugepage_subpool *spool = subpool_vma(vma);
2170         unsigned long reserve;
2171         unsigned long start;
2172         unsigned long end;
2173
2174         if (reservations) {
2175                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2176                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2177
2178                 reserve = (end - start) -
2179                         region_count(&reservations->regions, start, end);
2180
2181                 resv_map_put(vma);
2182
2183                 if (reserve) {
2184                         hugetlb_acct_memory(h, -reserve);
2185                         hugepage_subpool_put_pages(spool, reserve);
2186                 }
2187         }
2188 }
2189
2190 /*
2191  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2192  * handle_mm_fault() to try to instantiate regular-sized pages in the
2193  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
2194  * this far.
2195  */
2196 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2197 {
2198         BUG();
2199         return 0;
2200 }
2201
2202 const struct vm_operations_struct hugetlb_vm_ops = {
2203         .fault = hugetlb_vm_op_fault,
2204         .open = hugetlb_vm_op_open,
2205         .close = hugetlb_vm_op_close,
2206 };
2207
2208 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2209                                 int writable)
2210 {
2211         pte_t entry;
2212
2213         if (writable) {
2214                 entry =
2215                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2216         } else {
2217                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2218         }
2219         entry = pte_mkyoung(entry);
2220         entry = pte_mkhuge(entry);
2221
2222         return entry;
2223 }
2224
2225 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2226                                    unsigned long address, pte_t *ptep)
2227 {
2228         pte_t entry;
2229
2230         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2231         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2232                 update_mmu_cache(vma, address, ptep);
2233 }
2234
2235
2236 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2237                             struct vm_area_struct *vma)
2238 {
2239         pte_t *src_pte, *dst_pte, entry;
2240         struct page *ptepage;
2241         unsigned long addr;
2242         int cow;
2243         struct hstate *h = hstate_vma(vma);
2244         unsigned long sz = huge_page_size(h);
2245
2246         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2247
2248         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2249                 src_pte = huge_pte_offset(src, addr);
2250                 if (!src_pte)
2251                         continue;
2252                 dst_pte = huge_pte_alloc(dst, addr, sz);
2253                 if (!dst_pte)
2254                         goto nomem;
2255
2256                 /* If the pagetables are shared don't copy or take references */
2257                 if (dst_pte == src_pte)
2258                         continue;
2259
2260                 spin_lock(&dst->page_table_lock);
2261                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2262                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2263                         if (cow)
2264                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2265                         entry = huge_ptep_get(src_pte);
2266                         ptepage = pte_page(entry);
2267                         get_page(ptepage);
2268                         page_dup_rmap(ptepage);
2269                         set_huge_pte_at(dst, addr, dst_pte, entry);
2270                 }
2271                 spin_unlock(&src->page_table_lock);
2272                 spin_unlock(&dst->page_table_lock);
2273         }
2274         return 0;
2275
2276 nomem:
2277         return -ENOMEM;
2278 }
2279
2280 static int is_hugetlb_entry_migration(pte_t pte)
2281 {
2282         swp_entry_t swp;
2283
2284         if (huge_pte_none(pte) || pte_present(pte))
2285                 return 0;
2286         swp = pte_to_swp_entry(pte);
2287         if (non_swap_entry(swp) && is_migration_entry(swp))
2288                 return 1;
2289         else
2290                 return 0;
2291 }
2292
2293 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2294 {
2295         swp_entry_t swp;
2296
2297         if (huge_pte_none(pte) || pte_present(pte))
2298                 return 0;
2299         swp = pte_to_swp_entry(pte);
2300         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2301                 return 1;
2302         else
2303                 return 0;
2304 }
2305
2306 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2307                             unsigned long end, struct page *ref_page)
2308 {
2309         struct mm_struct *mm = vma->vm_mm;
2310         unsigned long address;
2311         pte_t *ptep;
2312         pte_t pte;
2313         struct page *page;
2314         struct page *tmp;
2315         struct hstate *h = hstate_vma(vma);
2316         unsigned long sz = huge_page_size(h);
2317
2318         /*
2319          * A page gathering list, protected by per file i_mmap_mutex. The
2320          * lock is used to avoid list corruption from multiple unmapping
2321          * of the same page since we are using page->lru.
2322          */
2323         LIST_HEAD(page_list);
2324
2325         WARN_ON(!is_vm_hugetlb_page(vma));
2326         BUG_ON(start & ~huge_page_mask(h));
2327         BUG_ON(end & ~huge_page_mask(h));
2328
2329         mmu_notifier_invalidate_range_start(mm, start, end);
2330         spin_lock(&mm->page_table_lock);
2331         for (address = start; address < end; address += sz) {
2332                 ptep = huge_pte_offset(mm, address);
2333                 if (!ptep)
2334                         continue;
2335
2336                 if (huge_pmd_unshare(mm, &address, ptep))
2337                         continue;
2338
2339                 /*
2340                  * If a reference page is supplied, it is because a specific
2341                  * page is being unmapped, not a range. Ensure the page we
2342                  * are about to unmap is the actual page of interest.
2343                  */
2344                 if (ref_page) {
2345                         pte = huge_ptep_get(ptep);
2346                         if (huge_pte_none(pte))
2347                                 continue;
2348                         page = pte_page(pte);
2349                         if (page != ref_page)
2350                                 continue;
2351
2352                         /*
2353                          * Mark the VMA as having unmapped its page so that
2354                          * future faults in this VMA will fail rather than
2355                          * looking like data was lost
2356                          */
2357                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2358                 }
2359
2360                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2361                 if (huge_pte_none(pte))
2362                         continue;
2363
2364                 /*
2365                  * HWPoisoned hugepage is already unmapped and dropped reference
2366                  */
2367                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2368                         continue;
2369
2370                 page = pte_page(pte);
2371                 if (pte_dirty(pte))
2372                         set_page_dirty(page);
2373                 list_add(&page->lru, &page_list);
2374         }
2375         spin_unlock(&mm->page_table_lock);
2376         flush_tlb_range(vma, start, end);
2377         mmu_notifier_invalidate_range_end(mm, start, end);
2378         list_for_each_entry_safe(page, tmp, &page_list, lru) {
2379                 page_remove_rmap(page);
2380                 list_del(&page->lru);
2381                 put_page(page);
2382         }
2383 }
2384
2385 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2386                           unsigned long end, struct page *ref_page)
2387 {
2388         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2389         __unmap_hugepage_range(vma, start, end, ref_page);
2390         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2391 }
2392
2393 /*
2394  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2395  * mappping it owns the reserve page for. The intention is to unmap the page
2396  * from other VMAs and let the children be SIGKILLed if they are faulting the
2397  * same region.
2398  */
2399 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2400                                 struct page *page, unsigned long address)
2401 {
2402         struct hstate *h = hstate_vma(vma);
2403         struct vm_area_struct *iter_vma;
2404         struct address_space *mapping;
2405         struct prio_tree_iter iter;
2406         pgoff_t pgoff;
2407
2408         /*
2409          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2410          * from page cache lookup which is in HPAGE_SIZE units.
2411          */
2412         address = address & huge_page_mask(h);
2413         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2414                 + (vma->vm_pgoff >> PAGE_SHIFT);
2415         mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2416
2417         /*
2418          * Take the mapping lock for the duration of the table walk. As
2419          * this mapping should be shared between all the VMAs,
2420          * __unmap_hugepage_range() is called as the lock is already held
2421          */
2422         mutex_lock(&mapping->i_mmap_mutex);
2423         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2424                 /* Do not unmap the current VMA */
2425                 if (iter_vma == vma)
2426                         continue;
2427
2428                 /*
2429                  * Unmap the page from other VMAs without their own reserves.
2430                  * They get marked to be SIGKILLed if they fault in these
2431                  * areas. This is because a future no-page fault on this VMA
2432                  * could insert a zeroed page instead of the data existing
2433                  * from the time of fork. This would look like data corruption
2434                  */
2435                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2436                         __unmap_hugepage_range(iter_vma,
2437                                 address, address + huge_page_size(h),
2438                                 page);
2439         }
2440         mutex_unlock(&mapping->i_mmap_mutex);
2441
2442         return 1;
2443 }
2444
2445 /*
2446  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2447  */
2448 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2449                         unsigned long address, pte_t *ptep, pte_t pte,
2450                         struct page *pagecache_page)
2451 {
2452         struct hstate *h = hstate_vma(vma);
2453         struct page *old_page, *new_page;
2454         int avoidcopy;
2455         int outside_reserve = 0;
2456
2457         old_page = pte_page(pte);
2458
2459 retry_avoidcopy:
2460         /* If no-one else is actually using this page, avoid the copy
2461          * and just make the page writable */
2462         avoidcopy = (page_mapcount(old_page) == 1);
2463         if (avoidcopy) {
2464                 if (PageAnon(old_page))
2465                         page_move_anon_rmap(old_page, vma, address);
2466                 set_huge_ptep_writable(vma, address, ptep);
2467                 return 0;
2468         }
2469
2470         /*
2471          * If the process that created a MAP_PRIVATE mapping is about to
2472          * perform a COW due to a shared page count, attempt to satisfy
2473          * the allocation without using the existing reserves. The pagecache
2474          * page is used to determine if the reserve at this address was
2475          * consumed or not. If reserves were used, a partial faulted mapping
2476          * at the time of fork() could consume its reserves on COW instead
2477          * of the full address range.
2478          */
2479         if (!(vma->vm_flags & VM_MAYSHARE) &&
2480                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2481                         old_page != pagecache_page)
2482                 outside_reserve = 1;
2483
2484         page_cache_get(old_page);
2485
2486         /* Drop page_table_lock as buddy allocator may be called */
2487         spin_unlock(&mm->page_table_lock);
2488         new_page = alloc_huge_page(vma, address, outside_reserve);
2489
2490         if (IS_ERR(new_page)) {
2491                 page_cache_release(old_page);
2492
2493                 /*
2494                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2495                  * it is due to references held by a child and an insufficient
2496                  * huge page pool. To guarantee the original mappers
2497                  * reliability, unmap the page from child processes. The child
2498                  * may get SIGKILLed if it later faults.
2499                  */
2500                 if (outside_reserve) {
2501                         BUG_ON(huge_pte_none(pte));
2502                         if (unmap_ref_private(mm, vma, old_page, address)) {
2503                                 BUG_ON(huge_pte_none(pte));
2504                                 spin_lock(&mm->page_table_lock);
2505                                 goto retry_avoidcopy;
2506                         }
2507                         WARN_ON_ONCE(1);
2508                 }
2509
2510                 /* Caller expects lock to be held */
2511                 spin_lock(&mm->page_table_lock);
2512                 return -PTR_ERR(new_page);
2513         }
2514
2515         /*
2516          * When the original hugepage is shared one, it does not have
2517          * anon_vma prepared.
2518          */
2519         if (unlikely(anon_vma_prepare(vma))) {
2520                 page_cache_release(new_page);
2521                 page_cache_release(old_page);
2522                 /* Caller expects lock to be held */
2523                 spin_lock(&mm->page_table_lock);
2524                 return VM_FAULT_OOM;
2525         }
2526
2527         copy_user_huge_page(new_page, old_page, address, vma,
2528                             pages_per_huge_page(h));
2529         __SetPageUptodate(new_page);
2530
2531         /*
2532          * Retake the page_table_lock to check for racing updates
2533          * before the page tables are altered
2534          */
2535         spin_lock(&mm->page_table_lock);
2536         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2537         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2538                 /* Break COW */
2539                 mmu_notifier_invalidate_range_start(mm,
2540                         address & huge_page_mask(h),
2541                         (address & huge_page_mask(h)) + huge_page_size(h));
2542                 huge_ptep_clear_flush(vma, address, ptep);
2543                 set_huge_pte_at(mm, address, ptep,
2544                                 make_huge_pte(vma, new_page, 1));
2545                 page_remove_rmap(old_page);
2546                 hugepage_add_new_anon_rmap(new_page, vma, address);
2547                 /* Make the old page be freed below */
2548                 new_page = old_page;
2549                 mmu_notifier_invalidate_range_end(mm,
2550                         address & huge_page_mask(h),
2551                         (address & huge_page_mask(h)) + huge_page_size(h));
2552         }
2553         page_cache_release(new_page);
2554         page_cache_release(old_page);
2555         return 0;
2556 }
2557
2558 /* Return the pagecache page at a given address within a VMA */
2559 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2560                         struct vm_area_struct *vma, unsigned long address)
2561 {
2562         struct address_space *mapping;
2563         pgoff_t idx;
2564
2565         mapping = vma->vm_file->f_mapping;
2566         idx = vma_hugecache_offset(h, vma, address);
2567
2568         return find_lock_page(mapping, idx);
2569 }
2570
2571 /*
2572  * Return whether there is a pagecache page to back given address within VMA.
2573  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2574  */
2575 static bool hugetlbfs_pagecache_present(struct hstate *h,
2576                         struct vm_area_struct *vma, unsigned long address)
2577 {
2578         struct address_space *mapping;
2579         pgoff_t idx;
2580         struct page *page;
2581
2582         mapping = vma->vm_file->f_mapping;
2583         idx = vma_hugecache_offset(h, vma, address);
2584
2585         page = find_get_page(mapping, idx);
2586         if (page)
2587                 put_page(page);
2588         return page != NULL;
2589 }
2590
2591 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2592                         unsigned long address, pte_t *ptep, unsigned int flags)
2593 {
2594         struct hstate *h = hstate_vma(vma);
2595         int ret = VM_FAULT_SIGBUS;
2596         pgoff_t idx;
2597         unsigned long size;
2598         struct page *page;
2599         struct address_space *mapping;
2600         pte_t new_pte;
2601
2602         /*
2603          * Currently, we are forced to kill the process in the event the
2604          * original mapper has unmapped pages from the child due to a failed
2605          * COW. Warn that such a situation has occurred as it may not be obvious
2606          */
2607         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2608                 printk(KERN_WARNING
2609                         "PID %d killed due to inadequate hugepage pool\n",
2610                         current->pid);
2611                 return ret;
2612         }
2613
2614         mapping = vma->vm_file->f_mapping;
2615         idx = vma_hugecache_offset(h, vma, address);
2616
2617         /*
2618          * Use page lock to guard against racing truncation
2619          * before we get page_table_lock.
2620          */
2621 retry:
2622         page = find_lock_page(mapping, idx);
2623         if (!page) {
2624                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2625                 if (idx >= size)
2626                         goto out;
2627                 page = alloc_huge_page(vma, address, 0);
2628                 if (IS_ERR(page)) {
2629                         ret = -PTR_ERR(page);
2630                         goto out;
2631                 }
2632                 clear_huge_page(page, address, pages_per_huge_page(h));
2633                 __SetPageUptodate(page);
2634
2635                 if (vma->vm_flags & VM_MAYSHARE) {
2636                         int err;
2637                         struct inode *inode = mapping->host;
2638
2639                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2640                         if (err) {
2641                                 put_page(page);
2642                                 if (err == -EEXIST)
2643                                         goto retry;
2644                                 goto out;
2645                         }
2646
2647                         spin_lock(&inode->i_lock);
2648                         inode->i_blocks += blocks_per_huge_page(h);
2649                         spin_unlock(&inode->i_lock);
2650                         page_dup_rmap(page);
2651                 } else {
2652                         lock_page(page);
2653                         if (unlikely(anon_vma_prepare(vma))) {
2654                                 ret = VM_FAULT_OOM;
2655                                 goto backout_unlocked;
2656                         }
2657                         hugepage_add_new_anon_rmap(page, vma, address);
2658                 }
2659         } else {
2660                 /*
2661                  * If memory error occurs between mmap() and fault, some process
2662                  * don't have hwpoisoned swap entry for errored virtual address.
2663                  * So we need to block hugepage fault by PG_hwpoison bit check.
2664                  */
2665                 if (unlikely(PageHWPoison(page))) {
2666                         ret = VM_FAULT_HWPOISON |
2667                               VM_FAULT_SET_HINDEX(h - hstates);
2668                         goto backout_unlocked;
2669                 }
2670                 page_dup_rmap(page);
2671         }
2672
2673         /*
2674          * If we are going to COW a private mapping later, we examine the
2675          * pending reservations for this page now. This will ensure that
2676          * any allocations necessary to record that reservation occur outside
2677          * the spinlock.
2678          */
2679         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2680                 if (vma_needs_reservation(h, vma, address) < 0) {
2681                         ret = VM_FAULT_OOM;
2682                         goto backout_unlocked;
2683                 }
2684
2685         spin_lock(&mm->page_table_lock);
2686         size = i_size_read(mapping->host) >> huge_page_shift(h);
2687         if (idx >= size)
2688                 goto backout;
2689
2690         ret = 0;
2691         if (!huge_pte_none(huge_ptep_get(ptep)))
2692                 goto backout;
2693
2694         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2695                                 && (vma->vm_flags & VM_SHARED)));
2696         set_huge_pte_at(mm, address, ptep, new_pte);
2697
2698         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2699                 /* Optimization, do the COW without a second fault */
2700                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2701         }
2702
2703         spin_unlock(&mm->page_table_lock);
2704         unlock_page(page);
2705 out:
2706         return ret;
2707
2708 backout:
2709         spin_unlock(&mm->page_table_lock);
2710 backout_unlocked:
2711         unlock_page(page);
2712         put_page(page);
2713         goto out;
2714 }
2715
2716 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2717                         unsigned long address, unsigned int flags)
2718 {
2719         pte_t *ptep;
2720         pte_t entry;
2721         int ret;
2722         struct page *page = NULL;
2723         struct page *pagecache_page = NULL;
2724         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2725         struct hstate *h = hstate_vma(vma);
2726
2727         ptep = huge_pte_offset(mm, address);
2728         if (ptep) {
2729                 entry = huge_ptep_get(ptep);
2730                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2731                         migration_entry_wait(mm, (pmd_t *)ptep, address);
2732                         return 0;
2733                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2734                         return VM_FAULT_HWPOISON_LARGE |
2735                                VM_FAULT_SET_HINDEX(h - hstates);
2736         }
2737
2738         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2739         if (!ptep)
2740                 return VM_FAULT_OOM;
2741
2742         /*
2743          * Serialize hugepage allocation and instantiation, so that we don't
2744          * get spurious allocation failures if two CPUs race to instantiate
2745          * the same page in the page cache.
2746          */
2747         mutex_lock(&hugetlb_instantiation_mutex);
2748         entry = huge_ptep_get(ptep);
2749         if (huge_pte_none(entry)) {
2750                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2751                 goto out_mutex;
2752         }
2753
2754         ret = 0;
2755
2756         /*
2757          * If we are going to COW the mapping later, we examine the pending
2758          * reservations for this page now. This will ensure that any
2759          * allocations necessary to record that reservation occur outside the
2760          * spinlock. For private mappings, we also lookup the pagecache
2761          * page now as it is used to determine if a reservation has been
2762          * consumed.
2763          */
2764         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2765                 if (vma_needs_reservation(h, vma, address) < 0) {
2766                         ret = VM_FAULT_OOM;
2767                         goto out_mutex;
2768                 }
2769
2770                 if (!(vma->vm_flags & VM_MAYSHARE))
2771                         pagecache_page = hugetlbfs_pagecache_page(h,
2772                                                                 vma, address);
2773         }
2774
2775         /*
2776          * hugetlb_cow() requires page locks of pte_page(entry) and
2777          * pagecache_page, so here we need take the former one
2778          * when page != pagecache_page or !pagecache_page.
2779          * Note that locking order is always pagecache_page -> page,
2780          * so no worry about deadlock.
2781          */
2782         page = pte_page(entry);
2783         get_page(page);
2784         if (page != pagecache_page)
2785                 lock_page(page);
2786
2787         spin_lock(&mm->page_table_lock);
2788         /* Check for a racing update before calling hugetlb_cow */
2789         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2790                 goto out_page_table_lock;
2791
2792
2793         if (flags & FAULT_FLAG_WRITE) {
2794                 if (!pte_write(entry)) {
2795                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2796                                                         pagecache_page);
2797                         goto out_page_table_lock;
2798                 }
2799                 entry = pte_mkdirty(entry);
2800         }
2801         entry = pte_mkyoung(entry);
2802         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2803                                                 flags & FAULT_FLAG_WRITE))
2804                 update_mmu_cache(vma, address, ptep);
2805
2806 out_page_table_lock:
2807         spin_unlock(&mm->page_table_lock);
2808
2809         if (pagecache_page) {
2810                 unlock_page(pagecache_page);
2811                 put_page(pagecache_page);
2812         }
2813         if (page != pagecache_page)
2814                 unlock_page(page);
2815         put_page(page);
2816
2817 out_mutex:
2818         mutex_unlock(&hugetlb_instantiation_mutex);
2819
2820         return ret;
2821 }
2822
2823 /* Can be overriden by architectures */
2824 __attribute__((weak)) struct page *
2825 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2826                pud_t *pud, int write)
2827 {
2828         BUG();
2829         return NULL;
2830 }
2831
2832 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2833                         struct page **pages, struct vm_area_struct **vmas,
2834                         unsigned long *position, int *length, int i,
2835                         unsigned int flags)
2836 {
2837         unsigned long pfn_offset;
2838         unsigned long vaddr = *position;
2839         int remainder = *length;
2840         struct hstate *h = hstate_vma(vma);
2841
2842         spin_lock(&mm->page_table_lock);
2843         while (vaddr < vma->vm_end && remainder) {
2844                 pte_t *pte;
2845                 int absent;
2846                 struct page *page;
2847
2848                 /*
2849                  * Some archs (sparc64, sh*) have multiple pte_ts to
2850                  * each hugepage.  We have to make sure we get the
2851                  * first, for the page indexing below to work.
2852                  */
2853                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2854                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2855
2856                 /*
2857                  * When coredumping, it suits get_dump_page if we just return
2858                  * an error where there's an empty slot with no huge pagecache
2859                  * to back it.  This way, we avoid allocating a hugepage, and
2860                  * the sparse dumpfile avoids allocating disk blocks, but its
2861                  * huge holes still show up with zeroes where they need to be.
2862                  */
2863                 if (absent && (flags & FOLL_DUMP) &&
2864                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2865                         remainder = 0;
2866                         break;
2867                 }
2868
2869                 if (absent ||
2870                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2871                         int ret;
2872
2873                         spin_unlock(&mm->page_table_lock);
2874                         ret = hugetlb_fault(mm, vma, vaddr,
2875                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2876                         spin_lock(&mm->page_table_lock);
2877                         if (!(ret & VM_FAULT_ERROR))
2878                                 continue;
2879
2880                         remainder = 0;
2881                         break;
2882                 }
2883
2884                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2885                 page = pte_page(huge_ptep_get(pte));
2886 same_page:
2887                 if (pages) {
2888                         pages[i] = mem_map_offset(page, pfn_offset);
2889                         get_page(pages[i]);
2890                 }
2891
2892                 if (vmas)
2893                         vmas[i] = vma;
2894
2895                 vaddr += PAGE_SIZE;
2896                 ++pfn_offset;
2897                 --remainder;
2898                 ++i;
2899                 if (vaddr < vma->vm_end && remainder &&
2900                                 pfn_offset < pages_per_huge_page(h)) {
2901                         /*
2902                          * We use pfn_offset to avoid touching the pageframes
2903                          * of this compound page.
2904                          */
2905                         goto same_page;
2906                 }
2907         }
2908         spin_unlock(&mm->page_table_lock);
2909         *length = remainder;
2910         *position = vaddr;
2911
2912         return i ? i : -EFAULT;
2913 }
2914
2915 void hugetlb_change_protection(struct vm_area_struct *vma,
2916                 unsigned long address, unsigned long end, pgprot_t newprot)
2917 {
2918         struct mm_struct *mm = vma->vm_mm;
2919         unsigned long start = address;
2920         pte_t *ptep;
2921         pte_t pte;
2922         struct hstate *h = hstate_vma(vma);
2923
2924         BUG_ON(address >= end);
2925         flush_cache_range(vma, address, end);
2926
2927         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2928         spin_lock(&mm->page_table_lock);
2929         for (; address < end; address += huge_page_size(h)) {
2930                 ptep = huge_pte_offset(mm, address);
2931                 if (!ptep)
2932                         continue;
2933                 if (huge_pmd_unshare(mm, &address, ptep))
2934                         continue;
2935                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2936                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2937                         pte = pte_mkhuge(pte_modify(pte, newprot));
2938                         set_huge_pte_at(mm, address, ptep, pte);
2939                 }
2940         }
2941         spin_unlock(&mm->page_table_lock);
2942         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2943
2944         flush_tlb_range(vma, start, end);
2945 }
2946
2947 int hugetlb_reserve_pages(struct inode *inode,
2948                                         long from, long to,
2949                                         struct vm_area_struct *vma,
2950                                         vm_flags_t vm_flags)
2951 {
2952         long ret, chg;
2953         struct hstate *h = hstate_inode(inode);
2954         struct hugepage_subpool *spool = subpool_inode(inode);
2955
2956         /*
2957          * Only apply hugepage reservation if asked. At fault time, an
2958          * attempt will be made for VM_NORESERVE to allocate a page
2959          * without using reserves
2960          */
2961         if (vm_flags & VM_NORESERVE)
2962                 return 0;
2963
2964         /*
2965          * Shared mappings base their reservation on the number of pages that
2966          * are already allocated on behalf of the file. Private mappings need
2967          * to reserve the full area even if read-only as mprotect() may be
2968          * called to make the mapping read-write. Assume !vma is a shm mapping
2969          */
2970         if (!vma || vma->vm_flags & VM_MAYSHARE)
2971                 chg = region_chg(&inode->i_mapping->private_list, from, to);
2972         else {
2973                 struct resv_map *resv_map = resv_map_alloc();
2974                 if (!resv_map)
2975                         return -ENOMEM;
2976
2977                 chg = to - from;
2978
2979                 set_vma_resv_map(vma, resv_map);
2980                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2981         }
2982
2983         if (chg < 0) {
2984                 ret = chg;
2985                 goto out_err;
2986         }
2987
2988         /* There must be enough pages in the subpool for the mapping */
2989         if (hugepage_subpool_get_pages(spool, chg)) {
2990                 ret = -ENOSPC;
2991                 goto out_err;
2992         }
2993
2994         /*
2995          * Check enough hugepages are available for the reservation.
2996          * Hand the pages back to the subpool if there are not
2997          */
2998         ret = hugetlb_acct_memory(h, chg);
2999         if (ret < 0) {
3000                 hugepage_subpool_put_pages(spool, chg);
3001                 goto out_err;
3002         }
3003
3004         /*
3005          * Account for the reservations made. Shared mappings record regions
3006          * that have reservations as they are shared by multiple VMAs.
3007          * When the last VMA disappears, the region map says how much
3008          * the reservation was and the page cache tells how much of
3009          * the reservation was consumed. Private mappings are per-VMA and
3010          * only the consumed reservations are tracked. When the VMA
3011          * disappears, the original reservation is the VMA size and the
3012          * consumed reservations are stored in the map. Hence, nothing
3013          * else has to be done for private mappings here
3014          */
3015         if (!vma || vma->vm_flags & VM_MAYSHARE)
3016                 region_add(&inode->i_mapping->private_list, from, to);
3017         return 0;
3018 out_err:
3019         if (vma)
3020                 resv_map_put(vma);
3021         return ret;
3022 }
3023
3024 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3025 {
3026         struct hstate *h = hstate_inode(inode);
3027         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3028         struct hugepage_subpool *spool = subpool_inode(inode);
3029
3030         spin_lock(&inode->i_lock);
3031         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3032         spin_unlock(&inode->i_lock);
3033
3034         hugepage_subpool_put_pages(spool, (chg - freed));
3035         hugetlb_acct_memory(h, -(chg - freed));
3036 }
3037
3038 #ifdef CONFIG_MEMORY_FAILURE
3039
3040 /* Should be called in hugetlb_lock */
3041 static int is_hugepage_on_freelist(struct page *hpage)
3042 {
3043         struct page *page;
3044         struct page *tmp;
3045         struct hstate *h = page_hstate(hpage);
3046         int nid = page_to_nid(hpage);
3047
3048         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3049                 if (page == hpage)
3050                         return 1;
3051         return 0;
3052 }
3053
3054 /*
3055  * This function is called from memory failure code.
3056  * Assume the caller holds page lock of the head page.
3057  */
3058 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3059 {
3060         struct hstate *h = page_hstate(hpage);
3061         int nid = page_to_nid(hpage);
3062         int ret = -EBUSY;
3063
3064         spin_lock(&hugetlb_lock);
3065         if (is_hugepage_on_freelist(hpage)) {
3066                 list_del(&hpage->lru);
3067                 set_page_refcounted(hpage);
3068                 h->free_huge_pages--;
3069                 h->free_huge_pages_node[nid]--;
3070                 ret = 0;
3071         }
3072         spin_unlock(&hugetlb_lock);
3073         return ret;
3074 }
3075 #endif