mm/huge_memory.c: respect FOLL_FORCE/FOLL_COW for thp
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <linux/freezer.h>
19 #include <linux/mman.h>
20 #include <asm/tlb.h>
21 #include <asm/pgalloc.h>
22 #include "internal.h"
23
24 /*
25  * By default transparent hugepage support is enabled for all mappings
26  * and khugepaged scans all mappings. Defrag is only invoked by
27  * khugepaged hugepage allocations and by page faults inside
28  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
29  * allocations.
30  */
31 unsigned long transparent_hugepage_flags __read_mostly =
32 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
33         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
34 #endif
35 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
36         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
37 #endif
38         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
39         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
40
41 /* by default scan 8*512 ptes (or vmas) every 10 seconds */
42 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
43 static unsigned int khugepaged_pages_collapsed;
44 static unsigned int khugepaged_full_scans;
45 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
46 /* during fragmentation poll the hugepage allocator once every minute */
47 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
48 static struct task_struct *khugepaged_thread __read_mostly;
49 static DEFINE_MUTEX(khugepaged_mutex);
50 static DEFINE_SPINLOCK(khugepaged_mm_lock);
51 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
52 /*
53  * By default collapse hugepages if at least one pte is mapped, just
54  * as would have happened if the vma had been large enough at page
55  * fault time.
56  */
57 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
58
59 static int khugepaged(void *none);
60 static int mm_slots_hash_init(void);
61 static int khugepaged_slab_init(void);
62 static void khugepaged_slab_free(void);
63
64 #define MM_SLOTS_HASH_HEADS 1024
65 static struct hlist_head *mm_slots_hash __read_mostly;
66 static struct kmem_cache *mm_slot_cache __read_mostly;
67
68 /**
69  * struct mm_slot - hash lookup from mm to mm_slot
70  * @hash: hash collision list
71  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
72  * @mm: the mm that this information is valid for
73  */
74 struct mm_slot {
75         struct hlist_node hash;
76         struct list_head mm_node;
77         struct mm_struct *mm;
78 };
79
80 /**
81  * struct khugepaged_scan - cursor for scanning
82  * @mm_head: the head of the mm list to scan
83  * @mm_slot: the current mm_slot we are scanning
84  * @address: the next address inside that to be scanned
85  *
86  * There is only one khugepaged_scan instance of this cursor structure.
87  */
88 struct khugepaged_scan {
89         struct list_head mm_head;
90         struct mm_slot *mm_slot;
91         unsigned long address;
92 };
93 static struct khugepaged_scan khugepaged_scan = {
94         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
95 };
96
97
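/*
 * Bump min_free_kbytes so that enough pageblocks stay free or easily
 * reclaimable for huge page allocations to keep succeeding once THP is
 * enabled. Does nothing unless THP is set to "always" or "madvise".
 */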
98 static int set_recommended_min_free_kbytes(void)
99 {
100         struct zone *zone;
101         int nr_zones = 0;
102         unsigned long recommended_min;
103         extern int min_free_kbytes;
104
105         if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
106                       &transparent_hugepage_flags) &&
107             !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
108                       &transparent_hugepage_flags))
109                 return 0;
110
111         for_each_populated_zone(zone)
112                 nr_zones++;
113
114         /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
115         recommended_min = pageblock_nr_pages * nr_zones * 2;
116
117         /*
118          * Make sure that on average at least two pageblocks are almost free
119          * of another type, one for a migratetype to fall back to and a
120          * second to avoid subsequent fallbacks of other types. There are 3
121          * MIGRATE_TYPES we care about.
122          */
123         recommended_min += pageblock_nr_pages * nr_zones *
124                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
125
126         /* never allow reserving more than 5% of lowmem */
127         recommended_min = min(recommended_min,
128                               (unsigned long) nr_free_buffer_pages() / 20);
129         recommended_min <<= (PAGE_SHIFT-10);
130
131         if (recommended_min > min_free_kbytes)
132                 min_free_kbytes = recommended_min;
133         setup_per_zone_wmarks();
134         return 0;
135 }
136 late_initcall(set_recommended_min_free_kbytes);
137
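/*
 * Start the khugepaged kernel thread when THP is enabled (waking it if
 * it already has mm's queued for scanning), or wake it up so it can
 * exit when THP has just been disabled.
 */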
138 static int start_khugepaged(void)
139 {
140         int err = 0;
141         if (khugepaged_enabled()) {
142                 int wakeup;
143                 if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
144                         err = -ENOMEM;
145                         goto out;
146                 }
147                 mutex_lock(&khugepaged_mutex);
148                 if (!khugepaged_thread)
149                         khugepaged_thread = kthread_run(khugepaged, NULL,
150                                                         "khugepaged");
151                 if (unlikely(IS_ERR(khugepaged_thread))) {
152                         printk(KERN_ERR
153                                "khugepaged: kthread_run(khugepaged) failed\n");
154                         err = PTR_ERR(khugepaged_thread);
155                         khugepaged_thread = NULL;
156                 }
157                 wakeup = !list_empty(&khugepaged_scan.mm_head);
158                 mutex_unlock(&khugepaged_mutex);
159                 if (wakeup)
160                         wake_up_interruptible(&khugepaged_wait);
161
162                 set_recommended_min_free_kbytes();
163         } else
164                 /* wakeup to exit */
165                 wake_up_interruptible(&khugepaged_wait);
166 out:
167         return err;
168 }
169
170 #ifdef CONFIG_SYSFS
171
172 static ssize_t double_flag_show(struct kobject *kobj,
173                                 struct kobj_attribute *attr, char *buf,
174                                 enum transparent_hugepage_flag enabled,
175                                 enum transparent_hugepage_flag req_madv)
176 {
177         if (test_bit(enabled, &transparent_hugepage_flags)) {
178                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
179                 return sprintf(buf, "[always] madvise never\n");
180         } else if (test_bit(req_madv, &transparent_hugepage_flags))
181                 return sprintf(buf, "always [madvise] never\n");
182         else
183                 return sprintf(buf, "always madvise [never]\n");
184 }
185 static ssize_t double_flag_store(struct kobject *kobj,
186                                  struct kobj_attribute *attr,
187                                  const char *buf, size_t count,
188                                  enum transparent_hugepage_flag enabled,
189                                  enum transparent_hugepage_flag req_madv)
190 {
191         if (!memcmp("always", buf,
192                     min(sizeof("always")-1, count))) {
193                 set_bit(enabled, &transparent_hugepage_flags);
194                 clear_bit(req_madv, &transparent_hugepage_flags);
195         } else if (!memcmp("madvise", buf,
196                            min(sizeof("madvise")-1, count))) {
197                 clear_bit(enabled, &transparent_hugepage_flags);
198                 set_bit(req_madv, &transparent_hugepage_flags);
199         } else if (!memcmp("never", buf,
200                            min(sizeof("never")-1, count))) {
201                 clear_bit(enabled, &transparent_hugepage_flags);
202                 clear_bit(req_madv, &transparent_hugepage_flags);
203         } else
204                 return -EINVAL;
205
206         return count;
207 }
208
209 static ssize_t enabled_show(struct kobject *kobj,
210                             struct kobj_attribute *attr, char *buf)
211 {
212         return double_flag_show(kobj, attr, buf,
213                                 TRANSPARENT_HUGEPAGE_FLAG,
214                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
215 }
216 static ssize_t enabled_store(struct kobject *kobj,
217                              struct kobj_attribute *attr,
218                              const char *buf, size_t count)
219 {
220         ssize_t ret;
221
222         ret = double_flag_store(kobj, attr, buf, count,
223                                 TRANSPARENT_HUGEPAGE_FLAG,
224                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
225
226         if (ret > 0) {
227                 int err = start_khugepaged();
228                 if (err)
229                         ret = err;
230         }
231
232         if (ret > 0 &&
233             (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
234                       &transparent_hugepage_flags) ||
235              test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
236                       &transparent_hugepage_flags)))
237                 set_recommended_min_free_kbytes();
238
239         return ret;
240 }
241 static struct kobj_attribute enabled_attr =
242         __ATTR(enabled, 0644, enabled_show, enabled_store);
243
244 static ssize_t single_flag_show(struct kobject *kobj,
245                                 struct kobj_attribute *attr, char *buf,
246                                 enum transparent_hugepage_flag flag)
247 {
248         return sprintf(buf, "%d\n",
249                        !!test_bit(flag, &transparent_hugepage_flags));
250 }
251
252 static ssize_t single_flag_store(struct kobject *kobj,
253                                  struct kobj_attribute *attr,
254                                  const char *buf, size_t count,
255                                  enum transparent_hugepage_flag flag)
256 {
257         unsigned long value;
258         int ret;
259
260         ret = kstrtoul(buf, 10, &value);
261         if (ret < 0)
262                 return ret;
263         if (value > 1)
264                 return -EINVAL;
265
266         if (value)
267                 set_bit(flag, &transparent_hugepage_flags);
268         else
269                 clear_bit(flag, &transparent_hugepage_flags);
270
271         return count;
272 }
273
274 /*
275  * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
276  * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
277  * memory just to allocate one more hugepage.
278  */
279 static ssize_t defrag_show(struct kobject *kobj,
280                            struct kobj_attribute *attr, char *buf)
281 {
282         return double_flag_show(kobj, attr, buf,
283                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
284                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
285 }
286 static ssize_t defrag_store(struct kobject *kobj,
287                             struct kobj_attribute *attr,
288                             const char *buf, size_t count)
289 {
290         return double_flag_store(kobj, attr, buf, count,
291                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
292                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
293 }
294 static struct kobj_attribute defrag_attr =
295         __ATTR(defrag, 0644, defrag_show, defrag_store);
296
297 #ifdef CONFIG_DEBUG_VM
298 static ssize_t debug_cow_show(struct kobject *kobj,
299                                 struct kobj_attribute *attr, char *buf)
300 {
301         return single_flag_show(kobj, attr, buf,
302                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
303 }
304 static ssize_t debug_cow_store(struct kobject *kobj,
305                                struct kobj_attribute *attr,
306                                const char *buf, size_t count)
307 {
308         return single_flag_store(kobj, attr, buf, count,
309                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
310 }
311 static struct kobj_attribute debug_cow_attr =
312         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
313 #endif /* CONFIG_DEBUG_VM */
314
315 static struct attribute *hugepage_attr[] = {
316         &enabled_attr.attr,
317         &defrag_attr.attr,
318 #ifdef CONFIG_DEBUG_VM
319         &debug_cow_attr.attr,
320 #endif
321         NULL,
322 };
323
324 static struct attribute_group hugepage_attr_group = {
325         .attrs = hugepage_attr,
326 };
327
328 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
329                                          struct kobj_attribute *attr,
330                                          char *buf)
331 {
332         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
333 }
334
335 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
336                                           struct kobj_attribute *attr,
337                                           const char *buf, size_t count)
338 {
339         unsigned long msecs;
340         int err;
341
342         err = strict_strtoul(buf, 10, &msecs);
343         if (err || msecs > UINT_MAX)
344                 return -EINVAL;
345
346         khugepaged_scan_sleep_millisecs = msecs;
347         wake_up_interruptible(&khugepaged_wait);
348
349         return count;
350 }
351 static struct kobj_attribute scan_sleep_millisecs_attr =
352         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
353                scan_sleep_millisecs_store);
354
355 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
356                                           struct kobj_attribute *attr,
357                                           char *buf)
358 {
359         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
360 }
361
362 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
363                                            struct kobj_attribute *attr,
364                                            const char *buf, size_t count)
365 {
366         unsigned long msecs;
367         int err;
368
369         err = strict_strtoul(buf, 10, &msecs);
370         if (err || msecs > UINT_MAX)
371                 return -EINVAL;
372
373         khugepaged_alloc_sleep_millisecs = msecs;
374         wake_up_interruptible(&khugepaged_wait);
375
376         return count;
377 }
378 static struct kobj_attribute alloc_sleep_millisecs_attr =
379         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
380                alloc_sleep_millisecs_store);
381
382 static ssize_t pages_to_scan_show(struct kobject *kobj,
383                                   struct kobj_attribute *attr,
384                                   char *buf)
385 {
386         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
387 }
388 static ssize_t pages_to_scan_store(struct kobject *kobj,
389                                    struct kobj_attribute *attr,
390                                    const char *buf, size_t count)
391 {
392         int err;
393         unsigned long pages;
394
395         err = strict_strtoul(buf, 10, &pages);
396         if (err || !pages || pages > UINT_MAX)
397                 return -EINVAL;
398
399         khugepaged_pages_to_scan = pages;
400
401         return count;
402 }
403 static struct kobj_attribute pages_to_scan_attr =
404         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
405                pages_to_scan_store);
406
407 static ssize_t pages_collapsed_show(struct kobject *kobj,
408                                     struct kobj_attribute *attr,
409                                     char *buf)
410 {
411         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
412 }
413 static struct kobj_attribute pages_collapsed_attr =
414         __ATTR_RO(pages_collapsed);
415
416 static ssize_t full_scans_show(struct kobject *kobj,
417                                struct kobj_attribute *attr,
418                                char *buf)
419 {
420         return sprintf(buf, "%u\n", khugepaged_full_scans);
421 }
422 static struct kobj_attribute full_scans_attr =
423         __ATTR_RO(full_scans);
424
425 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
426                                       struct kobj_attribute *attr, char *buf)
427 {
428         return single_flag_show(kobj, attr, buf,
429                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
430 }
431 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
432                                        struct kobj_attribute *attr,
433                                        const char *buf, size_t count)
434 {
435         return single_flag_store(kobj, attr, buf, count,
436                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
437 }
438 static struct kobj_attribute khugepaged_defrag_attr =
439         __ATTR(defrag, 0644, khugepaged_defrag_show,
440                khugepaged_defrag_store);
441
442 /*
443  * max_ptes_none controls whether khugepaged may collapse hugepages
444  * over unmapped ptes, in turn potentially increasing the memory
445  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
446  * reduce the free memory available in the system as it runs.
447  * Increasing max_ptes_none instead potentially reduces free memory
448  * during the khugepaged scan.
449  */
450 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
451                                              struct kobj_attribute *attr,
452                                              char *buf)
453 {
454         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
455 }
456 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
457                                               struct kobj_attribute *attr,
458                                               const char *buf, size_t count)
459 {
460         int err;
461         unsigned long max_ptes_none;
462
463         err = strict_strtoul(buf, 10, &max_ptes_none);
464         if (err || max_ptes_none > HPAGE_PMD_NR-1)
465                 return -EINVAL;
466
467         khugepaged_max_ptes_none = max_ptes_none;
468
469         return count;
470 }
471 static struct kobj_attribute khugepaged_max_ptes_none_attr =
472         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
473                khugepaged_max_ptes_none_store);
474
475 static struct attribute *khugepaged_attr[] = {
476         &khugepaged_defrag_attr.attr,
477         &khugepaged_max_ptes_none_attr.attr,
478         &pages_to_scan_attr.attr,
479         &pages_collapsed_attr.attr,
480         &full_scans_attr.attr,
481         &scan_sleep_millisecs_attr.attr,
482         &alloc_sleep_millisecs_attr.attr,
483         NULL,
484 };
485
486 static struct attribute_group khugepaged_attr_group = {
487         .attrs = khugepaged_attr,
488         .name = "khugepaged",
489 };
490 #endif /* CONFIG_SYSFS */
491
492 static int __init hugepage_init(void)
493 {
494         int err;
495 #ifdef CONFIG_SYSFS
496         static struct kobject *hugepage_kobj;
497 #endif
498
499         err = -EINVAL;
500         if (!has_transparent_hugepage()) {
501                 transparent_hugepage_flags = 0;
502                 goto out;
503         }
504
505 #ifdef CONFIG_SYSFS
506         err = -ENOMEM;
507         hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
508         if (unlikely(!hugepage_kobj)) {
509                 printk(KERN_ERR "hugepage: failed kobject create\n");
510                 goto out;
511         }
512
513         err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
514         if (err) {
515                 printk(KERN_ERR "hugepage: failed to register hugepage group\n");
516                 goto out;
517         }
518
519         err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
520         if (err) {
521                 printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
522                 goto out;
523         }
524 #endif
525
526         err = khugepaged_slab_init();
527         if (err)
528                 goto out;
529
530         err = mm_slots_hash_init();
531         if (err) {
532                 khugepaged_slab_free();
533                 goto out;
534         }
535
536         /*
537          * By default disable transparent hugepages on smaller systems,
538          * where the extra memory used could hurt more than TLB overhead
539          * is likely to save.  The admin can still enable it through /sys.
540          */
541         if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
542                 transparent_hugepage_flags = 0;
543
544         start_khugepaged();
545
546         set_recommended_min_free_kbytes();
547
548 out:
549         return err;
550 }
551 module_init(hugepage_init)
552
553 static int __init setup_transparent_hugepage(char *str)
554 {
555         int ret = 0;
556         if (!str)
557                 goto out;
558         if (!strcmp(str, "always")) {
559                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
560                         &transparent_hugepage_flags);
561                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
562                           &transparent_hugepage_flags);
563                 ret = 1;
564         } else if (!strcmp(str, "madvise")) {
565                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
566                           &transparent_hugepage_flags);
567                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
568                         &transparent_hugepage_flags);
569                 ret = 1;
570         } else if (!strcmp(str, "never")) {
571                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
572                           &transparent_hugepage_flags);
573                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
574                           &transparent_hugepage_flags);
575                 ret = 1;
576         }
577 out:
578         if (!ret)
579                 printk(KERN_WARNING
580                        "transparent_hugepage= cannot parse, ignored\n");
581         return ret;
582 }
583 __setup("transparent_hugepage=", setup_transparent_hugepage);
584
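/*
 * Stash a preallocated page table on the mm's pmd_huge_pte FIFO list;
 * it is consumed later via get_pmd_huge_pte() when a huge pmd has to
 * be torn back down into regular ptes.
 */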
585 static void prepare_pmd_huge_pte(pgtable_t pgtable,
586                                  struct mm_struct *mm)
587 {
588         assert_spin_locked(&mm->page_table_lock);
589
590         /* FIFO */
591         if (!mm->pmd_huge_pte)
592                 INIT_LIST_HEAD(&pgtable->lru);
593         else
594                 list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
595         mm->pmd_huge_pte = pgtable;
596 }
597
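/* set the pmd write bit only if the vma allows writes (cf. maybe_mkwrite) */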
598 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
599 {
600         if (likely(vma->vm_flags & VM_WRITE))
601                 pmd = pmd_mkwrite(pmd);
602         return pmd;
603 }
604
605 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
606                                         struct vm_area_struct *vma,
607                                         unsigned long haddr, pmd_t *pmd,
608                                         struct page *page)
609 {
610         int ret = 0;
611         pgtable_t pgtable;
612
613         VM_BUG_ON(!PageCompound(page));
614         pgtable = pte_alloc_one(mm, haddr);
615         if (unlikely(!pgtable)) {
616                 mem_cgroup_uncharge_page(page);
617                 put_page(page);
618                 return VM_FAULT_OOM;
619         }
620
621         clear_huge_page(page, haddr, HPAGE_PMD_NR);
622         __SetPageUptodate(page);
623
624         spin_lock(&mm->page_table_lock);
625         if (unlikely(!pmd_none(*pmd))) {
626                 spin_unlock(&mm->page_table_lock);
627                 mem_cgroup_uncharge_page(page);
628                 put_page(page);
629                 pte_free(mm, pgtable);
630         } else {
631                 pmd_t entry;
632                 entry = mk_pmd(page, vma->vm_page_prot);
633                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
634                 entry = pmd_mkhuge(entry);
635                 /*
636                  * The spinlocking to take the lru_lock inside
637                  * page_add_new_anon_rmap() acts as a full memory
638                  * barrier to be sure clear_huge_page writes become
639                  * visible before the set_pmd_at() write.
640                  */
641                 page_add_new_anon_rmap(page, vma, haddr);
642                 set_pmd_at(mm, haddr, pmd, entry);
643                 prepare_pmd_huge_pte(pgtable, mm);
644                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
645                 mm->nr_ptes++;
646                 spin_unlock(&mm->page_table_lock);
647         }
648
649         return ret;
650 }
651
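/*
 * GFP mask for huge page allocations: start from GFP_TRANSHUGE and
 * drop __GFP_WAIT when defrag is disabled, so the allocation won't
 * enter direct reclaim/compaction.
 */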
652 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
653 {
654         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
655 }
656
657 static inline struct page *alloc_hugepage_vma(int defrag,
658                                               struct vm_area_struct *vma,
659                                               unsigned long haddr, int nd,
660                                               gfp_t extra_gfp)
661 {
662         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
663                                HPAGE_PMD_ORDER, vma, haddr, nd);
664 }
665
666 #ifndef CONFIG_NUMA
667 static inline struct page *alloc_hugepage(int defrag)
668 {
669         return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
670                            HPAGE_PMD_ORDER);
671 }
672 #endif
673
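/*
 * Anonymous fault with no pmd entry present: install a huge page if
 * the huge-aligned range around @address fits in the vma and the
 * allocation succeeds; otherwise fall back to a regular page table
 * and handle_pte_fault().
 */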
674 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
675                                unsigned long address, pmd_t *pmd,
676                                unsigned int flags)
677 {
678         struct page *page;
679         unsigned long haddr = address & HPAGE_PMD_MASK;
680         pte_t *pte;
681
682         if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
683                 if (unlikely(anon_vma_prepare(vma)))
684                         return VM_FAULT_OOM;
685                 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
686                         return VM_FAULT_OOM;
687                 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
688                                           vma, haddr, numa_node_id(), 0);
689                 if (unlikely(!page)) {
690                         count_vm_event(THP_FAULT_FALLBACK);
691                         goto out;
692                 }
693                 count_vm_event(THP_FAULT_ALLOC);
694                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
695                         put_page(page);
696                         goto out;
697                 }
698
699                 return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
700         }
701 out:
702         /*
703          * Use __pte_alloc instead of pte_alloc_map, because we can't
704          * run pte_offset_map on the pmd if a huge pmd could
705          * materialize under us from a different thread.
706          */
707         if (unlikely(__pte_alloc(mm, vma, pmd, address)))
708                 return VM_FAULT_OOM;
709         /* if a huge pmd materialized under us, just retry later */
710         if (unlikely(pmd_trans_huge(*pmd)))
711                 return 0;
712         /*
713          * A regular pmd is established and it can't morph into a huge pmd
714          * from under us anymore at this point because we hold the mmap_sem
715          * in read mode and khugepaged takes it in write mode. So now it's
716          * safe to run pte_offset_map().
717          */
718         pte = pte_offset_map(pmd, address);
719         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
720 }
721
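/*
 * fork() support: share the huge page between parent and child by
 * duplicating the rmap and write-protecting both pmds, so either
 * side's first write triggers a huge COW fault. Returns -EAGAIN when
 * the huge pmd is gone or being split, so the caller falls back to
 * copying at the pte level.
 */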
722 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
723                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
724                   struct vm_area_struct *vma)
725 {
726         struct page *src_page;
727         pmd_t pmd;
728         pgtable_t pgtable;
729         int ret;
730
731         ret = -ENOMEM;
732         pgtable = pte_alloc_one(dst_mm, addr);
733         if (unlikely(!pgtable))
734                 goto out;
735
736         spin_lock(&dst_mm->page_table_lock);
737         spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
738
739         ret = -EAGAIN;
740         pmd = *src_pmd;
741         if (unlikely(!pmd_trans_huge(pmd))) {
742                 pte_free(dst_mm, pgtable);
743                 goto out_unlock;
744         }
745         if (unlikely(pmd_trans_splitting(pmd))) {
746                 /* split huge page running from under us */
747                 spin_unlock(&src_mm->page_table_lock);
748                 spin_unlock(&dst_mm->page_table_lock);
749                 pte_free(dst_mm, pgtable);
750
751                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
752                 goto out;
753         }
754         src_page = pmd_page(pmd);
755         VM_BUG_ON(!PageHead(src_page));
756         get_page(src_page);
757         page_dup_rmap(src_page);
758         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
759
760         pmdp_set_wrprotect(src_mm, addr, src_pmd);
761         pmd = pmd_mkold(pmd_wrprotect(pmd));
762         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
763         prepare_pmd_huge_pte(pgtable, dst_mm);
764         dst_mm->nr_ptes++;
765
766         ret = 0;
767 out_unlock:
768         spin_unlock(&src_mm->page_table_lock);
769         spin_unlock(&dst_mm->page_table_lock);
770 out:
771         return ret;
772 }
773
774 /* no "address" argument, so this destroys page coloring on some archs */
775 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
776 {
777         pgtable_t pgtable;
778
779         assert_spin_locked(&mm->page_table_lock);
780
781         /* FIFO */
782         pgtable = mm->pmd_huge_pte;
783         if (list_empty(&pgtable->lru))
784                 mm->pmd_huge_pte = NULL;
785         else {
786                 mm->pmd_huge_pte = list_entry(pgtable->lru.next,
787                                               struct page, lru);
788                 list_del(&pgtable->lru);
789         }
790         return pgtable;
791 }
792
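/*
 * Huge COW fallback when no huge page can be allocated: copy the data
 * into HPAGE_PMD_NR order-0 pages and map them via a regular page
 * table, replacing the huge pmd.
 */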
793 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
794                                         struct vm_area_struct *vma,
795                                         unsigned long address,
796                                         pmd_t *pmd, pmd_t orig_pmd,
797                                         struct page *page,
798                                         unsigned long haddr)
799 {
800         pgtable_t pgtable;
801         pmd_t _pmd;
802         int ret = 0, i;
803         struct page **pages;
804
805         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
806                         GFP_KERNEL);
807         if (unlikely(!pages)) {
808                 ret |= VM_FAULT_OOM;
809                 goto out;
810         }
811
812         for (i = 0; i < HPAGE_PMD_NR; i++) {
813                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
814                                                __GFP_OTHER_NODE,
815                                                vma, address, page_to_nid(page));
816                 if (unlikely(!pages[i] ||
817                              mem_cgroup_newpage_charge(pages[i], mm,
818                                                        GFP_KERNEL))) {
819                         if (pages[i])
820                                 put_page(pages[i]);
821                         mem_cgroup_uncharge_start();
822                         while (--i >= 0) {
823                                 mem_cgroup_uncharge_page(pages[i]);
824                                 put_page(pages[i]);
825                         }
826                         mem_cgroup_uncharge_end();
827                         kfree(pages);
828                         ret |= VM_FAULT_OOM;
829                         goto out;
830                 }
831         }
832
833         for (i = 0; i < HPAGE_PMD_NR; i++) {
834                 copy_user_highpage(pages[i], page + i,
835                                    haddr + PAGE_SIZE * i, vma);
836                 __SetPageUptodate(pages[i]);
837                 cond_resched();
838         }
839
840         spin_lock(&mm->page_table_lock);
841         if (unlikely(!pmd_same(*pmd, orig_pmd)))
842                 goto out_free_pages;
843         VM_BUG_ON(!PageHead(page));
844
845         pmdp_clear_flush_notify(vma, haddr, pmd);
846         /* leave pmd empty until pte is filled */
847
848         pgtable = get_pmd_huge_pte(mm);
849         pmd_populate(mm, &_pmd, pgtable);
850
851         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
852                 pte_t *pte, entry;
853                 entry = mk_pte(pages[i], vma->vm_page_prot);
854                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
855                 page_add_new_anon_rmap(pages[i], vma, haddr);
856                 pte = pte_offset_map(&_pmd, haddr);
857                 VM_BUG_ON(!pte_none(*pte));
858                 set_pte_at(mm, haddr, pte, entry);
859                 pte_unmap(pte);
860         }
861         kfree(pages);
862
863         smp_wmb(); /* make pte visible before pmd */
864         pmd_populate(mm, pmd, pgtable);
865         page_remove_rmap(page);
866         spin_unlock(&mm->page_table_lock);
867
868         ret |= VM_FAULT_WRITE;
869         put_page(page);
870
871 out:
872         return ret;
873
874 out_free_pages:
875         spin_unlock(&mm->page_table_lock);
876         mem_cgroup_uncharge_start();
877         for (i = 0; i < HPAGE_PMD_NR; i++) {
878                 mem_cgroup_uncharge_page(pages[i]);
879                 put_page(pages[i]);
880         }
881         mem_cgroup_uncharge_end();
882         kfree(pages);
883         goto out;
884 }
885
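/*
 * Write fault on a write-protected huge pmd: reuse the page in place
 * if we are the only mapper, otherwise COW into a freshly allocated
 * huge page; if no huge page can be allocated, copy into small pages
 * via do_huge_pmd_wp_page_fallback().
 */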
886 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
887                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
888 {
889         int ret = 0;
890         struct page *page, *new_page;
891         unsigned long haddr;
892
893         VM_BUG_ON(!vma->anon_vma);
894         spin_lock(&mm->page_table_lock);
895         if (unlikely(!pmd_same(*pmd, orig_pmd)))
896                 goto out_unlock;
897
898         page = pmd_page(orig_pmd);
899         VM_BUG_ON(!PageCompound(page) || !PageHead(page));
900         haddr = address & HPAGE_PMD_MASK;
901         if (page_mapcount(page) == 1) {
902                 pmd_t entry;
903                 entry = pmd_mkyoung(orig_pmd);
904                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
905                 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
906                         update_mmu_cache(vma, address, entry);
907                 ret |= VM_FAULT_WRITE;
908                 goto out_unlock;
909         }
910         get_page(page);
911         spin_unlock(&mm->page_table_lock);
912
913         if (transparent_hugepage_enabled(vma) &&
914             !transparent_hugepage_debug_cow())
915                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
916                                               vma, haddr, numa_node_id(), 0);
917         else
918                 new_page = NULL;
919
920         if (unlikely(!new_page)) {
921                 count_vm_event(THP_FAULT_FALLBACK);
922                 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
923                                                    pmd, orig_pmd, page, haddr);
924                 if (ret & VM_FAULT_OOM)
925                         split_huge_page(page);
926                 put_page(page);
927                 goto out;
928         }
929         count_vm_event(THP_FAULT_ALLOC);
930
931         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
932                 put_page(new_page);
933                 split_huge_page(page);
934                 put_page(page);
935                 ret |= VM_FAULT_OOM;
936                 goto out;
937         }
938
939         copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
940         __SetPageUptodate(new_page);
941
942         spin_lock(&mm->page_table_lock);
943         put_page(page);
944         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
945                 mem_cgroup_uncharge_page(new_page);
946                 put_page(new_page);
947         } else {
948                 pmd_t entry;
949                 VM_BUG_ON(!PageHead(page));
950                 entry = mk_pmd(new_page, vma->vm_page_prot);
951                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
952                 entry = pmd_mkhuge(entry);
953                 pmdp_clear_flush_notify(vma, haddr, pmd);
954                 page_add_new_anon_rmap(new_page, vma, haddr);
955                 set_pmd_at(mm, haddr, pmd, entry);
956                 update_mmu_cache(vma, address, entry);
957                 page_remove_rmap(page);
958                 put_page(page);
959                 ret |= VM_FAULT_WRITE;
960         }
961 out_unlock:
962         spin_unlock(&mm->page_table_lock);
963 out:
964         return ret;
965 }
966
967 /*
968  * FOLL_FORCE can write to even unwritable pmd's, but only
969  * after we've gone through a COW cycle and they are dirty.
970  */
971 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
972                                         unsigned int flags)
973 {
974         return pmd_write(pmd) ||
975                 ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
976                  page && PageAnon(page));
977 }
978
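/*
 * get_user_pages() support for huge pmds; the FOLL_WRITE check goes
 * through can_follow_write_pmd() above, so FOLL_FORCE on a read-only
 * mapping is honoured once the COW cycle has happened. Called with
 * mm->page_table_lock held.
 */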
979 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
980                                    unsigned long addr,
981                                    pmd_t *pmd,
982                                    unsigned int flags)
983 {
984         struct page *page = NULL;
985
986         assert_spin_locked(&mm->page_table_lock);
987
988         page = pmd_page(*pmd);
989         VM_BUG_ON(!PageHead(page));
990
991         if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags))
992                 goto out;
993
994         if (flags & FOLL_TOUCH) {
995                 pmd_t _pmd;
996                 /*
997                  * We should set the dirty bit only for FOLL_WRITE but
998                  * for now the dirty bit in the pmd is meaningless.
999                  * And if the dirty bit will become meaningful and
1000                  * we'll only set it with FOLL_WRITE, an atomic
1001                  * set_bit will be required on the pmd to set the
1002                  * young bit, instead of the current set_pmd_at.
1003                  */
1004                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1005                 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
1006         }
1007         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1008         VM_BUG_ON(!PageCompound(page));
1009         if (flags & FOLL_GET)
1010                 get_page_foll(page);
1011
1012 out:
1013         return page;
1014 }
1015
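/*
 * munmap/exit teardown of a huge pmd: clear the entry, drop the rmap
 * and the deposited page table, and hand the page to the mmu_gather
 * for freeing. Returns 1 if a huge pmd was zapped, 0 when the caller
 * must fall back to the pte loop.
 */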
1016 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1017                  pmd_t *pmd)
1018 {
1019         int ret = 0;
1020
1021         spin_lock(&tlb->mm->page_table_lock);
1022         if (likely(pmd_trans_huge(*pmd))) {
1023                 if (unlikely(pmd_trans_splitting(*pmd))) {
1024                         spin_unlock(&tlb->mm->page_table_lock);
1025                         wait_split_huge_page(vma->anon_vma,
1026                                              pmd);
1027                 } else {
1028                         struct page *page;
1029                         pgtable_t pgtable;
1030                         pgtable = get_pmd_huge_pte(tlb->mm);
1031                         page = pmd_page(*pmd);
1032                         pmd_clear(pmd);
1033                         page_remove_rmap(page);
1034                         VM_BUG_ON(page_mapcount(page) < 0);
1035                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1036                         VM_BUG_ON(!PageHead(page));
1037                         tlb->mm->nr_ptes--;
1038                         spin_unlock(&tlb->mm->page_table_lock);
1039                         tlb_remove_page(tlb, page);
1040                         pte_free(tlb->mm, pgtable);
1041                         ret = 1;
1042                 }
1043         } else
1044                 spin_unlock(&tlb->mm->page_table_lock);
1045
1046         return ret;
1047 }
1048
1049 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1050                 unsigned long addr, unsigned long end,
1051                 unsigned char *vec)
1052 {
1053         int ret = 0;
1054
1055         spin_lock(&vma->vm_mm->page_table_lock);
1056         if (likely(pmd_trans_huge(*pmd))) {
1057                 ret = !pmd_trans_splitting(*pmd);
1058                 spin_unlock(&vma->vm_mm->page_table_lock);
1059                 if (unlikely(!ret))
1060                         wait_split_huge_page(vma->anon_vma, pmd);
1061                 else {
1062                         /*
1063                          * All logical pages in the range are present
1064                          * if backed by a huge page.
1065                          */
1066                         memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1067                 }
1068         } else
1069                 spin_unlock(&vma->vm_mm->page_table_lock);
1070
1071         return ret;
1072 }
1073
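/*
 * mremap() support: move a huge pmd to its new location when both the
 * old and new addresses are huge-aligned. Returns 1 on success, 0 when
 * the caller must fall back to moving ptes, and -1 after waiting for
 * an in-progress split.
 */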
1074 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1075                   unsigned long old_addr,
1076                   unsigned long new_addr, unsigned long old_end,
1077                   pmd_t *old_pmd, pmd_t *new_pmd)
1078 {
1079         int ret = 0;
1080         pmd_t pmd;
1081
1082         struct mm_struct *mm = vma->vm_mm;
1083
1084         if ((old_addr & ~HPAGE_PMD_MASK) ||
1085             (new_addr & ~HPAGE_PMD_MASK) ||
1086             old_end - old_addr < HPAGE_PMD_SIZE ||
1087             (new_vma->vm_flags & VM_NOHUGEPAGE))
1088                 goto out;
1089
1090         /*
1091          * The destination pmd shouldn't be established; free_pgtables()
1092          * should have released it.
1093          */
1094         if (WARN_ON(!pmd_none(*new_pmd))) {
1095                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1096                 goto out;
1097         }
1098
1099         spin_lock(&mm->page_table_lock);
1100         if (likely(pmd_trans_huge(*old_pmd))) {
1101                 if (pmd_trans_splitting(*old_pmd)) {
1102                         spin_unlock(&mm->page_table_lock);
1103                         wait_split_huge_page(vma->anon_vma, old_pmd);
1104                         ret = -1;
1105                 } else {
1106                         pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1107                         VM_BUG_ON(!pmd_none(*new_pmd));
1108                         set_pmd_at(mm, new_addr, new_pmd, pmd);
1109                         spin_unlock(&mm->page_table_lock);
1110                         ret = 1;
1111                 }
1112         } else {
1113                 spin_unlock(&mm->page_table_lock);
1114         }
1115 out:
1116         return ret;
1117 }
1118
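/*
 * mprotect() support: update the protection of a huge pmd in place.
 * Returns 1 if done, 0 when the caller must fall back to the pte level.
 */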
1119 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1120                 unsigned long addr, pgprot_t newprot)
1121 {
1122         struct mm_struct *mm = vma->vm_mm;
1123         int ret = 0;
1124
1125         spin_lock(&mm->page_table_lock);
1126         if (likely(pmd_trans_huge(*pmd))) {
1127                 if (unlikely(pmd_trans_splitting(*pmd))) {
1128                         spin_unlock(&mm->page_table_lock);
1129                         wait_split_huge_page(vma->anon_vma, pmd);
1130                 } else {
1131                         pmd_t entry;
1132
1133                         entry = pmdp_get_and_clear(mm, addr, pmd);
1134                         entry = pmd_modify(entry, newprot);
1135                         set_pmd_at(mm, addr, pmd, entry);
1136                         spin_unlock(&vma->vm_mm->page_table_lock);
1137                         flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1138                         ret = 1;
1139                 }
1140         } else
1141                 spin_unlock(&vma->vm_mm->page_table_lock);
1142
1143         return ret;
1144 }
1145
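/*
 * Find the huge pmd mapping @page at @address in @mm. @flag selects
 * whether a pmd with the splitting bit set is acceptable. Returns the
 * pmd on a match, NULL otherwise. Callers hold mm->page_table_lock.
 */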
1146 pmd_t *page_check_address_pmd(struct page *page,
1147                               struct mm_struct *mm,
1148                               unsigned long address,
1149                               enum page_check_address_pmd_flag flag)
1150 {
1151         pgd_t *pgd;
1152         pud_t *pud;
1153         pmd_t *pmd, *ret = NULL;
1154
1155         if (address & ~HPAGE_PMD_MASK)
1156                 goto out;
1157
1158         pgd = pgd_offset(mm, address);
1159         if (!pgd_present(*pgd))
1160                 goto out;
1161
1162         pud = pud_offset(pgd, address);
1163         if (!pud_present(*pud))
1164                 goto out;
1165
1166         pmd = pmd_offset(pud, address);
1167         if (pmd_none(*pmd))
1168                 goto out;
1169         if (pmd_page(*pmd) != page)
1170                 goto out;
1171         /*
1172          * split_vma() may create temporary aliased mappings. There is
1173          * no risk as long as all huge pmd are found and have their
1174          * splitting bit set before __split_huge_page_refcount
1175          * runs. Finding the same huge pmd more than once during the
1176          * same rmap walk is not a problem.
1177          */
1178         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1179             pmd_trans_splitting(*pmd))
1180                 goto out;
1181         if (pmd_trans_huge(*pmd)) {
1182                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1183                           !pmd_trans_splitting(*pmd));
1184                 ret = pmd;
1185         }
1186 out:
1187         return ret;
1188 }
1189
1190 static int __split_huge_page_splitting(struct page *page,
1191                                        struct vm_area_struct *vma,
1192                                        unsigned long address)
1193 {
1194         struct mm_struct *mm = vma->vm_mm;
1195         pmd_t *pmd;
1196         int ret = 0;
1197
1198         spin_lock(&mm->page_table_lock);
1199         pmd = page_check_address_pmd(page, mm, address,
1200                                      PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1201         if (pmd) {
1202                 /*
1203                  * We can't temporarily set the pmd to null in order
1204                  * to split it, the pmd must remain marked huge at all
1205                  * times or the VM won't take the pmd_trans_huge paths
1206                  * and it won't wait on the anon_vma->root->mutex to
1207                  * serialize against split_huge_page*.
1208                  */
1209                 pmdp_splitting_flush_notify(vma, address, pmd);
1210                 ret = 1;
1211         }
1212         spin_unlock(&mm->page_table_lock);
1213
1214         return ret;
1215 }
1216
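/*
 * Turn the compound page into HPAGE_PMD_NR independent pages:
 * distribute the head page's mapcount and references to the tails and
 * clear PageTail/PageCompound, all under lru_lock and the compound
 * lock so concurrent get_page_unless_zero() callers and lru accounting
 * see a consistent state.
 */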
1217 static void __split_huge_page_refcount(struct page *page)
1218 {
1219         int i;
1220         unsigned long head_index = page->index;
1221         struct zone *zone = page_zone(page);
1222         int zonestat;
1223         int tail_count = 0;
1224
1225         /* prevent PageLRU from going away under us, and freeze lru stats */
1226         spin_lock_irq(&zone->lru_lock);
1227         compound_lock(page);
1228
1229         for (i = 1; i < HPAGE_PMD_NR; i++) {
1230                 struct page *page_tail = page + i;
1231
1232                 /* tail_page->_mapcount cannot change */
1233                 BUG_ON(page_mapcount(page_tail) < 0);
1234                 tail_count += page_mapcount(page_tail);
1235                 /* check for overflow */
1236                 BUG_ON(tail_count < 0);
1237                 BUG_ON(atomic_read(&page_tail->_count) != 0);
1238                 /*
1239                  * tail_page->_count is zero and not changing from
1240                  * under us. But get_page_unless_zero() may be running
1241                  * from under us on the tail_page. If we used
1242                  * atomic_set() below instead of atomic_add(), we
1243                  * would then run atomic_set() concurrently with
1244                  * get_page_unless_zero(), and atomic_set() is
1245                  * implemented in C not using locked ops. spin_unlock
1246                  * on x86 sometime uses locked ops because of PPro
1247                  * on x86 sometimes uses locked ops because of PPro
1248                  * atomic_set() here would be safe on all archs (and
1249                  * not only on x86), it's safer to use atomic_add().
1250                  */
1251                 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1252                            &page_tail->_count);
1253
1254                 /* after clearing PageTail the gup refcount can be released */
1255                 smp_mb();
1256
1257                 /*
1258                  * retain hwpoison flag of the poisoned tail page:
1259                  *   fixes memory-failure killing the wrong process on a
1260                  *   KVM guest.
1261                  */
1262                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1263                 page_tail->flags |= (page->flags &
1264                                      ((1L << PG_referenced) |
1265                                       (1L << PG_swapbacked) |
1266                                       (1L << PG_mlocked) |
1267                                       (1L << PG_uptodate)));
1268                 page_tail->flags |= (1L << PG_dirty);
1269
1270                 /* clear PageTail before overwriting first_page */
1271                 smp_wmb();
1272
1273                 /*
1274                  * __split_huge_page_splitting() already set the
1275                  * splitting bit in all pmd that could map this
1276                  * hugepage, that will ensure no CPU can alter the
1277                  * mapcount on the head page. The mapcount is only
1278                  * accounted in the head page and it has to be
1279                  * transferred to all tail pages in the below code. So
1280                  * for this code to be safe, the mapcount can't change
1281                  * during the split. But that doesn't mean userland can't
1282                  * keep changing and reading the page contents while
1283                  * we transfer the mapcount, so the pmd splitting
1284                  * status is achieved setting a reserved bit in the
1285                  * pmd, not by clearing the present bit.
1286                  */
1287                 page_tail->_mapcount = page->_mapcount;
1288
1289                 BUG_ON(page_tail->mapping);
1290                 page_tail->mapping = page->mapping;
1291
1292                 page_tail->index = ++head_index;
1293
1294                 BUG_ON(!PageAnon(page_tail));
1295                 BUG_ON(!PageUptodate(page_tail));
1296                 BUG_ON(!PageDirty(page_tail));
1297                 BUG_ON(!PageSwapBacked(page_tail));
1298
1299                 mem_cgroup_split_huge_fixup(page, page_tail);
1300
1301                 lru_add_page_tail(zone, page, page_tail);
1302         }
1303         atomic_sub(tail_count, &page->_count);
1304         BUG_ON(atomic_read(&page->_count) <= 0);
1305
1306         __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1307         __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1308
1309         /*
1310          * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
1311          * so adjust those appropriately if this page is on the LRU.
1312          */
1313         if (PageLRU(page)) {
1314                 zonestat = NR_LRU_BASE + page_lru(page);
1315                 __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1316         }
1317
1318         ClearPageCompound(page);
1319         compound_unlock(page);
1320         spin_unlock_irq(&zone->lru_lock);
1321
1322         for (i = 1; i < HPAGE_PMD_NR; i++) {
1323                 struct page *page_tail = page + i;
1324                 BUG_ON(page_count(page_tail) <= 0);
1325                 /*
1326                  * Tail pages may be freed if there isn't any mapping
1327                  * left, e.g. if add_to_swap() is running on an lru page
1328                  * that had its mapping zapped. And freeing these pages
1329                  * requires taking the lru_lock so we do the put_page
1330                  * of the tail pages after the split is complete.
1331                  */
1332                 put_page(page_tail);
1333         }
1334
1335         /*
1336          * Only the head page (now become a regular page) is required
1337          * to be pinned by the caller.
1338          */
1339         BUG_ON(page_count(page) <= 0);
1340 }
1341
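/*
 * Rebuild a regular page table for one vma's mapping of the page being
 * split, carrying the write/young bits over from the huge pmd, then
 * replace the huge pmd with it (see the Erratum 383 comment below for
 * the ordering).
 */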
1342 static int __split_huge_page_map(struct page *page,
1343                                  struct vm_area_struct *vma,
1344                                  unsigned long address)
1345 {
1346         struct mm_struct *mm = vma->vm_mm;
1347         pmd_t *pmd, _pmd;
1348         int ret = 0, i;
1349         pgtable_t pgtable;
1350         unsigned long haddr;
1351
1352         spin_lock(&mm->page_table_lock);
1353         pmd = page_check_address_pmd(page, mm, address,
1354                                      PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1355         if (pmd) {
1356                 pgtable = get_pmd_huge_pte(mm);
1357                 pmd_populate(mm, &_pmd, pgtable);
1358
1359                 for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1360                      i++, haddr += PAGE_SIZE) {
1361                         pte_t *pte, entry;
1362                         BUG_ON(PageCompound(page+i));
1363                         entry = mk_pte(page + i, vma->vm_page_prot);
1364                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1365                         if (!pmd_write(*pmd))
1366                                 entry = pte_wrprotect(entry);
1367                         else
1368                                 BUG_ON(page_mapcount(page) != 1);
1369                         if (!pmd_young(*pmd))
1370                                 entry = pte_mkold(entry);
1371                         pte = pte_offset_map(&_pmd, haddr);
1372                         BUG_ON(!pte_none(*pte));
1373                         set_pte_at(mm, haddr, pte, entry);
1374                         pte_unmap(pte);
1375                 }
1376
1377                 smp_wmb(); /* make pte visible before pmd */
1378                 /*
1379                  * Up to this point the pmd is present and huge and
1380                  * userland has the whole access to the hugepage
1381                  * during the split (which happens in place). If we
1382                  * overwrite the pmd with the not-huge version
1383                  * pointing to the pte here (which of course we could
1384                  * if all CPUs were bug free), userland could trigger
1385                  * a small page size TLB miss on the small sized TLB
1386                  * while the hugepage TLB entry is still established
1387                  * in the huge TLB. Some CPUs don't like that. See
1388                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1389                  * Erratum 383 on page 93. Intel should be safe but
1390                  * also warns that it's only safe if the permission
1391                  * and cache attributes of the two entries loaded in
1392                  * the TLB are identical (which should be the case
1393                  * here). But it is generally safer to never allow
1394                  * small and huge TLB entries for the same virtual
1395                  * address to be loaded simultaneously. So instead of
1396                  * doing "pmd_populate(); flush_tlb_range();" we first
1397                  * mark the current pmd notpresent (atomically because
1398                  * here the pmd_trans_huge and pmd_trans_splitting
1399                  * must remain set at all times on the pmd until the
1400                  * split is complete for this pmd), then we flush the
1401                  * SMP TLB and finally we write the non-huge version
1402                  * of the pmd entry with pmd_populate.
1403                  */
1404                 set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1405                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1406                 pmd_populate(mm, pmd, pgtable);
1407                 ret = 1;
1408         }
1409         spin_unlock(&mm->page_table_lock);
1410
1411         return ret;
1412 }
1413
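/*
 * Illustrative sketch (not built into this file): the anti-erratum
 * sequence used by __split_huge_page_map() above, reduced to its three
 * steps. It assumes the caller holds mm->page_table_lock and that
 * @pgtable has already been filled with the HPAGE_PMD_NR regular ptes.
 */
#if 0
static void split_pmd_sequence_sketch(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      pgtable_t pgtable)
{
	/* 1) atomically mark the huge pmd not present (splitting bits stay) */
	set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
	/* 2) shoot down any huge TLB entry on all CPUs */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/* 3) only now install the regular (non-huge) page table */
	pmd_populate(mm, pmd, pgtable);
}
#endif
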
1414 /* must be called with anon_vma->root->mutex held */
1415 static void __split_huge_page(struct page *page,
1416                               struct anon_vma *anon_vma)
1417 {
1418         int mapcount, mapcount2;
1419         struct anon_vma_chain *avc;
1420
1421         BUG_ON(!PageHead(page));
1422         BUG_ON(PageTail(page));
1423
1424         mapcount = 0;
1425         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1426                 struct vm_area_struct *vma = avc->vma;
1427                 unsigned long addr = vma_address(page, vma);
1428                 BUG_ON(is_vma_temporary_stack(vma));
1429                 if (addr == -EFAULT)
1430                         continue;
1431                 mapcount += __split_huge_page_splitting(page, vma, addr);
1432         }
1433         /*
1434          * It is critical that new vmas are added to the tail of the
1435          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1436          * and establishes a child pmd before
1437          * __split_huge_page_splitting() freezes the parent pmd (so if
1438          * we fail to prevent copy_huge_pmd() from running until the
1439          * whole __split_huge_page() is complete), we will still see
1440          * the newly established pmd of the child later during the
1441          * walk, to be able to set it as pmd_trans_splitting too.
1442          */
1443         if (mapcount != page_mapcount(page))
1444                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1445                        mapcount, page_mapcount(page));
1446         BUG_ON(mapcount != page_mapcount(page));
1447
1448         __split_huge_page_refcount(page);
1449
1450         mapcount2 = 0;
1451         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1452                 struct vm_area_struct *vma = avc->vma;
1453                 unsigned long addr = vma_address(page, vma);
1454                 BUG_ON(is_vma_temporary_stack(vma));
1455                 if (addr == -EFAULT)
1456                         continue;
1457                 mapcount2 += __split_huge_page_map(page, vma, addr);
1458         }
1459         if (mapcount != mapcount2)
1460                 printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1461                        mapcount, mapcount2, page_mapcount(page));
1462         BUG_ON(mapcount != mapcount2);
1463 }
1464
1465 int split_huge_page(struct page *page)
1466 {
1467         struct anon_vma *anon_vma;
1468         int ret = 1;
1469
1470         BUG_ON(!PageAnon(page));
1471         anon_vma = page_lock_anon_vma(page);
1472         if (!anon_vma)
1473                 goto out;
1474         ret = 0;
1475         if (!PageCompound(page))
1476                 goto out_unlock;
1477
1478         BUG_ON(!PageSwapBacked(page));
1479         __split_huge_page(page, anon_vma);
1480         count_vm_event(THP_SPLIT);
1481
1482         BUG_ON(PageCompound(page));
1483 out_unlock:
1484         page_unlock_anon_vma(anon_vma);
1485 out:
1486         return ret;
1487 }
1488
1489 #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1490                    VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1491
1492 int hugepage_madvise(struct vm_area_struct *vma,
1493                      unsigned long *vm_flags, int advice)
1494 {
1495         switch (advice) {
1496         case MADV_HUGEPAGE:
1497                 /*
1498                  * Be somewhat over-protective like KSM for now!
1499                  */
1500                 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1501                         return -EINVAL;
1502                 *vm_flags &= ~VM_NOHUGEPAGE;
1503                 *vm_flags |= VM_HUGEPAGE;
1504                 /*
1505                  * If the vma becomes good for khugepaged to scan,
1506                  * register it here without waiting for a page fault that
1507                  * may not happen any time soon.
1508                  */
1509                 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1510                         return -ENOMEM;
1511                 break;
1512         case MADV_NOHUGEPAGE:
1513                 /*
1514                  * Be somewhat over-protective like KSM for now!
1515                  */
1516                 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1517                         return -EINVAL;
1518                 *vm_flags &= ~VM_HUGEPAGE;
1519                 *vm_flags |= VM_NOHUGEPAGE;
1520                 /*
1521                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1522                  * this vma even if we leave the mm registered in khugepaged
1523                  * (it may have been registered before VM_NOHUGEPAGE was set).
1524                  */
1525                 break;
1526         }
1527
1528         return 0;
1529 }
1530
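/*
 * Illustrative userspace sketch (not part of this kernel file): how the
 * two advice values handled by hugepage_madvise() above are typically
 * driven from an application. The helper name and mapping parameters
 * are made up for the example.
 */
#if 0
#include <sys/mman.h>

static void *map_thp_region(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p != MAP_FAILED)
		/* sets VM_HUGEPAGE and registers the mm with khugepaged */
		madvise(p, len, MADV_HUGEPAGE);
	return p;
}
#endif
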
1531 static int __init khugepaged_slab_init(void)
1532 {
1533         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1534                                           sizeof(struct mm_slot),
1535                                           __alignof__(struct mm_slot), 0, NULL);
1536         if (!mm_slot_cache)
1537                 return -ENOMEM;
1538
1539         return 0;
1540 }
1541
1542 static void __init khugepaged_slab_free(void)
1543 {
1544         kmem_cache_destroy(mm_slot_cache);
1545         mm_slot_cache = NULL;
1546 }
1547
1548 static inline struct mm_slot *alloc_mm_slot(void)
1549 {
1550         if (!mm_slot_cache)     /* initialization failed */
1551                 return NULL;
1552         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1553 }
1554
1555 static inline void free_mm_slot(struct mm_slot *mm_slot)
1556 {
1557         kmem_cache_free(mm_slot_cache, mm_slot);
1558 }
1559
1560 static int __init mm_slots_hash_init(void)
1561 {
1562         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1563                                 GFP_KERNEL);
1564         if (!mm_slots_hash)
1565                 return -ENOMEM;
1566         return 0;
1567 }
1568
1569 #if 0
1570 static void __init mm_slots_hash_free(void)
1571 {
1572         kfree(mm_slots_hash);
1573         mm_slots_hash = NULL;
1574 }
1575 #endif
1576
1577 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1578 {
1579         struct mm_slot *mm_slot;
1580         struct hlist_head *bucket;
1581         struct hlist_node *node;
1582
1583         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1584                                 % MM_SLOTS_HASH_HEADS];
1585         hlist_for_each_entry(mm_slot, node, bucket, hash) {
1586                 if (mm == mm_slot->mm)
1587                         return mm_slot;
1588         }
1589         return NULL;
1590 }
1591
1592 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1593                                     struct mm_slot *mm_slot)
1594 {
1595         struct hlist_head *bucket;
1596
1597         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1598                                 % MM_SLOTS_HASH_HEADS];
1599         mm_slot->mm = mm;
1600         hlist_add_head(&mm_slot->hash, bucket);
1601 }
1602
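/*
 * Illustrative sketch (not built into this file): the bucket
 * computation shared by get_mm_slot() and insert_to_mm_slots_hash()
 * above, factored out. Dividing the pointer by
 * sizeof(struct mm_struct) strips the low bits that are equal for
 * every slab-allocated mm_struct before taking the modulus.
 */
#if 0
static inline struct hlist_head *mm_slot_bucket(struct mm_struct *mm)
{
	return &mm_slots_hash[((unsigned long)mm /
			       sizeof(struct mm_struct)) %
			      MM_SLOTS_HASH_HEADS];
}
#endif
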
1603 static inline int khugepaged_test_exit(struct mm_struct *mm)
1604 {
1605         return atomic_read(&mm->mm_users) == 0;
1606 }
1607
1608 int __khugepaged_enter(struct mm_struct *mm)
1609 {
1610         struct mm_slot *mm_slot;
1611         int wakeup;
1612
1613         mm_slot = alloc_mm_slot();
1614         if (!mm_slot)
1615                 return -ENOMEM;
1616
1617         /* __khugepaged_exit() must not run from under us */
1618         VM_BUG_ON(khugepaged_test_exit(mm));
1619         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1620                 free_mm_slot(mm_slot);
1621                 return 0;
1622         }
1623
1624         spin_lock(&khugepaged_mm_lock);
1625         insert_to_mm_slots_hash(mm, mm_slot);
1626         /*
1627          * Insert just behind the scanning cursor, to let the area settle
1628          * down a little.
1629          */
1630         wakeup = list_empty(&khugepaged_scan.mm_head);
1631         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1632         spin_unlock(&khugepaged_mm_lock);
1633
1634         atomic_inc(&mm->mm_count);
1635         if (wakeup)
1636                 wake_up_interruptible(&khugepaged_wait);
1637
1638         return 0;
1639 }
1640
1641 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1642                                unsigned long vm_flags)
1643 {
1644         unsigned long hstart, hend;
1645         if (!vma->anon_vma)
1646                 /*
1647                  * Not yet faulted in so we will register later in the
1648                  * page fault if needed.
1649                  */
1650                 return 0;
1651         if (vma->vm_ops || (vm_flags & VM_NO_THP))
1652                 /* khugepaged not yet working on file or special mappings */
1653                 return 0;
1654         /*
1655          * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1656          * true too; verify it here.
1657          */
1658         VM_BUG_ON(is_linear_pfn_mapping(vma));
1659         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1660         hend = vma->vm_end & HPAGE_PMD_MASK;
1661         if (hstart < hend)
1662                 return khugepaged_enter(vma, vm_flags);
1663         return 0;
1664 }
1665
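/*
 * Worked example (illustrative, assuming a 2MB HPAGE_PMD_SIZE): the
 * rounding in khugepaged_enter_vma_merge() above, for a vma with
 * vm_start = 0x201000 and vm_end = 0x600000:
 *
 *	hstart = (0x201000 + 0x1fffff) & ~0x1fffff = 0x400000
 *	hend   =  0x600000             & ~0x1fffff = 0x600000
 *
 * hstart < hend, so one naturally aligned 2MB range fits inside the
 * vma and the mm is worth registering with khugepaged.
 */
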
1666 void __khugepaged_exit(struct mm_struct *mm)
1667 {
1668         struct mm_slot *mm_slot;
1669         int free = 0;
1670
1671         spin_lock(&khugepaged_mm_lock);
1672         mm_slot = get_mm_slot(mm);
1673         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1674                 hlist_del(&mm_slot->hash);
1675                 list_del(&mm_slot->mm_node);
1676                 free = 1;
1677         }
1678         spin_unlock(&khugepaged_mm_lock);
1679
1680         if (free) {
1681                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1682                 free_mm_slot(mm_slot);
1683                 mmdrop(mm);
1684         } else if (mm_slot) {
1685                 /*
1686                  * This is required to serialize against
1687                  * khugepaged_test_exit() (which is guaranteed to run
1688                  * under mmap_sem read mode). Stop here (once we
1689                  * return, all pagetables will be destroyed) until
1690                  * khugepaged has finished working on the pagetables
1691                  * under the mmap_sem.
1692                  */
1693                 down_write(&mm->mmap_sem);
1694                 up_write(&mm->mmap_sem);
1695         }
1696 }
1697
1698 static void release_pte_page(struct page *page)
1699 {
1700         /* 0 stands for page_is_file_cache(page) == false */
1701         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1702         unlock_page(page);
1703         putback_lru_page(page);
1704 }
1705
1706 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1707 {
1708         while (--_pte >= pte) {
1709                 pte_t pteval = *_pte;
1710                 if (!pte_none(pteval))
1711                         release_pte_page(pte_page(pteval));
1712         }
1713 }
1714
1715 static void release_all_pte_pages(pte_t *pte)
1716 {
1717         release_pte_pages(pte, pte + HPAGE_PMD_NR);
1718 }
1719
1720 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1721                                         unsigned long address,
1722                                         pte_t *pte)
1723 {
1724         struct page *page;
1725         pte_t *_pte;
1726         int referenced = 0, isolated = 0, none = 0;
1727         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1728              _pte++, address += PAGE_SIZE) {
1729                 pte_t pteval = *_pte;
1730                 if (pte_none(pteval)) {
1731                         if (++none <= khugepaged_max_ptes_none)
1732                                 continue;
1733                         else {
1734                                 release_pte_pages(pte, _pte);
1735                                 goto out;
1736                         }
1737                 }
1738                 if (!pte_present(pteval) || !pte_write(pteval)) {
1739                         release_pte_pages(pte, _pte);
1740                         goto out;
1741                 }
1742                 page = vm_normal_page(vma, address, pteval);
1743                 if (unlikely(!page)) {
1744                         release_pte_pages(pte, _pte);
1745                         goto out;
1746                 }
1747                 VM_BUG_ON(PageCompound(page));
1748                 BUG_ON(!PageAnon(page));
1749                 VM_BUG_ON(!PageSwapBacked(page));
1750
1751                 /* cannot use mapcount: can't collapse if there's a gup pin */
1752                 if (page_count(page) != 1) {
1753                         release_pte_pages(pte, _pte);
1754                         goto out;
1755                 }
1756                 /*
1757                  * We can do it before isolate_lru_page because the
1758                  * page can't be freed from under us. NOTE: PG_lock
1759                  * is needed to serialize against split_huge_page
1760                  * when invoked from the VM.
1761                  */
1762                 if (!trylock_page(page)) {
1763                         release_pte_pages(pte, _pte);
1764                         goto out;
1765                 }
1766                 /*
1767                  * Isolate the page to avoid collapsing a hugepage
1768                  * currently in use by the VM.
1769                  */
1770                 if (isolate_lru_page(page)) {
1771                         unlock_page(page);
1772                         release_pte_pages(pte, _pte);
1773                         goto out;
1774                 }
1775                 /* 0 stands for page_is_file_cache(page) == false */
1776                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1777                 VM_BUG_ON(!PageLocked(page));
1778                 VM_BUG_ON(PageLRU(page));
1779
1780                 /* If no mapped pte is young, don't collapse the page */
1781                 if (pte_young(pteval) || PageReferenced(page) ||
1782                     mmu_notifier_test_young(vma->vm_mm, address))
1783                         referenced = 1;
1784         }
1785         if (unlikely(!referenced))
1786                 release_all_pte_pages(pte);
1787         else
1788                 isolated = 1;
1789 out:
1790         return isolated;
1791 }
1792
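/*
 * Worked example (illustrative): with 4KB base pages and a 2MB
 * hugepage, HPAGE_PMD_NR is 512, so __collapse_huge_page_isolate()
 * above tolerates at most khugepaged_max_ptes_none empty ptes out of
 * the 512; every mapped pte must additionally be present, writable,
 * anonymous, unpinned (page_count == 1), lockable and isolatable from
 * the LRU, and at least one must look referenced, or the whole range
 * is rolled back via release_pte_pages().
 */
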
1793 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1794                                       struct vm_area_struct *vma,
1795                                       unsigned long address,
1796                                       spinlock_t *ptl)
1797 {
1798         pte_t *_pte;
1799         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1800                 pte_t pteval = *_pte;
1801                 struct page *src_page;
1802
1803                 if (pte_none(pteval)) {
1804                         clear_user_highpage(page, address);
1805                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1806                 } else {
1807                         src_page = pte_page(pteval);
1808                         copy_user_highpage(page, src_page, address, vma);
1809                         VM_BUG_ON(page_mapcount(src_page) != 1);
1810                         VM_BUG_ON(page_count(src_page) != 2);
1811                         release_pte_page(src_page);
1812                         /*
1813                          * ptl mostly unnecessary, but preempt has to
1814                          * be disabled to update the per-cpu stats
1815                          * inside page_remove_rmap().
1816                          */
1817                         spin_lock(ptl);
1818                         /*
1819                          * paravirt calls inside pte_clear here are
1820                          * superfluous.
1821                          */
1822                         pte_clear(vma->vm_mm, address, _pte);
1823                         page_remove_rmap(src_page);
1824                         spin_unlock(ptl);
1825                         free_page_and_swap_cache(src_page);
1826                 }
1827
1828                 address += PAGE_SIZE;
1829                 page++;
1830         }
1831 }
1832
1833 static bool hugepage_vma_check(struct vm_area_struct *vma)
1834 {
1835         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1836             (vma->vm_flags & VM_NOHUGEPAGE))
1837                 return false;
1838
1839         if (!vma->anon_vma || vma->vm_ops)
1840                 return false;
1841         if (is_vma_temporary_stack(vma))
1842                 return false;
1843         /*
1844          * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1845          * true too; verify it here.
1846          */
1847         VM_BUG_ON(is_linear_pfn_mapping(vma));
1848         return !(vma->vm_flags & VM_NO_THP);
1849 }
1850
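/*
 * Illustrative summary (not code): a vma passes hugepage_vma_check()
 * above only when THP is enabled for it (VM_HUGEPAGE set, or
 * khugepaged_always() mode) and not disabled (VM_NOHUGEPAGE clear), it
 * is anonymous (anon_vma set, no vm_ops), it is not a temporary stack,
 * and none of the VM_NO_THP flags are set.
 */
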
1851 static void collapse_huge_page(struct mm_struct *mm,
1852                                unsigned long address,
1853                                struct page **hpage,
1854                                struct vm_area_struct *vma,
1855                                int node)
1856 {
1857         pgd_t *pgd;
1858         pud_t *pud;
1859         pmd_t *pmd, _pmd;
1860         pte_t *pte;
1861         pgtable_t pgtable;
1862         struct page *new_page;
1863         spinlock_t *ptl;
1864         int isolated;
1865         unsigned long hstart, hend;
1866
1867         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1868 #ifndef CONFIG_NUMA
1869         up_read(&mm->mmap_sem);
1870         VM_BUG_ON(!*hpage);
1871         new_page = *hpage;
1872 #else
1873         VM_BUG_ON(*hpage);
1874         /*
1875          * Allocate the page while the vma is still valid and under
1876          * the mmap_sem read mode so there is no memory allocation
1877          * later when we take the mmap_sem in write mode. This is
1878          * friendlier behavior (OTOH it may actually hide bugs) towards
1879          * userland filesystems whose daemons allocate memory in
1880          * the userland I/O paths.  Allocating memory with the
1881          * mmap_sem held in read mode is also a good idea to allow
1882          * greater scalability.
1883          */
1884         new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1885                                       node, __GFP_OTHER_NODE);
1886
1887         /*
1888          * After allocating the hugepage, release the mmap_sem read lock in
1889          * preparation for taking it in write mode.
1890          */
1891         up_read(&mm->mmap_sem);
1892         if (unlikely(!new_page)) {
1893                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1894                 *hpage = ERR_PTR(-ENOMEM);
1895                 return;
1896         }
1897 #endif
1898
1899         count_vm_event(THP_COLLAPSE_ALLOC);
1900         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1901 #ifdef CONFIG_NUMA
1902                 put_page(new_page);
1903 #endif
1904                 return;
1905         }
1906
1907         /*
1908          * Prevent all access to pagetables, with the exception of
1909          * gup_fast (later handled by the ptep_clear_flush) and the VM
1910          * (handled by the anon_vma lock + PG_lock).
1911          */
1912         down_write(&mm->mmap_sem);
1913         if (unlikely(khugepaged_test_exit(mm)))
1914                 goto out;
1915
1916         vma = find_vma(mm, address);
1917         if (!vma)
1918                 goto out;
1919         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1920         hend = vma->vm_end & HPAGE_PMD_MASK;
1921         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1922                 goto out;
1923         if (!hugepage_vma_check(vma))
1924                 goto out;
1925         pgd = pgd_offset(mm, address);
1926         if (!pgd_present(*pgd))
1927                 goto out;
1928
1929         pud = pud_offset(pgd, address);
1930         if (!pud_present(*pud))
1931                 goto out;
1932
1933         pmd = pmd_offset(pud, address);
1934         /* pmd can't go away or become huge under us */
1935         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1936                 goto out;
1937
1938         anon_vma_lock(vma->anon_vma);
1939
1940         pte = pte_offset_map(pmd, address);
1941         ptl = pte_lockptr(mm, pmd);
1942
1943         spin_lock(&mm->page_table_lock); /* probably unnecessary */
1944         /*
1945          * After this gup_fast can't run anymore. This also removes
1946          * any huge TLB entry from the CPU so we won't allow
1947          * huge and small TLB entries for the same virtual address
1948          * to avoid the risk of CPU bugs in that area.
1949          */
1950         _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1951         spin_unlock(&mm->page_table_lock);
1952
1953         spin_lock(ptl);
1954         isolated = __collapse_huge_page_isolate(vma, address, pte);
1955         spin_unlock(ptl);
1956
1957         if (unlikely(!isolated)) {
1958                 pte_unmap(pte);
1959                 spin_lock(&mm->page_table_lock);
1960                 BUG_ON(!pmd_none(*pmd));
1961                 /*
1962                  * We can only use set_pmd_at when establishing
1963                  * hugepmds and never for establishing regular pmds that
1964                  * point to regular pagetables. Use pmd_populate for that.
1965                  */
1966                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1967                 spin_unlock(&mm->page_table_lock);
1968                 anon_vma_unlock(vma->anon_vma);
1969                 goto out;
1970         }
1971
1972         /*
1973          * All pages are isolated and locked so anon_vma rmap
1974          * can't run anymore.
1975          */
1976         anon_vma_unlock(vma->anon_vma);
1977
1978         __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1979         pte_unmap(pte);
1980         __SetPageUptodate(new_page);
1981         pgtable = pmd_pgtable(_pmd);
1982         VM_BUG_ON(page_count(pgtable) != 1);
1983         VM_BUG_ON(page_mapcount(pgtable) != 0);
1984
1985         _pmd = mk_pmd(new_page, vma->vm_page_prot);
1986         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1987         _pmd = pmd_mkhuge(_pmd);
1988
1989         /*
1990          * spin_lock() below is not the equivalent of smp_wmb(), so
1991          * this is needed to prevent the copy_huge_page writes from
1992          * becoming visible after the set_pmd_at() write.
1993          */
1994         smp_wmb();
1995
1996         spin_lock(&mm->page_table_lock);
1997         BUG_ON(!pmd_none(*pmd));
1998         page_add_new_anon_rmap(new_page, vma, address);
1999         set_pmd_at(mm, address, pmd, _pmd);
2000         update_mmu_cache(vma, address, _pmd);
2001         prepare_pmd_huge_pte(pgtable, mm);
2002         spin_unlock(&mm->page_table_lock);
2003
2004 #ifndef CONFIG_NUMA
2005         *hpage = NULL;
2006 #endif
2007         khugepaged_pages_collapsed++;
2008 out_up_write:
2009         up_write(&mm->mmap_sem);
2010         return;
2011
2012 out:
2013         mem_cgroup_uncharge_page(new_page);
2014 #ifdef CONFIG_NUMA
2015         put_page(new_page);
2016 #endif
2017         goto out_up_write;
2018 }
2019
2020 static int khugepaged_scan_pmd(struct mm_struct *mm,
2021                                struct vm_area_struct *vma,
2022                                unsigned long address,
2023                                struct page **hpage)
2024 {
2025         pgd_t *pgd;
2026         pud_t *pud;
2027         pmd_t *pmd;
2028         pte_t *pte, *_pte;
2029         int ret = 0, referenced = 0, none = 0;
2030         struct page *page;
2031         unsigned long _address;
2032         spinlock_t *ptl;
2033         int node = -1;
2034
2035         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2036
2037         pgd = pgd_offset(mm, address);
2038         if (!pgd_present(*pgd))
2039                 goto out;
2040
2041         pud = pud_offset(pgd, address);
2042         if (!pud_present(*pud))
2043                 goto out;
2044
2045         pmd = pmd_offset(pud, address);
2046         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2047                 goto out;
2048
2049         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2050         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2051              _pte++, _address += PAGE_SIZE) {
2052                 pte_t pteval = *_pte;
2053                 if (pte_none(pteval)) {
2054                         if (++none <= khugepaged_max_ptes_none)
2055                                 continue;
2056                         else
2057                                 goto out_unmap;
2058                 }
2059                 if (!pte_present(pteval) || !pte_write(pteval))
2060                         goto out_unmap;
2061                 page = vm_normal_page(vma, _address, pteval);
2062                 if (unlikely(!page))
2063                         goto out_unmap;
2064                 /*
2065                  * Choose the node of the first page. This could
2066                  * be more sophisticated and look at more pages,
2067                  * but isn't for now.
2068                  */
2069                 if (node == -1)
2070                         node = page_to_nid(page);
2071                 VM_BUG_ON(PageCompound(page));
2072                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2073                         goto out_unmap;
2074                 /* cannot use mapcount: can't collapse if there's a gup pin */
2075                 if (page_count(page) != 1)
2076                         goto out_unmap;
2077                 if (pte_young(pteval) || PageReferenced(page) ||
2078                     mmu_notifier_test_young(vma->vm_mm, address))
2079                         referenced = 1;
2080         }
2081         if (referenced)
2082                 ret = 1;
2083 out_unmap:
2084         pte_unmap_unlock(pte, ptl);
2085         if (ret)
2086                 /* collapse_huge_page will return with the mmap_sem released */
2087                 collapse_huge_page(mm, address, hpage, vma, node);
2088 out:
2089         return ret;
2090 }
2091
2092 static void collect_mm_slot(struct mm_slot *mm_slot)
2093 {
2094         struct mm_struct *mm = mm_slot->mm;
2095
2096         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2097
2098         if (khugepaged_test_exit(mm)) {
2099                 /* free mm_slot */
2100                 hlist_del(&mm_slot->hash);
2101                 list_del(&mm_slot->mm_node);
2102
2103                 /*
2104                  * Not strictly needed because the mm exited already.
2105                  *
2106                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2107                  */
2108
2109                 /* khugepaged_mm_lock actually not necessary for the below */
2110                 free_mm_slot(mm_slot);
2111                 mmdrop(mm);
2112         }
2113 }
2114
2115 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2116                                             struct page **hpage)
2117         __releases(&khugepaged_mm_lock)
2118         __acquires(&khugepaged_mm_lock)
2119 {
2120         struct mm_slot *mm_slot;
2121         struct mm_struct *mm;
2122         struct vm_area_struct *vma;
2123         int progress = 0;
2124
2125         VM_BUG_ON(!pages);
2126         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2127
2128         if (khugepaged_scan.mm_slot)
2129                 mm_slot = khugepaged_scan.mm_slot;
2130         else {
2131                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2132                                      struct mm_slot, mm_node);
2133                 khugepaged_scan.address = 0;
2134                 khugepaged_scan.mm_slot = mm_slot;
2135         }
2136         spin_unlock(&khugepaged_mm_lock);
2137
2138         mm = mm_slot->mm;
2139         down_read(&mm->mmap_sem);
2140         if (unlikely(khugepaged_test_exit(mm)))
2141                 vma = NULL;
2142         else
2143                 vma = find_vma(mm, khugepaged_scan.address);
2144
2145         progress++;
2146         for (; vma; vma = vma->vm_next) {
2147                 unsigned long hstart, hend;
2148
2149                 cond_resched();
2150                 if (unlikely(khugepaged_test_exit(mm))) {
2151                         progress++;
2152                         break;
2153                 }
2154                 if (!hugepage_vma_check(vma)) {
2155 skip:
2156                         progress++;
2157                         continue;
2158                 }
2159                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2160                 hend = vma->vm_end & HPAGE_PMD_MASK;
2161                 if (hstart >= hend)
2162                         goto skip;
2163                 if (khugepaged_scan.address > hend)
2164                         goto skip;
2165                 if (khugepaged_scan.address < hstart)
2166                         khugepaged_scan.address = hstart;
2167                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2168
2169                 while (khugepaged_scan.address < hend) {
2170                         int ret;
2171                         cond_resched();
2172                         if (unlikely(khugepaged_test_exit(mm)))
2173                                 goto breakouterloop;
2174
2175                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2176                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2177                                   hend);
2178                         ret = khugepaged_scan_pmd(mm, vma,
2179                                                   khugepaged_scan.address,
2180                                                   hpage);
2181                         /* move to next address */
2182                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2183                         progress += HPAGE_PMD_NR;
2184                         if (ret)
2185                                 /* we released mmap_sem so break loop */
2186                                 goto breakouterloop_mmap_sem;
2187                         if (progress >= pages)
2188                                 goto breakouterloop;
2189                 }
2190         }
2191 breakouterloop:
2192         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2193 breakouterloop_mmap_sem:
2194
2195         spin_lock(&khugepaged_mm_lock);
2196         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2197         /*
2198          * Release the current mm_slot if this mm is about to die, or
2199          * if we scanned all vmas of this mm.
2200          */
2201         if (khugepaged_test_exit(mm) || !vma) {
2202                 /*
2203                  * Make sure that if mm_users reaches zero while
2204                  * khugepaged runs here, khugepaged_exit will find
2205                  * mm_slot not pointing to the exiting mm.
2206                  */
2207                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2208                         khugepaged_scan.mm_slot = list_entry(
2209                                 mm_slot->mm_node.next,
2210                                 struct mm_slot, mm_node);
2211                         khugepaged_scan.address = 0;
2212                 } else {
2213                         khugepaged_scan.mm_slot = NULL;
2214                         khugepaged_full_scans++;
2215                 }
2216
2217                 collect_mm_slot(mm_slot);
2218         }
2219
2220         return progress;
2221 }
2222
2223 static int khugepaged_has_work(void)
2224 {
2225         return !list_empty(&khugepaged_scan.mm_head) &&
2226                 khugepaged_enabled();
2227 }
2228
2229 static int khugepaged_wait_event(void)
2230 {
2231         return !list_empty(&khugepaged_scan.mm_head) ||
2232                 !khugepaged_enabled();
2233 }
2234
2235 static void khugepaged_do_scan(struct page **hpage)
2236 {
2237         unsigned int progress = 0, pass_through_head = 0;
2238         unsigned int pages = khugepaged_pages_to_scan;
2239
2240         barrier(); /* write khugepaged_pages_to_scan to local stack */
2241
2242         while (progress < pages) {
2243                 cond_resched();
2244
2245 #ifndef CONFIG_NUMA
2246                 if (!*hpage) {
2247                         *hpage = alloc_hugepage(khugepaged_defrag());
2248                         if (unlikely(!*hpage)) {
2249                                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2250                                 break;
2251                         }
2252                         count_vm_event(THP_COLLAPSE_ALLOC);
2253                 }
2254 #else
2255                 if (IS_ERR(*hpage))
2256                         break;
2257 #endif
2258
2259                 if (unlikely(kthread_should_stop() || freezing(current)))
2260                         break;
2261
2262                 spin_lock(&khugepaged_mm_lock);
2263                 if (!khugepaged_scan.mm_slot)
2264                         pass_through_head++;
2265                 if (khugepaged_has_work() &&
2266                     pass_through_head < 2)
2267                         progress += khugepaged_scan_mm_slot(pages - progress,
2268                                                             hpage);
2269                 else
2270                         progress = pages;
2271                 spin_unlock(&khugepaged_mm_lock);
2272         }
2273 }
2274
2275 static void khugepaged_alloc_sleep(void)
2276 {
2277         wait_event_freezable_timeout(khugepaged_wait, false,
2278                         msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2279 }
2280
2281 #ifndef CONFIG_NUMA
2282 static struct page *khugepaged_alloc_hugepage(void)
2283 {
2284         struct page *hpage;
2285
2286         do {
2287                 hpage = alloc_hugepage(khugepaged_defrag());
2288                 if (!hpage) {
2289                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2290                         khugepaged_alloc_sleep();
2291                 } else
2292                         count_vm_event(THP_COLLAPSE_ALLOC);
2293         } while (unlikely(!hpage) &&
2294                  likely(khugepaged_enabled()));
2295         return hpage;
2296 }
2297 #endif
2298
2299 static void khugepaged_loop(void)
2300 {
2301         struct page *hpage;
2302
2303 #ifdef CONFIG_NUMA
2304         hpage = NULL;
2305 #endif
2306         while (likely(khugepaged_enabled())) {
2307 #ifndef CONFIG_NUMA
2308                 hpage = khugepaged_alloc_hugepage();
2309                 if (unlikely(!hpage))
2310                         break;
2311 #else
2312                 if (IS_ERR(hpage)) {
2313                         khugepaged_alloc_sleep();
2314                         hpage = NULL;
2315                 }
2316 #endif
2317
2318                 khugepaged_do_scan(&hpage);
2319 #ifndef CONFIG_NUMA
2320                 if (hpage)
2321                         put_page(hpage);
2322 #endif
2323                 try_to_freeze();
2324                 if (unlikely(kthread_should_stop()))
2325                         break;
2326                 if (khugepaged_has_work()) {
2327                         if (!khugepaged_scan_sleep_millisecs)
2328                                 continue;
2329                         wait_event_freezable_timeout(khugepaged_wait, false,
2330                             msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2331                 } else if (khugepaged_enabled())
2332                         wait_event_freezable(khugepaged_wait,
2333                                              khugepaged_wait_event());
2334         }
2335 }
2336
2337 static int khugepaged(void *none)
2338 {
2339         struct mm_slot *mm_slot;
2340
2341         set_freezable();
2342         set_user_nice(current, 19);
2343
2344         /* serialize with start_khugepaged() */
2345         mutex_lock(&khugepaged_mutex);
2346
2347         for (;;) {
2348                 mutex_unlock(&khugepaged_mutex);
2349                 VM_BUG_ON(khugepaged_thread != current);
2350                 khugepaged_loop();
2351                 VM_BUG_ON(khugepaged_thread != current);
2352
2353                 mutex_lock(&khugepaged_mutex);
2354                 if (!khugepaged_enabled())
2355                         break;
2356                 if (unlikely(kthread_should_stop()))
2357                         break;
2358         }
2359
2360         spin_lock(&khugepaged_mm_lock);
2361         mm_slot = khugepaged_scan.mm_slot;
2362         khugepaged_scan.mm_slot = NULL;
2363         if (mm_slot)
2364                 collect_mm_slot(mm_slot);
2365         spin_unlock(&khugepaged_mm_lock);
2366
2367         khugepaged_thread = NULL;
2368         mutex_unlock(&khugepaged_mutex);
2369
2370         return 0;
2371 }
2372
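/*
 * Illustrative sketch (not part of this file): the minimal kthread
 * pattern for starting a daemon like khugepaged() above. In this
 * kernel the real start/stop logic lives in start_khugepaged(); the
 * helper name here is made up.
 */
#if 0
static int start_khugepaged_sketch(void)
{
	struct task_struct *t;

	t = kthread_run(khugepaged, NULL, "khugepaged");
	if (IS_ERR(t))
		return PTR_ERR(t);
	return 0;
}
#endif
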
2373 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2374 {
2375         struct page *page;
2376
2377         spin_lock(&mm->page_table_lock);
2378         if (unlikely(!pmd_trans_huge(*pmd))) {
2379                 spin_unlock(&mm->page_table_lock);
2380                 return;
2381         }
2382         page = pmd_page(*pmd);
2383         VM_BUG_ON(!page_count(page));
2384         get_page(page);
2385         spin_unlock(&mm->page_table_lock);
2386
2387         split_huge_page(page);
2388
2389         put_page(page);
2390         BUG_ON(pmd_trans_huge(*pmd));
2391 }
2392
2393 static void split_huge_page_address(struct mm_struct *mm,
2394                                     unsigned long address)
2395 {
2396         pgd_t *pgd;
2397         pud_t *pud;
2398         pmd_t *pmd;
2399
2400         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2401
2402         pgd = pgd_offset(mm, address);
2403         if (!pgd_present(*pgd))
2404                 return;
2405
2406         pud = pud_offset(pgd, address);
2407         if (!pud_present(*pud))
2408                 return;
2409
2410         pmd = pmd_offset(pud, address);
2411         if (!pmd_present(*pmd))
2412                 return;
2413         /*
2414          * Caller holds the mmap_sem write mode, so a huge pmd cannot
2415          * materialize from under us.
2416          */
2417         split_huge_page_pmd(mm, pmd);
2418 }
2419
2420 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2421                              unsigned long start,
2422                              unsigned long end,
2423                              long adjust_next)
2424 {
2425         /*
2426          * If the new start address isn't hpage aligned and it could
2427          * previously contain a hugepage: check if we need to split
2428          * a huge pmd.
2429          */
2430         if (start & ~HPAGE_PMD_MASK &&
2431             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2432             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2433                 split_huge_page_address(vma->vm_mm, start);
2434
2435         /*
2436          * If the new end address isn't hpage aligned and it could
2437          * previously contain a hugepage: check if we need to split
2438          * a huge pmd.
2439          */
2440         if (end & ~HPAGE_PMD_MASK &&
2441             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2442             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2443                 split_huge_page_address(vma->vm_mm, end);
2444
2445         /*
2446          * If we're also updating vma->vm_next->vm_start, and the new
2447          * vm_next->vm_start isn't hpage aligned and it could previously
2448          * contain a hugepage: check if we need to split a huge pmd.
2449          */
2450         if (adjust_next > 0) {
2451                 struct vm_area_struct *next = vma->vm_next;
2452                 unsigned long nstart = next->vm_start;
2453                 nstart += adjust_next << PAGE_SHIFT;
2454                 if (nstart & ~HPAGE_PMD_MASK &&
2455                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2456                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2457                         split_huge_page_address(next->vm_mm, nstart);
2458         }
2459 }
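
/*
 * Worked example (illustrative, assuming a 2MB HPAGE_PMD_SIZE): say
 * __vma_adjust_trans_huge() runs while a vma [0x400000, 0x800000) is
 * being shrunk so that the new end is 0x500000:
 *
 *	end & ~HPAGE_PMD_MASK     = 0x100000  (not hpage aligned)
 *	end & HPAGE_PMD_MASK      = 0x400000  >= vm_start
 *	0x400000 + HPAGE_PMD_SIZE = 0x600000  <= vm_end
 *
 * so split_huge_page_address() splits the huge pmd covering
 * [0x400000, 0x600000) before the vma is adjusted under it.
 */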