[pandora-kernel.git] / mm / huge_memory.c
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <asm/tlb.h>
19 #include <asm/pgalloc.h>
20 #include "internal.h"
21
22 /*
23  * By default transparent hugepage support is enabled for all mappings
24  * and khugepaged scans all mappings. Defrag is only invoked by
25  * khugepaged hugepage allocations and by page faults inside
26  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
27  * allocations.
28  */
29 unsigned long transparent_hugepage_flags __read_mostly =
30         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
31         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
32         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
33
34 /* by default scan 8*512 ptes (or vmas) every 10 seconds */
35 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
36 static unsigned int khugepaged_pages_collapsed;
37 static unsigned int khugepaged_full_scans;
38 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
39 /* during fragmentation poll the hugepage allocator once every minute */
40 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
41 static struct task_struct *khugepaged_thread __read_mostly;
42 static DEFINE_MUTEX(khugepaged_mutex);
43 static DEFINE_SPINLOCK(khugepaged_mm_lock);
44 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
45 /*
46  * By default, collapse a hugepage if at least one pte is mapped, just as
47  * would have happened if the vma had been large enough during the page
48  * fault.
49  */
50 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
51
52 static int khugepaged(void *none);
53 static int mm_slots_hash_init(void);
54 static int khugepaged_slab_init(void);
55 static void khugepaged_slab_free(void);
56
57 #define MM_SLOTS_HASH_HEADS 1024
58 static struct hlist_head *mm_slots_hash __read_mostly;
59 static struct kmem_cache *mm_slot_cache __read_mostly;
60
61 /**
62  * struct mm_slot - hash lookup from mm to mm_slot
63  * @hash: hash collision list
64  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
65  * @mm: the mm that this information is valid for
66  */
67 struct mm_slot {
68         struct hlist_node hash;
69         struct list_head mm_node;
70         struct mm_struct *mm;
71 };
72
73 /**
74  * struct khugepaged_scan - cursor for scanning
75  * @mm_head: the head of the mm list to scan
76  * @mm_slot: the current mm_slot we are scanning
77  * @address: the next address inside that mm to be scanned
78  *
79  * There is only one khugepaged_scan instance of this cursor structure.
80  */
81 struct khugepaged_scan {
82         struct list_head mm_head;
83         struct mm_slot *mm_slot;
84         unsigned long address;
85 } khugepaged_scan = {
86         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
87 };
88
89
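/*
 * Raise min_free_kbytes so that enough pageblocks stay free (or almost
 * free) of other migratetypes for hugepage allocations to succeed, but
 * never reserve more than 5% of lowmem.  Called at init time and
 * whenever transparent hugepages are (re)enabled.
 */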
90 static int set_recommended_min_free_kbytes(void)
91 {
92         struct zone *zone;
93         int nr_zones = 0;
94         unsigned long recommended_min;
95         extern int min_free_kbytes;
96
97         if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
98                       &transparent_hugepage_flags) &&
99             !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
100                       &transparent_hugepage_flags))
101                 return 0;
102
103         for_each_populated_zone(zone)
104                 nr_zones++;
105
106         /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
107         recommended_min = pageblock_nr_pages * nr_zones * 2;
108
109         /*
110          * Make sure that on average at least two pageblocks are almost free
111          * of another type, one for a migratetype to fall back to and a
112          * second to avoid subsequent fallbacks of other types. There are 3
113          * MIGRATE_TYPES we care about.
114          */
115         recommended_min += pageblock_nr_pages * nr_zones *
116                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
117
118         /* never allow more than 5% of the lowmem to be reserved */
119         recommended_min = min(recommended_min,
120                               (unsigned long) nr_free_buffer_pages() / 20);
121         recommended_min <<= (PAGE_SHIFT-10);
122
123         if (recommended_min > min_free_kbytes)
124                 min_free_kbytes = recommended_min;
125         setup_per_zone_wmarks();
126         return 0;
127 }
128 late_initcall(set_recommended_min_free_kbytes);
129
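/*
 * Start the khugepaged kernel thread if hugepages are enabled, or wake
 * it up so it can exit if they have been disabled.  Called at init time
 * and whenever the sysfs "enabled" setting changes.
 */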
130 static int start_khugepaged(void)
131 {
132         int err = 0;
133         if (khugepaged_enabled()) {
134                 int wakeup;
135                 if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
136                         err = -ENOMEM;
137                         goto out;
138                 }
139                 mutex_lock(&khugepaged_mutex);
140                 if (!khugepaged_thread)
141                         khugepaged_thread = kthread_run(khugepaged, NULL,
142                                                         "khugepaged");
143                 if (unlikely(IS_ERR(khugepaged_thread))) {
144                         printk(KERN_ERR
145                                "khugepaged: kthread_run(khugepaged) failed\n");
146                         err = PTR_ERR(khugepaged_thread);
147                         khugepaged_thread = NULL;
148                 }
149                 wakeup = !list_empty(&khugepaged_scan.mm_head);
150                 mutex_unlock(&khugepaged_mutex);
151                 if (wakeup)
152                         wake_up_interruptible(&khugepaged_wait);
153
154                 set_recommended_min_free_kbytes();
155         } else
156                 /* wakeup to exit */
157                 wake_up_interruptible(&khugepaged_wait);
158 out:
159         return err;
160 }
161
162 #ifdef CONFIG_SYSFS
163
164 static ssize_t double_flag_show(struct kobject *kobj,
165                                 struct kobj_attribute *attr, char *buf,
166                                 enum transparent_hugepage_flag enabled,
167                                 enum transparent_hugepage_flag req_madv)
168 {
169         if (test_bit(enabled, &transparent_hugepage_flags)) {
170                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
171                 return sprintf(buf, "[always] madvise never\n");
172         } else if (test_bit(req_madv, &transparent_hugepage_flags))
173                 return sprintf(buf, "always [madvise] never\n");
174         else
175                 return sprintf(buf, "always madvise [never]\n");
176 }
177 static ssize_t double_flag_store(struct kobject *kobj,
178                                  struct kobj_attribute *attr,
179                                  const char *buf, size_t count,
180                                  enum transparent_hugepage_flag enabled,
181                                  enum transparent_hugepage_flag req_madv)
182 {
183         if (!memcmp("always", buf,
184                     min(sizeof("always")-1, count))) {
185                 set_bit(enabled, &transparent_hugepage_flags);
186                 clear_bit(req_madv, &transparent_hugepage_flags);
187         } else if (!memcmp("madvise", buf,
188                            min(sizeof("madvise")-1, count))) {
189                 clear_bit(enabled, &transparent_hugepage_flags);
190                 set_bit(req_madv, &transparent_hugepage_flags);
191         } else if (!memcmp("never", buf,
192                            min(sizeof("never")-1, count))) {
193                 clear_bit(enabled, &transparent_hugepage_flags);
194                 clear_bit(req_madv, &transparent_hugepage_flags);
195         } else
196                 return -EINVAL;
197
198         return count;
199 }
200
201 static ssize_t enabled_show(struct kobject *kobj,
202                             struct kobj_attribute *attr, char *buf)
203 {
204         return double_flag_show(kobj, attr, buf,
205                                 TRANSPARENT_HUGEPAGE_FLAG,
206                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
207 }
208 static ssize_t enabled_store(struct kobject *kobj,
209                              struct kobj_attribute *attr,
210                              const char *buf, size_t count)
211 {
212         ssize_t ret;
213
214         ret = double_flag_store(kobj, attr, buf, count,
215                                 TRANSPARENT_HUGEPAGE_FLAG,
216                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
217
218         if (ret > 0) {
219                 int err = start_khugepaged();
220                 if (err)
221                         ret = err;
222         }
223
224         if (ret > 0 &&
225             (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
226                       &transparent_hugepage_flags) ||
227              test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
228                       &transparent_hugepage_flags)))
229                 set_recommended_min_free_kbytes();
230
231         return ret;
232 }
233 static struct kobj_attribute enabled_attr =
234         __ATTR(enabled, 0644, enabled_show, enabled_store);
235
236 static ssize_t single_flag_show(struct kobject *kobj,
237                                 struct kobj_attribute *attr, char *buf,
238                                 enum transparent_hugepage_flag flag)
239 {
240         if (test_bit(flag, &transparent_hugepage_flags))
241                 return sprintf(buf, "[yes] no\n");
242         else
243                 return sprintf(buf, "yes [no]\n");
244 }
245 static ssize_t single_flag_store(struct kobject *kobj,
246                                  struct kobj_attribute *attr,
247                                  const char *buf, size_t count,
248                                  enum transparent_hugepage_flag flag)
249 {
250         if (!memcmp("yes", buf,
251                     min(sizeof("yes")-1, count))) {
252                 set_bit(flag, &transparent_hugepage_flags);
253         } else if (!memcmp("no", buf,
254                            min(sizeof("no")-1, count))) {
255                 clear_bit(flag, &transparent_hugepage_flags);
256         } else
257                 return -EINVAL;
258
259         return count;
260 }
261
262 /*
263  * Currently defrag only controls whether the allocation is allowed to
264  * wait (__GFP_WAIT). A blind __GFP_REPEAT would be too aggressive: it's
265  * never worth swapping tons of memory just to allocate one more hugepage.
266  */
267 static ssize_t defrag_show(struct kobject *kobj,
268                            struct kobj_attribute *attr, char *buf)
269 {
270         return double_flag_show(kobj, attr, buf,
271                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
272                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
273 }
274 static ssize_t defrag_store(struct kobject *kobj,
275                             struct kobj_attribute *attr,
276                             const char *buf, size_t count)
277 {
278         return double_flag_store(kobj, attr, buf, count,
279                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
280                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
281 }
282 static struct kobj_attribute defrag_attr =
283         __ATTR(defrag, 0644, defrag_show, defrag_store);
284
285 #ifdef CONFIG_DEBUG_VM
286 static ssize_t debug_cow_show(struct kobject *kobj,
287                                 struct kobj_attribute *attr, char *buf)
288 {
289         return single_flag_show(kobj, attr, buf,
290                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
291 }
292 static ssize_t debug_cow_store(struct kobject *kobj,
293                                struct kobj_attribute *attr,
294                                const char *buf, size_t count)
295 {
296         return single_flag_store(kobj, attr, buf, count,
297                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
298 }
299 static struct kobj_attribute debug_cow_attr =
300         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
301 #endif /* CONFIG_DEBUG_VM */
302
303 static struct attribute *hugepage_attr[] = {
304         &enabled_attr.attr,
305         &defrag_attr.attr,
306 #ifdef CONFIG_DEBUG_VM
307         &debug_cow_attr.attr,
308 #endif
309         NULL,
310 };
311
312 static struct attribute_group hugepage_attr_group = {
313         .attrs = hugepage_attr,
314 };
315
316 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
317                                          struct kobj_attribute *attr,
318                                          char *buf)
319 {
320         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
321 }
322
323 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
324                                           struct kobj_attribute *attr,
325                                           const char *buf, size_t count)
326 {
327         unsigned long msecs;
328         int err;
329
330         err = strict_strtoul(buf, 10, &msecs);
331         if (err || msecs > UINT_MAX)
332                 return -EINVAL;
333
334         khugepaged_scan_sleep_millisecs = msecs;
335         wake_up_interruptible(&khugepaged_wait);
336
337         return count;
338 }
339 static struct kobj_attribute scan_sleep_millisecs_attr =
340         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
341                scan_sleep_millisecs_store);
342
343 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
344                                           struct kobj_attribute *attr,
345                                           char *buf)
346 {
347         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
348 }
349
350 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
351                                            struct kobj_attribute *attr,
352                                            const char *buf, size_t count)
353 {
354         unsigned long msecs;
355         int err;
356
357         err = strict_strtoul(buf, 10, &msecs);
358         if (err || msecs > UINT_MAX)
359                 return -EINVAL;
360
361         khugepaged_alloc_sleep_millisecs = msecs;
362         wake_up_interruptible(&khugepaged_wait);
363
364         return count;
365 }
366 static struct kobj_attribute alloc_sleep_millisecs_attr =
367         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
368                alloc_sleep_millisecs_store);
369
370 static ssize_t pages_to_scan_show(struct kobject *kobj,
371                                   struct kobj_attribute *attr,
372                                   char *buf)
373 {
374         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
375 }
376 static ssize_t pages_to_scan_store(struct kobject *kobj,
377                                    struct kobj_attribute *attr,
378                                    const char *buf, size_t count)
379 {
380         int err;
381         unsigned long pages;
382
383         err = strict_strtoul(buf, 10, &pages);
384         if (err || !pages || pages > UINT_MAX)
385                 return -EINVAL;
386
387         khugepaged_pages_to_scan = pages;
388
389         return count;
390 }
391 static struct kobj_attribute pages_to_scan_attr =
392         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
393                pages_to_scan_store);
394
395 static ssize_t pages_collapsed_show(struct kobject *kobj,
396                                     struct kobj_attribute *attr,
397                                     char *buf)
398 {
399         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
400 }
401 static struct kobj_attribute pages_collapsed_attr =
402         __ATTR_RO(pages_collapsed);
403
404 static ssize_t full_scans_show(struct kobject *kobj,
405                                struct kobj_attribute *attr,
406                                char *buf)
407 {
408         return sprintf(buf, "%u\n", khugepaged_full_scans);
409 }
410 static struct kobj_attribute full_scans_attr =
411         __ATTR_RO(full_scans);
412
413 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
414                                       struct kobj_attribute *attr, char *buf)
415 {
416         return single_flag_show(kobj, attr, buf,
417                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
418 }
419 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
420                                        struct kobj_attribute *attr,
421                                        const char *buf, size_t count)
422 {
423         return single_flag_store(kobj, attr, buf, count,
424                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
425 }
426 static struct kobj_attribute khugepaged_defrag_attr =
427         __ATTR(defrag, 0644, khugepaged_defrag_show,
428                khugepaged_defrag_store);
429
430 /*
431  * max_ptes_none controls whether khugepaged should collapse hugepages
432  * over ranges that still contain unmapped ptes, in turn potentially
433  * increasing the memory footprint of the vmas. When max_ptes_none is 0,
434  * khugepaged will not reduce the free memory available in the system as
435  * it runs. Increasing max_ptes_none instead potentially reduces the
436  * free memory in the system during the khugepaged scan.
437  */
438 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
439                                              struct kobj_attribute *attr,
440                                              char *buf)
441 {
442         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
443 }
444 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
445                                               struct kobj_attribute *attr,
446                                               const char *buf, size_t count)
447 {
448         int err;
449         unsigned long max_ptes_none;
450
451         err = strict_strtoul(buf, 10, &max_ptes_none);
452         if (err || max_ptes_none > HPAGE_PMD_NR-1)
453                 return -EINVAL;
454
455         khugepaged_max_ptes_none = max_ptes_none;
456
457         return count;
458 }
459 static struct kobj_attribute khugepaged_max_ptes_none_attr =
460         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
461                khugepaged_max_ptes_none_store);
462
463 static struct attribute *khugepaged_attr[] = {
464         &khugepaged_defrag_attr.attr,
465         &khugepaged_max_ptes_none_attr.attr,
466         &pages_to_scan_attr.attr,
467         &pages_collapsed_attr.attr,
468         &full_scans_attr.attr,
469         &scan_sleep_millisecs_attr.attr,
470         &alloc_sleep_millisecs_attr.attr,
471         NULL,
472 };
473
474 static struct attribute_group khugepaged_attr_group = {
475         .attrs = khugepaged_attr,
476         .name = "khugepaged",
477 };
478 #endif /* CONFIG_SYSFS */
479
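/*
 * With CONFIG_SYSFS this creates the tunables under
 * /sys/kernel/mm/transparent_hugepage/ ("enabled", "defrag", ...) and
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, then brings up the
 * khugepaged thread.
 */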
480 static int __init hugepage_init(void)
481 {
482         int err;
483 #ifdef CONFIG_SYSFS
484         static struct kobject *hugepage_kobj;
485
486         err = -ENOMEM;
487         hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
488         if (unlikely(!hugepage_kobj)) {
489                 printk(KERN_ERR "hugepage: failed kobject create\n");
490                 goto out;
491         }
492
493         err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
494         if (err) {
495                 printk(KERN_ERR "hugepage: failed to register hugepage group\n");
496                 goto out;
497         }
498
499         err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
500         if (err) {
501                 printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
502                 goto out;
503         }
504 #endif
505
506         err = khugepaged_slab_init();
507         if (err)
508                 goto out;
509
510         err = mm_slots_hash_init();
511         if (err) {
512                 khugepaged_slab_free();
513                 goto out;
514         }
515
516         start_khugepaged();
517
518         set_recommended_min_free_kbytes();
519
520 out:
521         return err;
522 }
523 module_init(hugepage_init)
524
525 static int __init setup_transparent_hugepage(char *str)
526 {
527         int ret = 0;
528         if (!str)
529                 goto out;
530         if (!strcmp(str, "always")) {
531                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
532                         &transparent_hugepage_flags);
533                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
534                           &transparent_hugepage_flags);
535                 ret = 1;
536         } else if (!strcmp(str, "madvise")) {
537                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
538                           &transparent_hugepage_flags);
539                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
540                         &transparent_hugepage_flags);
541                 ret = 1;
542         } else if (!strcmp(str, "never")) {
543                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
544                           &transparent_hugepage_flags);
545                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
546                           &transparent_hugepage_flags);
547                 ret = 1;
548         }
549 out:
550         if (!ret)
551                 printk(KERN_WARNING
552                        "transparent_hugepage= cannot parse, ignored\n");
553         return ret;
554 }
555 __setup("transparent_hugepage=", setup_transparent_hugepage);
556
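/*
 * Deposit a preallocated pte page table on the per-mm FIFO list headed
 * at mm->pmd_huge_pte.  get_pmd_huge_pte() withdraws it again when the
 * huge pmd has to be turned back into regular ptes.  Caller must hold
 * mm->page_table_lock.
 */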
557 static void prepare_pmd_huge_pte(pgtable_t pgtable,
558                                  struct mm_struct *mm)
559 {
560         assert_spin_locked(&mm->page_table_lock);
561
562         /* FIFO */
563         if (!mm->pmd_huge_pte)
564                 INIT_LIST_HEAD(&pgtable->lru);
565         else
566                 list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
567         mm->pmd_huge_pte = pgtable;
568 }
569
570 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
571 {
572         if (likely(vma->vm_flags & VM_WRITE))
573                 pmd = pmd_mkwrite(pmd);
574         return pmd;
575 }
576
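/*
 * Map a freshly allocated hugepage at @haddr: clear it and, if the pmd
 * is still empty, install a huge (writable if VM_WRITE) pmd, add the
 * anon rmap and deposit the preallocated pte table.  On failure the
 * memcg charge and the page reference taken by the caller are dropped.
 */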
577 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
578                                         struct vm_area_struct *vma,
579                                         unsigned long haddr, pmd_t *pmd,
580                                         struct page *page)
581 {
582         int ret = 0;
583         pgtable_t pgtable;
584
585         VM_BUG_ON(!PageCompound(page));
586         pgtable = pte_alloc_one(mm, haddr);
587         if (unlikely(!pgtable)) {
588                 mem_cgroup_uncharge_page(page);
589                 put_page(page);
590                 return VM_FAULT_OOM;
591         }
592
593         clear_huge_page(page, haddr, HPAGE_PMD_NR);
594         __SetPageUptodate(page);
595
596         spin_lock(&mm->page_table_lock);
597         if (unlikely(!pmd_none(*pmd))) {
598                 spin_unlock(&mm->page_table_lock);
599                 mem_cgroup_uncharge_page(page);
600                 put_page(page);
601                 pte_free(mm, pgtable);
602         } else {
603                 pmd_t entry;
604                 entry = mk_pmd(page, vma->vm_page_prot);
605                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
606                 entry = pmd_mkhuge(entry);
607                 /*
608                  * The spinlocking to take the lru_lock inside
609                  * page_add_new_anon_rmap() acts as a full memory
610                  * barrier to be sure clear_huge_page writes become
611                  * visible after the set_pmd_at() write.
612                  */
613                 page_add_new_anon_rmap(page, vma, haddr);
614                 set_pmd_at(mm, haddr, pmd, entry);
615                 prepare_pmd_huge_pte(pgtable, mm);
616                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
617                 spin_unlock(&mm->page_table_lock);
618         }
619
620         return ret;
621 }
622
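/*
 * Allocate an HPAGE_PMD_ORDER compound page.  When defrag is disabled
 * __GFP_WAIT is cleared so the allocation fails fast instead of
 * entering reclaim/compaction.
 */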
623 static inline struct page *alloc_hugepage(int defrag)
624 {
625         return alloc_pages(GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT),
626                            HPAGE_PMD_ORDER);
627 }
628
629 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
630                                unsigned long address, pmd_t *pmd,
631                                unsigned int flags)
632 {
633         struct page *page;
634         unsigned long haddr = address & HPAGE_PMD_MASK;
635         pte_t *pte;
636
637         if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
638                 if (unlikely(anon_vma_prepare(vma)))
639                         return VM_FAULT_OOM;
640                 if (unlikely(khugepaged_enter(vma)))
641                         return VM_FAULT_OOM;
642                 page = alloc_hugepage(transparent_hugepage_defrag(vma));
643                 if (unlikely(!page))
644                         goto out;
645                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
646                         put_page(page);
647                         goto out;
648                 }
649
650                 return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
651         }
652 out:
653         /*
654          * Use __pte_alloc instead of pte_alloc_map, because we can't
655          * run pte_offset_map on the pmd, if a huge pmd could
656          * materialize from under us from a different thread.
657          */
658         if (unlikely(__pte_alloc(mm, vma, pmd, address)))
659                 return VM_FAULT_OOM;
660         /* if a huge pmd materialized from under us just retry later */
661         if (unlikely(pmd_trans_huge(*pmd)))
662                 return 0;
663         /*
664          * A regular pmd is established and it can't morph into a huge pmd
665          * from under us anymore at this point because we hold the mmap_sem
666          * read mode and khugepaged takes it in write mode. So now it's
667          * safe to run pte_offset_map().
668          */
669         pte = pte_offset_map(pmd, address);
670         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
671 }
672
673 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
674                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
675                   struct vm_area_struct *vma)
676 {
677         struct page *src_page;
678         pmd_t pmd;
679         pgtable_t pgtable;
680         int ret;
681
682         ret = -ENOMEM;
683         pgtable = pte_alloc_one(dst_mm, addr);
684         if (unlikely(!pgtable))
685                 goto out;
686
687         spin_lock(&dst_mm->page_table_lock);
688         spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
689
690         ret = -EAGAIN;
691         pmd = *src_pmd;
692         if (unlikely(!pmd_trans_huge(pmd))) {
693                 pte_free(dst_mm, pgtable);
694                 goto out_unlock;
695         }
696         if (unlikely(pmd_trans_splitting(pmd))) {
697                 /* a split of the huge page is running from under us */
698                 spin_unlock(&src_mm->page_table_lock);
699                 spin_unlock(&dst_mm->page_table_lock);
700                 pte_free(dst_mm, pgtable);
701
702                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
703                 goto out;
704         }
705         src_page = pmd_page(pmd);
706         VM_BUG_ON(!PageHead(src_page));
707         get_page(src_page);
708         page_dup_rmap(src_page);
709         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
710
711         pmdp_set_wrprotect(src_mm, addr, src_pmd);
712         pmd = pmd_mkold(pmd_wrprotect(pmd));
713         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
714         prepare_pmd_huge_pte(pgtable, dst_mm);
715
716         ret = 0;
717 out_unlock:
718         spin_unlock(&src_mm->page_table_lock);
719         spin_unlock(&dst_mm->page_table_lock);
720 out:
721         return ret;
722 }
723
724 /* no "address" argument, so this destroys the page coloring of some archs */
725 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
726 {
727         pgtable_t pgtable;
728
729         assert_spin_locked(&mm->page_table_lock);
730
731         /* FIFO */
732         pgtable = mm->pmd_huge_pte;
733         if (list_empty(&pgtable->lru))
734                 mm->pmd_huge_pte = NULL;
735         else {
736                 mm->pmd_huge_pte = list_entry(pgtable->lru.next,
737                                               struct page, lru);
738                 list_del(&pgtable->lru);
739         }
740         return pgtable;
741 }
742
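/*
 * COW fallback when no new hugepage could be allocated: copy the data
 * into HPAGE_PMD_NR freshly allocated small pages and replace the huge
 * pmd with regular ptes pointing at them, effectively splitting the
 * mapping for this mm only.
 */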
743 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
744                                         struct vm_area_struct *vma,
745                                         unsigned long address,
746                                         pmd_t *pmd, pmd_t orig_pmd,
747                                         struct page *page,
748                                         unsigned long haddr)
749 {
750         pgtable_t pgtable;
751         pmd_t _pmd;
752         int ret = 0, i;
753         struct page **pages;
754
755         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
756                         GFP_KERNEL);
757         if (unlikely(!pages)) {
758                 ret |= VM_FAULT_OOM;
759                 goto out;
760         }
761
762         for (i = 0; i < HPAGE_PMD_NR; i++) {
763                 pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
764                                           vma, address);
765                 if (unlikely(!pages[i] ||
766                              mem_cgroup_newpage_charge(pages[i], mm,
767                                                        GFP_KERNEL))) {
768                         if (pages[i])
769                                 put_page(pages[i]);
770                         mem_cgroup_uncharge_start();
771                         while (--i >= 0) {
772                                 mem_cgroup_uncharge_page(pages[i]);
773                                 put_page(pages[i]);
774                         }
775                         mem_cgroup_uncharge_end();
776                         kfree(pages);
777                         ret |= VM_FAULT_OOM;
778                         goto out;
779                 }
780         }
781
782         for (i = 0; i < HPAGE_PMD_NR; i++) {
783                 copy_user_highpage(pages[i], page + i,
784                                    haddr + PAGE_SHIFT*i, vma);
785                 __SetPageUptodate(pages[i]);
786                 cond_resched();
787         }
788
789         spin_lock(&mm->page_table_lock);
790         if (unlikely(!pmd_same(*pmd, orig_pmd)))
791                 goto out_free_pages;
792         VM_BUG_ON(!PageHead(page));
793
794         pmdp_clear_flush_notify(vma, haddr, pmd);
795         /* leave pmd empty until pte is filled */
796
797         pgtable = get_pmd_huge_pte(mm);
798         pmd_populate(mm, &_pmd, pgtable);
799
800         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
801                 pte_t *pte, entry;
802                 entry = mk_pte(pages[i], vma->vm_page_prot);
803                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
804                 page_add_new_anon_rmap(pages[i], vma, haddr);
805                 pte = pte_offset_map(&_pmd, haddr);
806                 VM_BUG_ON(!pte_none(*pte));
807                 set_pte_at(mm, haddr, pte, entry);
808                 pte_unmap(pte);
809         }
810         kfree(pages);
811
812         mm->nr_ptes++;
813         smp_wmb(); /* make pte visible before pmd */
814         pmd_populate(mm, pmd, pgtable);
815         page_remove_rmap(page);
816         spin_unlock(&mm->page_table_lock);
817
818         ret |= VM_FAULT_WRITE;
819         put_page(page);
820
821 out:
822         return ret;
823
824 out_free_pages:
825         spin_unlock(&mm->page_table_lock);
826         mem_cgroup_uncharge_start();
827         for (i = 0; i < HPAGE_PMD_NR; i++) {
828                 mem_cgroup_uncharge_page(pages[i]);
829                 put_page(pages[i]);
830         }
831         mem_cgroup_uncharge_end();
832         kfree(pages);
833         goto out;
834 }
835
836 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
837                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
838 {
839         int ret = 0;
840         struct page *page, *new_page;
841         unsigned long haddr;
842
843         VM_BUG_ON(!vma->anon_vma);
844         spin_lock(&mm->page_table_lock);
845         if (unlikely(!pmd_same(*pmd, orig_pmd)))
846                 goto out_unlock;
847
848         page = pmd_page(orig_pmd);
849         VM_BUG_ON(!PageCompound(page) || !PageHead(page));
850         haddr = address & HPAGE_PMD_MASK;
851         if (page_mapcount(page) == 1) {
852                 pmd_t entry;
853                 entry = pmd_mkyoung(orig_pmd);
854                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
855                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
856                         update_mmu_cache(vma, address, entry);
857                 ret |= VM_FAULT_WRITE;
858                 goto out_unlock;
859         }
860         get_page(page);
861         spin_unlock(&mm->page_table_lock);
862
863         if (transparent_hugepage_enabled(vma) &&
864             !transparent_hugepage_debug_cow())
865                 new_page = alloc_hugepage(transparent_hugepage_defrag(vma));
866         else
867                 new_page = NULL;
868
869         if (unlikely(!new_page)) {
870                 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
871                                                    pmd, orig_pmd, page, haddr);
872                 put_page(page);
873                 goto out;
874         }
875
876         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
877                 put_page(new_page);
878                 put_page(page);
879                 ret |= VM_FAULT_OOM;
880                 goto out;
881         }
882
883         copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
884         __SetPageUptodate(new_page);
885
886         spin_lock(&mm->page_table_lock);
887         put_page(page);
888         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
889                 mem_cgroup_uncharge_page(new_page);
890                 put_page(new_page);
891         } else {
892                 pmd_t entry;
893                 VM_BUG_ON(!PageHead(page));
894                 entry = mk_pmd(new_page, vma->vm_page_prot);
895                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
896                 entry = pmd_mkhuge(entry);
897                 pmdp_clear_flush_notify(vma, haddr, pmd);
898                 page_add_new_anon_rmap(new_page, vma, haddr);
899                 set_pmd_at(mm, haddr, pmd, entry);
900                 update_mmu_cache(vma, address, entry);
901                 page_remove_rmap(page);
902                 put_page(page);
903                 ret |= VM_FAULT_WRITE;
904         }
905 out_unlock:
906         spin_unlock(&mm->page_table_lock);
907 out:
908         return ret;
909 }
910
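/*
 * follow_page() helper for a huge pmd: returns the page backing @addr
 * within the hugepage, honouring FOLL_WRITE/FOLL_TOUCH/FOLL_GET.
 * Caller must hold mm->page_table_lock.
 */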
911 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
912                                    unsigned long addr,
913                                    pmd_t *pmd,
914                                    unsigned int flags)
915 {
916         struct page *page = NULL;
917
918         assert_spin_locked(&mm->page_table_lock);
919
920         if (flags & FOLL_WRITE && !pmd_write(*pmd))
921                 goto out;
922
923         page = pmd_page(*pmd);
924         VM_BUG_ON(!PageHead(page));
925         if (flags & FOLL_TOUCH) {
926                 pmd_t _pmd;
927                 /*
928                  * We should set the dirty bit only for FOLL_WRITE but
929                  * for now the dirty bit in the pmd is meaningless.
930                  * And if the dirty bit ever becomes meaningful and
931                  * we only set it with FOLL_WRITE, an atomic
932                  * set_bit will be required on the pmd to set the
933                  * young bit, instead of the current set_pmd_at.
934                  */
935                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
936                 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
937         }
938         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
939         VM_BUG_ON(!PageCompound(page));
940         if (flags & FOLL_GET)
941                 get_page(page);
942
943 out:
944         return page;
945 }
946
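/*
 * Tear down a huge pmd when the range is unmapped: clear the pmd, drop
 * the rmap and the deposited pte table, and hand the hugepage to the
 * mmu_gather for freeing.  Returns 1 if a huge pmd was zapped, 0 if the
 * caller should fall back to the regular pte path (e.g. a split was
 * running).
 */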
947 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
948                  pmd_t *pmd)
949 {
950         int ret = 0;
951
952         spin_lock(&tlb->mm->page_table_lock);
953         if (likely(pmd_trans_huge(*pmd))) {
954                 if (unlikely(pmd_trans_splitting(*pmd))) {
955                         spin_unlock(&tlb->mm->page_table_lock);
956                         wait_split_huge_page(vma->anon_vma,
957                                              pmd);
958                 } else {
959                         struct page *page;
960                         pgtable_t pgtable;
961                         pgtable = get_pmd_huge_pte(tlb->mm);
962                         page = pmd_page(*pmd);
963                         pmd_clear(pmd);
964                         page_remove_rmap(page);
965                         VM_BUG_ON(page_mapcount(page) < 0);
966                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
967                         VM_BUG_ON(!PageHead(page));
968                         spin_unlock(&tlb->mm->page_table_lock);
969                         tlb_remove_page(tlb, page);
970                         pte_free(tlb->mm, pgtable);
971                         ret = 1;
972                 }
973         } else
974                 spin_unlock(&tlb->mm->page_table_lock);
975
976         return ret;
977 }
978
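/*
 * mincore() helper: if the range is backed by a (non-splitting) huge
 * pmd, every page in it is present, so fill the vector directly.
 * Returns 1 when handled here, 0 to make the caller retry with the
 * regular pte path.
 */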
979 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
980                 unsigned long addr, unsigned long end,
981                 unsigned char *vec)
982 {
983         int ret = 0;
984
985         spin_lock(&vma->vm_mm->page_table_lock);
986         if (likely(pmd_trans_huge(*pmd))) {
987                 ret = !pmd_trans_splitting(*pmd);
988                 spin_unlock(&vma->vm_mm->page_table_lock);
989                 if (unlikely(!ret))
990                         wait_split_huge_page(vma->anon_vma, pmd);
991                 else {
992                         /*
993                          * All logical pages in the range are present
994                          * if backed by a huge page.
995                          */
996                         memset(vec, 1, (end - addr) >> PAGE_SHIFT);
997                 }
998         } else
999                 spin_unlock(&vma->vm_mm->page_table_lock);
1000
1001         return ret;
1002 }
1003
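/*
 * mprotect() helper: rewrite the protection bits of a huge pmd in one
 * go.  Returns 1 if the huge pmd was updated, 0 if the caller must fall
 * back to the pte-level code (not huge, or a split is in progress).
 */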
1004 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1005                 unsigned long addr, pgprot_t newprot)
1006 {
1007         struct mm_struct *mm = vma->vm_mm;
1008         int ret = 0;
1009
1010         spin_lock(&mm->page_table_lock);
1011         if (likely(pmd_trans_huge(*pmd))) {
1012                 if (unlikely(pmd_trans_splitting(*pmd))) {
1013                         spin_unlock(&mm->page_table_lock);
1014                         wait_split_huge_page(vma->anon_vma, pmd);
1015                 } else {
1016                         pmd_t entry;
1017
1018                         entry = pmdp_get_and_clear(mm, addr, pmd);
1019                         entry = pmd_modify(entry, newprot);
1020                         set_pmd_at(mm, addr, pmd, entry);
1021                         spin_unlock(&vma->vm_mm->page_table_lock);
1022                         flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1023                         ret = 1;
1024                 }
1025         } else
1026                 spin_unlock(&vma->vm_mm->page_table_lock);
1027
1028         return ret;
1029 }
1030
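/*
 * Return the pmd currently mapping @page at @address if it is (still)
 * a huge pmd, NULL otherwise.  @flag lets the caller assert whether the
 * pmd is expected to be in the splitting state.  Callers hold
 * mm->page_table_lock.
 */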
1031 pmd_t *page_check_address_pmd(struct page *page,
1032                               struct mm_struct *mm,
1033                               unsigned long address,
1034                               enum page_check_address_pmd_flag flag)
1035 {
1036         pgd_t *pgd;
1037         pud_t *pud;
1038         pmd_t *pmd, *ret = NULL;
1039
1040         if (address & ~HPAGE_PMD_MASK)
1041                 goto out;
1042
1043         pgd = pgd_offset(mm, address);
1044         if (!pgd_present(*pgd))
1045                 goto out;
1046
1047         pud = pud_offset(pgd, address);
1048         if (!pud_present(*pud))
1049                 goto out;
1050
1051         pmd = pmd_offset(pud, address);
1052         if (pmd_none(*pmd))
1053                 goto out;
1054         if (pmd_page(*pmd) != page)
1055                 goto out;
1056         VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1057                   pmd_trans_splitting(*pmd));
1058         if (pmd_trans_huge(*pmd)) {
1059                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1060                           !pmd_trans_splitting(*pmd));
1061                 ret = pmd;
1062         }
1063 out:
1064         return ret;
1065 }
1066
1067 static int __split_huge_page_splitting(struct page *page,
1068                                        struct vm_area_struct *vma,
1069                                        unsigned long address)
1070 {
1071         struct mm_struct *mm = vma->vm_mm;
1072         pmd_t *pmd;
1073         int ret = 0;
1074
1075         spin_lock(&mm->page_table_lock);
1076         pmd = page_check_address_pmd(page, mm, address,
1077                                      PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1078         if (pmd) {
1079                 /*
1080                  * We can't temporarily set the pmd to null in order
1081                  * to split it, the pmd must remain marked huge at all
1082                  * times or the VM won't take the pmd_trans_huge paths
1083                  * and it won't wait on the anon_vma->root->lock to
1084                  * serialize against split_huge_page*.
1085                  */
1086                 pmdp_splitting_flush_notify(vma, address, pmd);
1087                 ret = 1;
1088         }
1089         spin_unlock(&mm->page_table_lock);
1090
1091         return ret;
1092 }
1093
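/*
 * Second split phase: under lru_lock and the compound lock, distribute
 * the head page's _count, _mapcount and relevant flags to the tail
 * pages, put the tails on the LRU and clear the compound state, turning
 * the hugepage into HPAGE_PMD_NR independent pages.
 */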
1094 static void __split_huge_page_refcount(struct page *page)
1095 {
1096         int i;
1097         unsigned long head_index = page->index;
1098         struct zone *zone = page_zone(page);
1099
1100         /* prevent PageLRU from going away from under us, and freeze lru stats */
1101         spin_lock_irq(&zone->lru_lock);
1102         compound_lock(page);
1103
1104         for (i = 1; i < HPAGE_PMD_NR; i++) {
1105                 struct page *page_tail = page + i;
1106
1107                 /* tail_page->_count cannot change */
1108                 atomic_sub(atomic_read(&page_tail->_count), &page->_count);
1109                 BUG_ON(page_count(page) <= 0);
1110                 atomic_add(page_mapcount(page) + 1, &page_tail->_count);
1111                 BUG_ON(atomic_read(&page_tail->_count) <= 0);
1112
1113                 /* after clearing PageTail the gup refcount can be released */
1114                 smp_mb();
1115
1116                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1117                 page_tail->flags |= (page->flags &
1118                                      ((1L << PG_referenced) |
1119                                       (1L << PG_swapbacked) |
1120                                       (1L << PG_mlocked) |
1121                                       (1L << PG_uptodate)));
1122                 page_tail->flags |= (1L << PG_dirty);
1123
1124                 /*
1125                  * 1) clear PageTail before overwriting first_page
1126                  * 2) clear PageTail before clearing PageHead for VM_BUG_ON
1127                  */
1128                 smp_wmb();
1129
1130                 /*
1131                  * __split_huge_page_splitting() already set the
1132                  * splitting bit in all pmd that could map this
1133                  * hugepage, that will ensure no CPU can alter the
1134                  * mapcount on the head page. The mapcount is only
1135                  * accounted in the head page and it has to be
1136                  * transferred to all tail pages in the below code. So
1137                  * for this code to be safe, the mapcount can't change
1138                  * during the split. But that doesn't mean userland can't
1139                  * keep changing and reading the page contents while
1140                  * we transfer the mapcount, so the pmd splitting
1141                  * status is achieved setting a reserved bit in the
1142                  * pmd, not by clearing the present bit.
1143                  */
1144                 BUG_ON(page_mapcount(page_tail));
1145                 page_tail->_mapcount = page->_mapcount;
1146
1147                 BUG_ON(page_tail->mapping);
1148                 page_tail->mapping = page->mapping;
1149
1150                 page_tail->index = ++head_index;
1151
1152                 BUG_ON(!PageAnon(page_tail));
1153                 BUG_ON(!PageUptodate(page_tail));
1154                 BUG_ON(!PageDirty(page_tail));
1155                 BUG_ON(!PageSwapBacked(page_tail));
1156
1157                 lru_add_page_tail(zone, page, page_tail);
1158         }
1159
1160         __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1161         __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1162
1163         ClearPageCompound(page);
1164         compound_unlock(page);
1165         spin_unlock_irq(&zone->lru_lock);
1166
1167         for (i = 1; i < HPAGE_PMD_NR; i++) {
1168                 struct page *page_tail = page + i;
1169                 BUG_ON(page_count(page_tail) <= 0);
1170                 /*
1171                  * Tail pages may be freed if there wasn't any mapping,
1172                  * e.g. if add_to_swap() is running on an lru page that
1173                  * had its mapping zapped. And freeing these pages
1174                  * requires taking the lru_lock so we do the put_page
1175                  * of the tail pages after the split is complete.
1176                  */
1177                 put_page(page_tail);
1178         }
1179
1180         /*
1181          * Only the head page (now become a regular page) is required
1182          * to be pinned by the caller.
1183          */
1184         BUG_ON(page_count(page) <= 0);
1185 }
1186
1187 static int __split_huge_page_map(struct page *page,
1188                                  struct vm_area_struct *vma,
1189                                  unsigned long address)
1190 {
1191         struct mm_struct *mm = vma->vm_mm;
1192         pmd_t *pmd, _pmd;
1193         int ret = 0, i;
1194         pgtable_t pgtable;
1195         unsigned long haddr;
1196
1197         spin_lock(&mm->page_table_lock);
1198         pmd = page_check_address_pmd(page, mm, address,
1199                                      PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1200         if (pmd) {
1201                 pgtable = get_pmd_huge_pte(mm);
1202                 pmd_populate(mm, &_pmd, pgtable);
1203
1204                 for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1205                      i++, haddr += PAGE_SIZE) {
1206                         pte_t *pte, entry;
1207                         BUG_ON(PageCompound(page+i));
1208                         entry = mk_pte(page + i, vma->vm_page_prot);
1209                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1210                         if (!pmd_write(*pmd))
1211                                 entry = pte_wrprotect(entry);
1212                         else
1213                                 BUG_ON(page_mapcount(page) != 1);
1214                         if (!pmd_young(*pmd))
1215                                 entry = pte_mkold(entry);
1216                         pte = pte_offset_map(&_pmd, haddr);
1217                         BUG_ON(!pte_none(*pte));
1218                         set_pte_at(mm, haddr, pte, entry);
1219                         pte_unmap(pte);
1220                 }
1221
1222                 mm->nr_ptes++;
1223                 smp_wmb(); /* make pte visible before pmd */
1224                 /*
1225                  * Up to this point the pmd is present and huge and
1226                  * userland has the whole access to the hugepage
1227                  * during the split (which happens in place). If we
1228                  * overwrite the pmd with the not-huge version
1229                  * pointing to the pte here (which of course we could
1230                  * if all CPUs were bug free), userland could trigger
1231                  * a small page size TLB miss on the small sized TLB
1232                  * while the hugepage TLB entry is still established
1233                  * in the huge TLB. Some CPUs don't like that. See
1234                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1235                  * Erratum 383 on page 93. Intel should be safe but
1236                  * also warns that it's only safe if the permission
1237                  * and cache attributes of the two entries loaded in
1238                  * the two TLBs are identical (which should be the case
1239                  * here). But it is generally safer to never allow
1240                  * small and huge TLB entries for the same virtual
1241                  * address to be loaded simultaneously. So instead of
1242                  * doing "pmd_populate(); flush_tlb_range();" we first
1243                  * mark the current pmd notpresent (atomically because
1244                  * here the pmd_trans_huge and pmd_trans_splitting
1245                  * must remain set at all times on the pmd until the
1246                  * split is complete for this pmd), then we flush the
1247                  * SMP TLB and finally we write the non-huge version
1248                  * of the pmd entry with pmd_populate.
1249                  */
1250                 set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1251                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1252                 pmd_populate(mm, pmd, pgtable);
1253                 ret = 1;
1254         }
1255         spin_unlock(&mm->page_table_lock);
1256
1257         return ret;
1258 }
1259
1260 /* must be called with anon_vma->root->lock held */
1261 static void __split_huge_page(struct page *page,
1262                               struct anon_vma *anon_vma)
1263 {
1264         int mapcount, mapcount2;
1265         struct anon_vma_chain *avc;
1266
1267         BUG_ON(!PageHead(page));
1268         BUG_ON(PageTail(page));
1269
1270         mapcount = 0;
1271         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1272                 struct vm_area_struct *vma = avc->vma;
1273                 unsigned long addr = vma_address(page, vma);
1274                 BUG_ON(is_vma_temporary_stack(vma));
1275                 if (addr == -EFAULT)
1276                         continue;
1277                 mapcount += __split_huge_page_splitting(page, vma, addr);
1278         }
1279         /*
1280          * It is critical that new vmas are added to the tail of the
1281          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1282          * and establishes a child pmd before
1283          * __split_huge_page_splitting() freezes the parent pmd (so if
1284          * we fail to prevent copy_huge_pmd() from running until the
1285          * whole __split_huge_page() is complete), we will still see
1286          * the newly established pmd of the child later during the
1287          * walk, to be able to set it as pmd_trans_splitting too.
1288          */
1289         if (mapcount != page_mapcount(page))
1290                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1291                        mapcount, page_mapcount(page));
1292         BUG_ON(mapcount != page_mapcount(page));
1293
1294         __split_huge_page_refcount(page);
1295
1296         mapcount2 = 0;
1297         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1298                 struct vm_area_struct *vma = avc->vma;
1299                 unsigned long addr = vma_address(page, vma);
1300                 BUG_ON(is_vma_temporary_stack(vma));
1301                 if (addr == -EFAULT)
1302                         continue;
1303                 mapcount2 += __split_huge_page_map(page, vma, addr);
1304         }
1305         if (mapcount != mapcount2)
1306                 printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1307                        mapcount, mapcount2, page_mapcount(page));
1308         BUG_ON(mapcount != mapcount2);
1309 }
1310
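/*
 * Split a transparent hugepage into regular pages.  Returns 0 on
 * success (or if the page was already split), 1 if the split could not
 * be performed because the anon_vma could not be pinned.
 */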
1311 int split_huge_page(struct page *page)
1312 {
1313         struct anon_vma *anon_vma;
1314         int ret = 1;
1315
1316         BUG_ON(!PageAnon(page));
1317         anon_vma = page_lock_anon_vma(page);
1318         if (!anon_vma)
1319                 goto out;
1320         ret = 0;
1321         if (!PageCompound(page))
1322                 goto out_unlock;
1323
1324         BUG_ON(!PageSwapBacked(page));
1325         __split_huge_page(page, anon_vma);
1326
1327         BUG_ON(PageCompound(page));
1328 out_unlock:
1329         page_unlock_anon_vma(anon_vma);
1330 out:
1331         return ret;
1332 }
1333
1334 int hugepage_madvise(unsigned long *vm_flags)
1335 {
1336         /*
1337          * Be somewhat over-protective like KSM for now!
1338          */
1339         if (*vm_flags & (VM_HUGEPAGE | VM_SHARED  | VM_MAYSHARE   |
1340                          VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
1341                          VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1342                          VM_MIXEDMAP | VM_SAO))
1343                 return -EINVAL;
1344
1345         *vm_flags |= VM_HUGEPAGE;
1346
1347         return 0;
1348 }
1349
1350 static int __init khugepaged_slab_init(void)
1351 {
1352         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1353                                           sizeof(struct mm_slot),
1354                                           __alignof__(struct mm_slot), 0, NULL);
1355         if (!mm_slot_cache)
1356                 return -ENOMEM;
1357
1358         return 0;
1359 }
1360
1361 static void __init khugepaged_slab_free(void)
1362 {
1363         kmem_cache_destroy(mm_slot_cache);
1364         mm_slot_cache = NULL;
1365 }
1366
1367 static inline struct mm_slot *alloc_mm_slot(void)
1368 {
1369         if (!mm_slot_cache)     /* initialization failed */
1370                 return NULL;
1371         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1372 }
1373
1374 static inline void free_mm_slot(struct mm_slot *mm_slot)
1375 {
1376         kmem_cache_free(mm_slot_cache, mm_slot);
1377 }
1378
1379 static int __init mm_slots_hash_init(void)
1380 {
1381         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1382                                 GFP_KERNEL);
1383         if (!mm_slots_hash)
1384                 return -ENOMEM;
1385         return 0;
1386 }
1387
1388 #if 0
1389 static void __init mm_slots_hash_free(void)
1390 {
1391         kfree(mm_slots_hash);
1392         mm_slots_hash = NULL;
1393 }
1394 #endif
1395
1396 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1397 {
1398         struct mm_slot *mm_slot;
1399         struct hlist_head *bucket;
1400         struct hlist_node *node;
1401
1402         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1403                                 % MM_SLOTS_HASH_HEADS];
1404         hlist_for_each_entry(mm_slot, node, bucket, hash) {
1405                 if (mm == mm_slot->mm)
1406                         return mm_slot;
1407         }
1408         return NULL;
1409 }
1410
1411 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1412                                     struct mm_slot *mm_slot)
1413 {
1414         struct hlist_head *bucket;
1415
1416         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1417                                 % MM_SLOTS_HASH_HEADS];
1418         mm_slot->mm = mm;
1419         hlist_add_head(&mm_slot->hash, bucket);
1420 }
1421
1422 static inline int khugepaged_test_exit(struct mm_struct *mm)
1423 {
1424         return atomic_read(&mm->mm_users) == 0;
1425 }
1426
1427 int __khugepaged_enter(struct mm_struct *mm)
1428 {
1429         struct mm_slot *mm_slot;
1430         int wakeup;
1431
1432         mm_slot = alloc_mm_slot();
1433         if (!mm_slot)
1434                 return -ENOMEM;
1435
1436         /* __khugepaged_exit() must not run from under us */
1437         VM_BUG_ON(khugepaged_test_exit(mm));
1438         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1439                 free_mm_slot(mm_slot);
1440                 return 0;
1441         }
1442
1443         spin_lock(&khugepaged_mm_lock);
1444         insert_to_mm_slots_hash(mm, mm_slot);
1445         /*
1446          * Insert just behind the scanning cursor, to let the area settle
1447          * down a little.
1448          */
1449         wakeup = list_empty(&khugepaged_scan.mm_head);
1450         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1451         spin_unlock(&khugepaged_mm_lock);
1452
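        /*
         * Take a reference on mm_count (not mm_users): this pins the
         * mm_struct itself so the slot can outlive exit_mmap(), and it
         * is dropped again with mmdrop() from __khugepaged_exit() or
         * collect_mm_slot().
         */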
1453         atomic_inc(&mm->mm_count);
1454         if (wakeup)
1455                 wake_up_interruptible(&khugepaged_wait);
1456
1457         return 0;
1458 }
1459
1460 int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1461 {
1462         unsigned long hstart, hend;
1463         if (!vma->anon_vma)
1464                 /*
1465                  * Not yet faulted in so we will register later in the
1466                  * page fault if needed.
1467                  */
1468                 return 0;
1469         if (vma->vm_file || vma->vm_ops)
1470                 /* khugepaged not yet working on file or special mappings */
1471                 return 0;
1472         VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
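        /*
         * Round the vma inward to HPAGE_PMD_SIZE boundaries.  As an
         * example (assuming 2MB hugepages, as on x86-64), a vma
         * covering [0x1ff000, 0x601000) gives hstart = 0x200000 and
         * hend = 0x600000, i.e. two candidate 2MB ranges.
         */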
1473         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1474         hend = vma->vm_end & HPAGE_PMD_MASK;
1475         if (hstart < hend)
1476                 return khugepaged_enter(vma);
1477         return 0;
1478 }
1479
1480 void __khugepaged_exit(struct mm_struct *mm)
1481 {
1482         struct mm_slot *mm_slot;
1483         int free = 0;
1484
1485         spin_lock(&khugepaged_mm_lock);
1486         mm_slot = get_mm_slot(mm);
1487         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1488                 hlist_del(&mm_slot->hash);
1489                 list_del(&mm_slot->mm_node);
1490                 free = 1;
1491         }
1492
1493         if (free) {
1494                 spin_unlock(&khugepaged_mm_lock);
1495                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1496                 free_mm_slot(mm_slot);
1497                 mmdrop(mm);
1498         } else if (mm_slot) {
1499                 spin_unlock(&khugepaged_mm_lock);
1500                 /*
1501                  * This serializes against khugepaged_test_exit(),
1502                  * which is guaranteed to run under mmap_sem read
1503                  * mode: wait here until khugepaged has finished
1504                  * working on the pagetables under the mmap_sem,
1505                  * because all pagetables will be destroyed as soon
1506                  * as we return.
1507                  */
1508                 down_write(&mm->mmap_sem);
1509                 up_write(&mm->mmap_sem);
1510         } else
1511                 spin_unlock(&khugepaged_mm_lock);
1512 }
1513
1514 static void release_pte_page(struct page *page)
1515 {
1516         /* 0 stands for page_is_file_cache(page) == false */
1517         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1518         unlock_page(page);
1519         putback_lru_page(page);
1520 }
1521
1522 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1523 {
1524         while (--_pte >= pte) {
1525                 pte_t pteval = *_pte;
1526                 if (!pte_none(pteval))
1527                         release_pte_page(pte_page(pteval));
1528         }
1529 }
1530
1531 static void release_all_pte_pages(pte_t *pte)
1532 {
1533         release_pte_pages(pte, pte + HPAGE_PMD_NR);
1534 }
1535
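/*
 * Returns 1 only if every pte in the range passed the checks, all of
 * the mapped pages were locked and isolated from the LRU, and at
 * least one pte was young; on any failure the pages isolated so far
 * are unlocked and put back via release_pte_pages().
 */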
1536 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1537                                         unsigned long address,
1538                                         pte_t *pte)
1539 {
1540         struct page *page;
1541         pte_t *_pte;
1542         int referenced = 0, isolated = 0, none = 0;
1543         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1544              _pte++, address += PAGE_SIZE) {
1545                 pte_t pteval = *_pte;
1546                 if (pte_none(pteval)) {
1547                         if (++none <= khugepaged_max_ptes_none)
1548                                 continue;
1549                         else {
1550                                 release_pte_pages(pte, _pte);
1551                                 goto out;
1552                         }
1553                 }
1554                 if (!pte_present(pteval) || !pte_write(pteval)) {
1555                         release_pte_pages(pte, _pte);
1556                         goto out;
1557                 }
1558                 page = vm_normal_page(vma, address, pteval);
1559                 if (unlikely(!page)) {
1560                         release_pte_pages(pte, _pte);
1561                         goto out;
1562                 }
1563                 VM_BUG_ON(PageCompound(page));
1564                 BUG_ON(!PageAnon(page));
1565                 VM_BUG_ON(!PageSwapBacked(page));
1566
1567                 /* cannot use mapcount: can't collapse if there's a gup pin */
1568                 if (page_count(page) != 1) {
1569                         release_pte_pages(pte, _pte);
1570                         goto out;
1571                 }
1572                 /*
1573                  * We can do it before isolate_lru_page because the
1574                  * page can't be freed from under us. NOTE: PG_lock
1575                  * is needed to serialize against split_huge_page
1576                  * when invoked from the VM.
1577                  */
1578                 if (!trylock_page(page)) {
1579                         release_pte_pages(pte, _pte);
1580                         goto out;
1581                 }
1582                 /*
1583          * Isolate the page to avoid collapsing a hugepage
1584                  * currently in use by the VM.
1585                  */
1586                 if (isolate_lru_page(page)) {
1587                         unlock_page(page);
1588                         release_pte_pages(pte, _pte);
1589                         goto out;
1590                 }
1591                 /* 0 stands for page_is_file_cache(page) == false */
1592                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1593                 VM_BUG_ON(!PageLocked(page));
1594                 VM_BUG_ON(PageLRU(page));
1595
1596                 /* Don't collapse the page unless at least one mapped pte is young */
1597                 if (pte_young(pteval))
1598                         referenced = 1;
1599         }
1600         if (unlikely(!referenced))
1601                 release_all_pte_pages(pte);
1602         else
1603                 isolated = 1;
1604 out:
1605         return isolated;
1606 }
1607
1608 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1609                                       struct vm_area_struct *vma,
1610                                       unsigned long address,
1611                                       spinlock_t *ptl)
1612 {
1613         pte_t *_pte;
1614         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1615                 pte_t pteval = *_pte;
1616                 struct page *src_page;
1617
1618                 if (pte_none(pteval)) {
1619                         clear_user_highpage(page, address);
1620                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1621                 } else {
1622                         src_page = pte_page(pteval);
1623                         copy_user_highpage(page, src_page, address, vma);
1624                         VM_BUG_ON(page_mapcount(src_page) != 1);
1625                         VM_BUG_ON(page_count(src_page) != 2);
1626                         release_pte_page(src_page);
1627                         /*
1628                          * ptl mostly unnecessary, but preempt has to
1629                          * be disabled to update the per-cpu stats
1630                          * inside page_remove_rmap().
1631                          */
1632                         spin_lock(ptl);
1633                         /*
1634                          * paravirt calls inside pte_clear here are
1635                          * superfluous.
1636                          */
1637                         pte_clear(vma->vm_mm, address, _pte);
1638                         page_remove_rmap(src_page);
1639                         spin_unlock(ptl);
1640                         free_page_and_swap_cache(src_page);
1641                 }
1642
1643                 address += PAGE_SIZE;
1644                 page++;
1645         }
1646 }
1647
1648 static void collapse_huge_page(struct mm_struct *mm,
1649                                unsigned long address,
1650                                struct page **hpage)
1651 {
1652         struct vm_area_struct *vma;
1653         pgd_t *pgd;
1654         pud_t *pud;
1655         pmd_t *pmd, _pmd;
1656         pte_t *pte;
1657         pgtable_t pgtable;
1658         struct page *new_page;
1659         spinlock_t *ptl;
1660         int isolated;
1661         unsigned long hstart, hend;
1662
1663         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1664         VM_BUG_ON(!*hpage);
1665
1666         /*
1667          * Prevent all access to the pagetables, with two exceptions:
1668          * gup_fast, which is handled later by the ptep_clear_flush,
1669          * and the VM, which is handled by the anon_vma lock + PG_lock.
1670          */
1671         down_write(&mm->mmap_sem);
1672         if (unlikely(khugepaged_test_exit(mm)))
1673                 goto out;
1674
1675         vma = find_vma(mm, address);
1676         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1677         hend = vma->vm_end & HPAGE_PMD_MASK;
1678         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1679                 goto out;
1680
1681         if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
1682                 goto out;
1683
1684         /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1685         if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1686                 goto out;
1687         VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1688
1689         pgd = pgd_offset(mm, address);
1690         if (!pgd_present(*pgd))
1691                 goto out;
1692
1693         pud = pud_offset(pgd, address);
1694         if (!pud_present(*pud))
1695                 goto out;
1696
1697         pmd = pmd_offset(pud, address);
1698         /* pmd can't go away or become huge under us */
1699         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1700                 goto out;
1701
1702         new_page = *hpage;
1703         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
1704                 goto out;
1705
1706         anon_vma_lock(vma->anon_vma);
1707
1708         pte = pte_offset_map(pmd, address);
1709         ptl = pte_lockptr(mm, pmd);
1710
1711         spin_lock(&mm->page_table_lock); /* probably unnecessary */
1712         /*
1713          * After this gup_fast can't run anymore. This also flushes
1714          * any huge TLB entry from the CPU, so that huge and small
1715          * TLB entries for the same virtual address never coexist;
1716          * mixing them would risk CPU bugs in that area.
1717          */
1718         _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1719         spin_unlock(&mm->page_table_lock);
1720
1721         spin_lock(ptl);
1722         isolated = __collapse_huge_page_isolate(vma, address, pte);
1723         spin_unlock(ptl);
1724         pte_unmap(pte);
1725
1726         if (unlikely(!isolated)) {
1727                 spin_lock(&mm->page_table_lock);
1728                 BUG_ON(!pmd_none(*pmd));
1729                 set_pmd_at(mm, address, pmd, _pmd);
1730                 spin_unlock(&mm->page_table_lock);
1731                 anon_vma_unlock(vma->anon_vma);
1732                 mem_cgroup_uncharge_page(new_page);
1733                 goto out;
1734         }
1735
1736         /*
1737          * All pages are isolated and locked so anon_vma rmap
1738          * can't run anymore.
1739          */
1740         anon_vma_unlock(vma->anon_vma);
1741
1742         __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1743         __SetPageUptodate(new_page);
1744         pgtable = pmd_pgtable(_pmd);
1745         VM_BUG_ON(page_count(pgtable) != 1);
1746         VM_BUG_ON(page_mapcount(pgtable) != 0);
1747
1748         _pmd = mk_pmd(new_page, vma->vm_page_prot);
1749         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1750         _pmd = pmd_mkhuge(_pmd);
1751
1752         /*
1753          * spin_lock() below is not the equivalent of smp_wmb(), so
1754          * this is needed to prevent the copy_huge_page writes from
1755          * becoming visible after the set_pmd_at() write.
1756          */
1757         smp_wmb();
1758
1759         spin_lock(&mm->page_table_lock);
1760         BUG_ON(!pmd_none(*pmd));
1761         page_add_new_anon_rmap(new_page, vma, address);
1762         set_pmd_at(mm, address, pmd, _pmd);
1763         update_mmu_cache(vma, address, _pmd);
1764         prepare_pmd_huge_pte(pgtable, mm);
1765         mm->nr_ptes--;
1766         spin_unlock(&mm->page_table_lock);
1767
1768         *hpage = NULL;
1769         khugepaged_pages_collapsed++;
1770 out:
1771         up_write(&mm->mmap_sem);
1772 }
1773
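/*
 * khugepaged_scan_pmd() runs with mmap_sem held for read and only
 * decides whether a pmd range is worth collapsing; once it finds a
 * candidate it drops mmap_sem and calls collapse_huge_page(), which
 * retakes mmap_sem for writing and revalidates the vma from scratch
 * before touching any pagetables.
 */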
1774 static int khugepaged_scan_pmd(struct mm_struct *mm,
1775                                struct vm_area_struct *vma,
1776                                unsigned long address,
1777                                struct page **hpage)
1778 {
1779         pgd_t *pgd;
1780         pud_t *pud;
1781         pmd_t *pmd;
1782         pte_t *pte, *_pte;
1783         int ret = 0, referenced = 0, none = 0;
1784         struct page *page;
1785         unsigned long _address;
1786         spinlock_t *ptl;
1787
1788         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1789
1790         pgd = pgd_offset(mm, address);
1791         if (!pgd_present(*pgd))
1792                 goto out;
1793
1794         pud = pud_offset(pgd, address);
1795         if (!pud_present(*pud))
1796                 goto out;
1797
1798         pmd = pmd_offset(pud, address);
1799         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1800                 goto out;
1801
1802         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1803         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1804              _pte++, _address += PAGE_SIZE) {
1805                 pte_t pteval = *_pte;
1806                 if (pte_none(pteval)) {
1807                         if (++none <= khugepaged_max_ptes_none)
1808                                 continue;
1809                         else
1810                                 goto out_unmap;
1811                 }
1812                 if (!pte_present(pteval) || !pte_write(pteval))
1813                         goto out_unmap;
1814                 page = vm_normal_page(vma, _address, pteval);
1815                 if (unlikely(!page))
1816                         goto out_unmap;
1817                 VM_BUG_ON(PageCompound(page));
1818                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1819                         goto out_unmap;
1820                 /* cannot use mapcount: can't collapse if there's a gup pin */
1821                 if (page_count(page) != 1)
1822                         goto out_unmap;
1823                 if (pte_young(pteval))
1824                         referenced = 1;
1825         }
1826         if (referenced)
1827                 ret = 1;
1828 out_unmap:
1829         pte_unmap_unlock(pte, ptl);
1830         if (ret) {
1831                 up_read(&mm->mmap_sem);
1832                 collapse_huge_page(mm, address, hpage);
1833         }
1834 out:
1835         return ret;
1836 }
1837
1838 static void collect_mm_slot(struct mm_slot *mm_slot)
1839 {
1840         struct mm_struct *mm = mm_slot->mm;
1841
1842         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1843
1844         if (khugepaged_test_exit(mm)) {
1845                 /* free mm_slot */
1846                 hlist_del(&mm_slot->hash);
1847                 list_del(&mm_slot->mm_node);
1848
1849                 /*
1850                  * Not strictly needed because the mm exited already.
1851                  *
1852                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1853                  */
1854
1855                 /* khugepaged_mm_lock actually not necessary for the below */
1856                 free_mm_slot(mm_slot);
1857                 mmdrop(mm);
1858         }
1859 }
1860
1861 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1862                                             struct page **hpage)
1863 {
1864         struct mm_slot *mm_slot;
1865         struct mm_struct *mm;
1866         struct vm_area_struct *vma;
1867         int progress = 0;
1868
1869         VM_BUG_ON(!pages);
1870         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1871
1872         if (khugepaged_scan.mm_slot)
1873                 mm_slot = khugepaged_scan.mm_slot;
1874         else {
1875                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1876                                      struct mm_slot, mm_node);
1877                 khugepaged_scan.address = 0;
1878                 khugepaged_scan.mm_slot = mm_slot;
1879         }
1880         spin_unlock(&khugepaged_mm_lock);
1881
1882         mm = mm_slot->mm;
1883         down_read(&mm->mmap_sem);
1884         if (unlikely(khugepaged_test_exit(mm)))
1885                 vma = NULL;
1886         else
1887                 vma = find_vma(mm, khugepaged_scan.address);
1888
1889         progress++;
1890         for (; vma; vma = vma->vm_next) {
1891                 unsigned long hstart, hend;
1892
1893                 cond_resched();
1894                 if (unlikely(khugepaged_test_exit(mm))) {
1895                         progress++;
1896                         break;
1897                 }
1898
1899                 if (!(vma->vm_flags & VM_HUGEPAGE) &&
1900                     !khugepaged_always()) {
1901                         progress++;
1902                         continue;
1903                 }
1904
1905                 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1906                 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
1907                         khugepaged_scan.address = vma->vm_end;
1908                         progress++;
1909                         continue;
1910                 }
1911                 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1912
1913                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1914                 hend = vma->vm_end & HPAGE_PMD_MASK;
1915                 if (hstart >= hend) {
1916                         progress++;
1917                         continue;
1918                 }
1919                 if (khugepaged_scan.address < hstart)
1920                         khugepaged_scan.address = hstart;
1921                 if (khugepaged_scan.address > hend) {
1922                         khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
1923                         progress++;
1924                         continue;
1925                 }
1926                 BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1927
1928                 while (khugepaged_scan.address < hend) {
1929                         int ret;
1930                         cond_resched();
1931                         if (unlikely(khugepaged_test_exit(mm)))
1932                                 goto breakouterloop;
1933
1934                         VM_BUG_ON(khugepaged_scan.address < hstart ||
1935                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
1936                                   hend);
1937                         ret = khugepaged_scan_pmd(mm, vma,
1938                                                   khugepaged_scan.address,
1939                                                   hpage);
1940                         /* move to next address */
1941                         khugepaged_scan.address += HPAGE_PMD_SIZE;
1942                         progress += HPAGE_PMD_NR;
1943                         if (ret)
1944                                 /* we released mmap_sem so break loop */
1945                                 goto breakouterloop_mmap_sem;
1946                         if (progress >= pages)
1947                                 goto breakouterloop;
1948                 }
1949         }
1950 breakouterloop:
1951         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1952 breakouterloop_mmap_sem:
1953
1954         spin_lock(&khugepaged_mm_lock);
1955         BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1956         /*
1957          * Release the current mm_slot if this mm is about to die, or
1958          * if we scanned all vmas of this mm.
1959          */
1960         if (khugepaged_test_exit(mm) || !vma) {
1961                 /*
1962                  * Make sure that if mm_users is reaching zero while
1963                  * khugepaged runs here, khugepaged_exit will find
1964                  * mm_slot not pointing to the exiting mm.
1965                  */
1966                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1967                         khugepaged_scan.mm_slot = list_entry(
1968                                 mm_slot->mm_node.next,
1969                                 struct mm_slot, mm_node);
1970                         khugepaged_scan.address = 0;
1971                 } else {
1972                         khugepaged_scan.mm_slot = NULL;
1973                         khugepaged_full_scans++;
1974                 }
1975
1976                 collect_mm_slot(mm_slot);
1977         }
1978
1979         return progress;
1980 }
1981
1982 static int khugepaged_has_work(void)
1983 {
1984         return !list_empty(&khugepaged_scan.mm_head) &&
1985                 khugepaged_enabled();
1986 }
1987
1988 static int khugepaged_wait_event(void)
1989 {
1990         return !list_empty(&khugepaged_scan.mm_head) ||
1991                 !khugepaged_enabled();
1992 }
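
/*
 * khugepaged_has_work() is the "do a scan pass" condition, while
 * khugepaged_wait_event() additionally wakes the thread when
 * khugepaged is disabled so khugepaged_loop() can terminate.
 */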
1993
1994 static void khugepaged_do_scan(struct page **hpage)
1995 {
1996         unsigned int progress = 0, pass_through_head = 0;
1997         unsigned int pages = khugepaged_pages_to_scan;
1998
1999         barrier(); /* snapshot khugepaged_pages_to_scan into the local 'pages' */
2000
2001         while (progress < pages) {
2002                 cond_resched();
2003
2004                 if (!*hpage) {
2005                         *hpage = alloc_hugepage(khugepaged_defrag());
2006                         if (unlikely(!*hpage))
2007                                 break;
2008                 }
2009
2010                 spin_lock(&khugepaged_mm_lock);
2011                 if (!khugepaged_scan.mm_slot)
2012                         pass_through_head++;
2013                 if (khugepaged_has_work() &&
2014                     pass_through_head < 2)
2015                         progress += khugepaged_scan_mm_slot(pages - progress,
2016                                                             hpage);
2017                 else
2018                         progress = pages;
2019                 spin_unlock(&khugepaged_mm_lock);
2020         }
2021 }
2022
2023 static struct page *khugepaged_alloc_hugepage(void)
2024 {
2025         struct page *hpage;
2026
2027         do {
2028                 hpage = alloc_hugepage(khugepaged_defrag());
2029                 if (!hpage) {
2030                         DEFINE_WAIT(wait);
2031                         add_wait_queue(&khugepaged_wait, &wait);
2032                         schedule_timeout_interruptible(
2033                                 msecs_to_jiffies(
2034                                         khugepaged_alloc_sleep_millisecs));
2035                         remove_wait_queue(&khugepaged_wait, &wait);
2036                 }
2037         } while (unlikely(!hpage) &&
2038                  likely(khugepaged_enabled()));
2039         return hpage;
2040 }
2041
2042 static void khugepaged_loop(void)
2043 {
2044         struct page *hpage;
2045
2046         while (likely(khugepaged_enabled())) {
2047                 hpage = khugepaged_alloc_hugepage();
2048                 if (unlikely(!hpage))
2049                         break;
2050
2051                 khugepaged_do_scan(&hpage);
2052                 if (hpage)
2053                         put_page(hpage);
2054                 if (khugepaged_has_work()) {
2055                         DEFINE_WAIT(wait);
2056                         if (!khugepaged_scan_sleep_millisecs)
2057                                 continue;
2058                         add_wait_queue(&khugepaged_wait, &wait);
2059                         schedule_timeout_interruptible(
2060                                 msecs_to_jiffies(
2061                                         khugepaged_scan_sleep_millisecs));
2062                         remove_wait_queue(&khugepaged_wait, &wait);
2063                 } else if (khugepaged_enabled())
2064                         wait_event_interruptible(khugepaged_wait,
2065                                                  khugepaged_wait_event());
2066         }
2067 }
2068
2069 static int khugepaged(void *none)
2070 {
2071         struct mm_slot *mm_slot;
2072
2073         set_user_nice(current, 19);
2074
2075         /* serialize with start_khugepaged() */
2076         mutex_lock(&khugepaged_mutex);
2077
2078         for (;;) {
2079                 mutex_unlock(&khugepaged_mutex);
2080                 BUG_ON(khugepaged_thread != current);
2081                 khugepaged_loop();
2082                 BUG_ON(khugepaged_thread != current);
2083
2084                 mutex_lock(&khugepaged_mutex);
2085                 if (!khugepaged_enabled())
2086                         break;
2087         }
2088
2089         spin_lock(&khugepaged_mm_lock);
2090         mm_slot = khugepaged_scan.mm_slot;
2091         khugepaged_scan.mm_slot = NULL;
2092         if (mm_slot)
2093                 collect_mm_slot(mm_slot);
2094         spin_unlock(&khugepaged_mm_lock);
2095
2096         khugepaged_thread = NULL;
2097         mutex_unlock(&khugepaged_mutex);
2098
2099         return 0;
2100 }
2101
2102 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2103 {
2104         struct page *page;
2105
2106         spin_lock(&mm->page_table_lock);
2107         if (unlikely(!pmd_trans_huge(*pmd))) {
2108                 spin_unlock(&mm->page_table_lock);
2109                 return;
2110         }
2111         page = pmd_page(*pmd);
2112         VM_BUG_ON(!page_count(page));
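        /*
         * Pin the huge page before dropping page_table_lock so it
         * cannot be freed while split_huge_page() runs without the
         * lock held.
         */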
2113         get_page(page);
2114         spin_unlock(&mm->page_table_lock);
2115
2116         split_huge_page(page);
2117
2118         put_page(page);
2119         BUG_ON(pmd_trans_huge(*pmd));
2120 }