/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru_list(zone, page, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

static void put_compound_page(struct page *page)
{
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
                struct page *page_head = compound_trans_head(page);

                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;

                        /*
                         * THP can not break up slab pages so avoid taking
                         * compound_lock().  Slab performs non-atomic bit ops
                         * on page->flags for better performance.  In particular
                         * slab_unlock() in slub used to be a hot path.  It is
                         * still hot on arches that do not support
                         * this_cpu_cmpxchg_double().
                         */
                        if (PageSlab(page_head)) {
                                if (PageTail(page)) {
                                        if (put_page_testzero(page_head))
                                                VM_BUG_ON(1);

                                        atomic_dec(&page->_mapcount);
                                        goto skip_lock_tail;
                                } else
                                        goto skip_lock;
                        }
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
                         * we obtain the lock. That is ok as long as it
                         * can't be freed from under us.
                         */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
skip_lock:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
                        }
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
                         * get_page_unless_zero() now that
                         * __split_huge_page_refcount() is blocked on
                         * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
                        VM_BUG_ON(page_mapcount(page) <= 0);
                        atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
                                else
                                        __put_single_page(page_head);
                        }
                } else {
                        /* page_head is a dangling pointer */
                        VM_BUG_ON(PageTail(page));
                        goto out_put_single;
                }
        } else if (put_page_testzero(page)) {
                if (PageHead(page))
                        __put_compound_page(page);
                else
                        __put_single_page(page);
        }
}

void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __put_single_page(page);
}
EXPORT_SYMBOL(put_page);
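
/*
 * Illustrative usage sketch (not part of the original file): code that
 * temporarily pins a page it was handed must balance its own reference
 * with put_page(), which may free the page via the paths above.  The
 * helper name below is hypothetical.
 *
 *	static void my_consume_page(struct page *page)
 *	{
 *		get_page(page);		// take our own reference
 *		...			// use the page
 *		put_page(page);		// drop it; may free the page
 *	}
 */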

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
        /*
         * This takes care of get_page() if run on a tail page
         * returned by one of the get_user_pages/follow_page variants.
         * get_user_pages/follow_page itself doesn't need the compound
         * lock because it runs __get_page_tail_foll() under the
         * proper PT lock that already serializes against
         * split_huge_page().
         */
        unsigned long flags;
        bool got = false;
        struct page *page_head = compound_trans_head(page);

        if (likely(page != page_head && get_page_unless_zero(page_head))) {

                /* See put_compound_page() comment. */
                if (PageSlab(page_head)) {
                        if (likely(PageTail(page))) {
                                __get_page_tail_foll(page, false);
                                return true;
                        } else {
                                put_page(page_head);
                                return false;
                        }
                }

                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
                 * we obtain the lock. That is ok as long as it
                 * can't be freed from under us.
                 */
                flags = compound_lock_irqsave(page_head);
                /* here __split_huge_page_refcount won't run anymore */
                if (likely(PageTail(page))) {
                        __get_page_tail_foll(page, false);
                        got = true;
                }
                compound_unlock_irqrestore(page_head, flags);
                if (unlikely(!got))
                        put_page(page_head);
        }
        return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
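
/*
 * Illustrative usage sketch (not part of the original file): callers such
 * as read_cache_pages() thread pages on a local list via page->lru and
 * hand the whole list back here on an error path.  The list name below is
 * hypothetical.
 *
 *	LIST_HEAD(my_pages);
 *	...
 *	list_add(&page->lru, &my_pages);	// collect pages to release
 *	...
 *	put_pages_list(&my_pages);	// drops one ref per page, empties list
 */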

static void pagevec_lru_move_fn(struct pagevec *pvec,
                                void (*move_fn)(struct page *page, void *arg),
                                void *arg)
{
        int i;
        struct zone *zone = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                (*move_fn)(page, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}
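
/*
 * Illustrative sketch (not part of the original file): callers supply a
 * per-page callback that runs with the owning zone's lru_lock held, so a
 * whole pagevec is processed while the lock is only retaken when the zone
 * changes.  The callback name below is hypothetical.
 *
 *	static void my_count_lru_fn(struct page *page, void *arg)
 *	{
 *		int *nr_on_lru = arg;
 *
 *		if (PageLRU(page))	// safe: zone->lru_lock is held here
 *			(*nr_on_lru)++;
 *	}
 *
 *	// pagevec_lru_move_fn(pvec, my_count_lru_fn, &nr_on_lru);
 */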

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
                struct lruvec *lruvec;

                lruvec = mem_cgroup_lru_move_lists(page_zone(page),
                                                   page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat;

        reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
        if (!reclaim_stat)
                reclaim_stat = &zone->lruvec.reclaim_stat;

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
        struct zone *zone = page_zone(page);

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
                del_page_from_lru_list(zone, page, lru);

                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);

                update_page_reclaim_stat(zone, page, file, 1);
        }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        __activate_page(page, NULL);
        spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}
EXPORT_SYMBOL(mark_page_accessed);
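
/*
 * Illustrative sketch (not part of the original file): the transition
 * table above means a page is only promoted to the active list on its
 * second recorded access.  A hypothetical read path might look like:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);	// 1st call: sets PG_referenced
 *		...
 *		mark_page_accessed(page);	// 2nd call: activate_page()
 *		put_page(page);
 *	}
 */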

void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page is not mapped and is dirty or under writeback,
 * it can be reclaimed ASAP by tagging it with PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list
 * because the VM expects the flusher threads to write it out,
 * which is much more effective than the single-page writeout
 * from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
        int lru, file;
        bool active;
        struct zone *zone = page_zone(page);

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);

        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);
        del_page_from_lru_list(zone, page, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(zone, page, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * Setting PG_reclaim can race with end_page_writeback,
                 * which can confuse readahead.  But the race window is
                 * really small and it is a non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                struct lruvec *lruvec;
                /*
                 * The page's writeback already ended while it sat in
                 * the pagevec, so move it to the tail of the inactive
                 * list.
                 */
                lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        __pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as one using
         * mprotect), deactivating unevictable pages to accelerate reclaim
         * is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
}
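
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * is the invalidation path, which falls back to deactivate_page() when a
 * page cannot be dropped from the page cache right now (hypothetical
 * caller, modelled on invalidate_mapping_pages()-style logic):
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_page(page);	// hint: reclaim this page soon
 */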

void lru_add_drain(void)
{
        lru_add_drain_cpu(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
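
/*
 * Illustrative sketch (not part of the original file): callers that need
 * recently-added pages to actually be on the LRU lists (for example
 * before isolating pages for migration) first flush every CPU's pending
 * pagevecs:
 *
 *	lru_add_drain_all();	// may sleep: schedules work on each CPU
 *	...			// now walk/isolate pages from the LRU
 */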

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                                        flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru_list(zone, page, page_off_lru(page));
                }

                list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
{
        int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;

        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

        SetPageLRU(page_tail);

        if (page_evictable(page_tail, NULL)) {
                if (PageActive(page)) {
                        SetPageActive(page_tail);
                        active = 1;
                        lru = LRU_ACTIVE_ANON;
                } else {
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
        } else {
                SetPageUnevictable(page_tail);
                lru = LRU_UNEVICTABLE;
        }

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
                add_page_to_lru_list(zone, page_tail, lru);
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }

        if (!PageUnevictable(page))
                update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);

        VM_BUG_ON(PageActive(page));
        VM_BUG_ON(PageUnevictable(page));
        VM_BUG_ON(PageLRU(page));

        SetPageLRU(page);
        if (active)
                SetPageActive(page);
        add_page_to_lru_list(zone, page, lru);
        update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        VM_BUG_ON(is_unevictable_lru(lru));

        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
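
/*
 * Illustrative sketch (not part of the original file): the classic caller
 * pattern walks a mapping in batches of PAGEVEC_SIZE pages, releasing each
 * batch with pagevec_release() before looking up the next one:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;	// advance past this page
 *			...				// process the page
 *		}
 *		pagevec_release(&pvec);		// drops the lookup references
 *	}
 */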

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}
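
/*
 * Illustrative note (not part of the original file): page_cluster is the
 * log2 of the swap readahead window, so the values chosen above mean
 * 1 << 2 = 4 pages on small-memory machines and 1 << 3 = 8 pages
 * otherwise.  The value is also tunable via /proc/sys/vm/page-cluster.
 */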