Revert "mm: batch activate_page() to reduce lock contention"
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>  /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru(zone, page);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

static void put_compound_page(struct page *page)
{
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
                struct page *page_head = page->first_page;
                smp_rmb();
                /*
                 * If PageTail is still set after smp_rmb() we can be sure
                 * that the page->first_page we read wasn't a dangling pointer.
                 * See __split_huge_page_refcount() smp_wmb().
                 */
                if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
                         * Verify that our page_head wasn't converted
                         * to a regular page before we got a
                         * reference on it.
                         */
                        if (unlikely(!PageHead(page_head))) {
                                /* PageHead is cleared after PageTail */
                                smp_rmb();
                                VM_BUG_ON(PageTail(page));
                                goto out_put_head;
                        }
                        /*
                         * Only run compound_lock on a valid PageHead,
                         * after having it pinned with
                         * get_page_unless_zero() above.
                         */
                        smp_mb();
                        /* page_head wasn't a dangling pointer */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
                        out_put_head:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
                        }
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
                         * get_page_unless_zero now that
                         * split_huge_page_refcount is blocked on the
                         * compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
                        VM_BUG_ON(atomic_read(&page->_count) <= 0);
                        atomic_dec(&page->_count);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
                                else
                                        __put_single_page(page_head);
                        }
                } else {
                        /* page_head is a dangling pointer */
                        VM_BUG_ON(PageTail(page));
                        goto out_put_single;
                }
        } else if (put_page_testzero(page)) {
                if (PageHead(page))
                        __put_compound_page(page);
                else
                        __put_single_page(page);
        }
}

void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __put_single_page(page);
}
EXPORT_SYMBOL(put_page);
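
/*
 * Illustrative sketch (not part of this file): the basic refcount
 * pairing around put_page().  A caller that takes its own reference
 * must drop exactly one reference when done; lookup helpers such as
 * find_get_page() already return with a reference held.
 *
 *	get_page(page);			take an extra reference
 *	...				use the page
 *	put_page(page);			drop it; may free the page
 */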

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
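
/*
 * Illustrative caller (a sketch, not from this file): pages are
 * chained through page->lru onto a local list and then released in
 * one call, as the read_cache_pages() error path does.
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	once per page we hold a ref on
 *	...
 *	put_pages_list(&pages);		drops one reference per page
 */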

static void pagevec_lru_move_fn(struct pagevec *pvec,
                                void (*move_fn)(struct page *page, void *arg),
                                void *arg)
{
        int i;
        struct zone *zone = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                (*move_fn)(page, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
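
/*
 * pagevec_lru_move_fn() is the generic "batch, then walk under
 * zone->lru_lock" helper: it retakes a zone's lock only when the zone
 * changes between consecutive pages.  A new batched LRU operation only
 * supplies the per-page callback; minimal sketch (my_move_fn is
 * hypothetical, not part of this file):
 *
 *	static void my_move_fn(struct page *page, void *arg)
 *	{
 *		struct zone *zone = page_zone(page);
 *
 *		...	zone->lru_lock is held here; safe to move
 *			the page between zone->lru lists
 *	}
 *
 *	pagevec_lru_move_fn(pvec, my_move_fn, NULL);
 *
 * pagevec_move_tail_fn() below and ____pagevec_lru_add_fn() further
 * down are the in-file users.
 */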

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
        int *pgmoved = arg;
        struct zone *zone = page_zone(page);

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
                list_move_tail(&page->lru, &zone->lru[lru].list);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
        struct zone_reclaim_stat *memcg_reclaim_stat;

        memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;

        if (!memcg_reclaim_stat)
                return;

        memcg_reclaim_stat->recent_scanned[file]++;
        if (rotated)
                memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
                del_page_from_lru_list(zone, page, lru);

                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);

                update_page_reclaim_stat(zone, page, file, 1);
        }
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}

EXPORT_SYMBOL(mark_page_accessed);
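
/*
 * The table above implies a page needs two "accesses" to be promoted.
 * Illustrative sequence (a sketch, not from this file):
 *
 *	mark_page_accessed(page);	inactive,unreferenced
 *					 -> inactive,referenced
 *	mark_page_accessed(page);	inactive,referenced
 *					 -> active,unreferenced
 *
 * so a single sequential sweep over a file leaves its pages on the
 * inactive list, while genuinely reused pages get activated.
 */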

void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                ____pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}
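
/*
 * Illustrative callers (a sketch; the wrappers live in <linux/swap.h>):
 * a freshly allocated page about to become visible is usually queued
 * through one of
 *
 *	lru_cache_add_file(page);	-> __lru_cache_add(page,
 *					       LRU_INACTIVE_FILE)
 *	lru_cache_add_anon(page);	-> __lru_cache_add(page,
 *					       LRU_INACTIVE_ANON)
 *
 * which batch the page on the matching per-cpu lru_add_pvecs entry.
 */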

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        ____pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

void lru_add_drain(void)
{
        drain_cpu_pagevecs(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                                        flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
                }

                if (!pagevec_add(&pages_to_free, page)) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        __pagevec_free(&pages_to_free);
                        pagevec_reinit(&pages_to_free);
                }
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
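
/*
 * The common array-batching caller is pagevec_release(); its
 * <linux/pagevec.h> definition is, in essence (quoted from memory,
 * verify against the header):
 *
 *	static inline void pagevec_release(struct pagevec *pvec)
 *	{
 *		if (pagevec_count(pvec))
 *			__pagevec_release(pvec);
 *	}
 *
 * and __pagevec_release() below funnels into release_pages().
 */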

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
{
        int active;
        enum lru_list lru;
        const int file = 0;
        struct list_head *head;

        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
        VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

        SetPageLRU(page_tail);

        if (page_evictable(page_tail, NULL)) {
                if (PageActive(page)) {
                        SetPageActive(page_tail);
                        active = 1;
                        lru = LRU_ACTIVE_ANON;
                } else {
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
                update_page_reclaim_stat(zone, page_tail, file, active);
                if (likely(PageLRU(page)))
                        head = page->lru.prev;
                else
                        head = &zone->lru[lru].list;
                __add_page_to_lru_list(zone, page_tail, lru, head);
        } else {
                SetPageUnevictable(page_tail);
                add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
        }
}

static void ____pagevec_lru_add_fn(struct page *page, void *arg)
{
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);

        VM_BUG_ON(PageActive(page));
        VM_BUG_ON(PageUnevictable(page));
        VM_BUG_ON(PageLRU(page));

        SetPageLRU(page);
        if (active)
                SetPageActive(page);
        update_page_reclaim_stat(zone, page, file, active);
        add_page_to_lru_list(zone, page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        VM_BUG_ON(is_unevictable_lru(lru));

        pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (page_has_private(page) && trylock_page(page)) {
                        if (page_has_private(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);
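
/*
 * The usual pagevec_lookup() idiom, sketched for illustration: walk a
 * mapping in PAGEVEC_SIZE batches and drop the gang-lookup references
 * with pagevec_release() after each batch.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;
 *			...	operate on page; a reference is held
 *		}
 *		pagevec_release(&pvec);
 *	}
 */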

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}
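
/*
 * Note (behaviour implemented elsewhere, sketched here for context):
 * page_cluster is the log2 of the swap readahead window, i.e. up to
 * 1 << page_cluster pages are read around a faulting swap slot by
 * swapin_readahead(), and it remains tunable at runtime through
 * /proc/sys/vm/page-cluster.
 */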