index 5c13f13..4e7e2ec 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 static void __page_cache_release(struct page *page)
 {
        if (PageLRU(page)) {
-               unsigned long flags;
                struct zone *zone = page_zone(page);
+               struct lruvec *lruvec;
+               unsigned long flags;
 
                spin_lock_irqsave(&zone->lru_lock, flags);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
-               del_page_from_lru_list(zone, page, page_off_lru(page));
+               del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
 }
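
Everything in this patch follows the pattern of this first hunk: resolve the page's lruvec once, under zone->lru_lock, then hand it to the list helpers in place of the zone. For orientation, mem_cgroup_page_lruvec() resolves to something like the sketch below; lookup_page_cgroup() and page_cgroup_zoneinfo() are recalled from the era's memcontrol.c rather than shown in this diff, and the real version also fixes up uncharged pages, so treat the details as approximate:

    struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
    {
            struct mem_cgroup_per_zone *mz;
            struct page_cgroup *pc;

            /* With memcg disabled there is a single lruvec per zone. */
            if (mem_cgroup_disabled())
                    return &zone->lruvec;

            /* Otherwise pick the per-memcg, per-zone lruvec. (sketch) */
            pc = lookup_page_cgroup(page);
            mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
            return &mz->lruvec;
    }
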
@@ -82,6 +84,25 @@ static void put_compound_page(struct page *page)
                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;
+
+                       /*
+                        * THP cannot break up slab pages, so avoid taking
+                        * compound_lock().  Slab performs non-atomic bit ops
+                        * on page->flags for better performance.  In particular
+                        * slab_unlock() in slub used to be a hot path.  It is
+                        * still hot on arches that do not support
+                        * this_cpu_cmpxchg_double().
+                        */
+                       if (PageSlab(page_head)) {
+                               if (PageTail(page)) {
+                                       if (put_page_testzero(page_head))
+                                               VM_BUG_ON(1);
+
+                                       atomic_dec(&page->_mapcount);
+                                       goto skip_lock_tail;
+                               } else
+                                       goto skip_lock;
+                       }
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
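
The reason compound_lock() must be skipped here: it is a bit spinlock living in page->flags, so taking it would race with slab's non-atomic flag updates. For reference, it looks roughly like this in include/linux/mm.h of this era (sketch, not verbatim):

    static inline void compound_lock(struct page *page)
    {
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
            /*
             * A bit spinlock in page->flags: mixing this with the
             * non-atomic flag updates slab performs on its pages
             * would corrupt the flags word.
             */
            bit_spin_lock(PG_compound_lock, &page->flags);
    #endif
    }
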
@@ -92,10 +113,10 @@ static void put_compound_page(struct page *page)
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount ran before us */
                                compound_unlock_irqrestore(page_head, flags);
-                               VM_BUG_ON(PageHead(page_head));
+skip_lock:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
-                       out_put_single:
+out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
@@ -115,6 +136,8 @@ static void put_compound_page(struct page *page)
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
+
+skip_lock_tail:
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
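
For context, the fallback at the end of this path is the existing helper earlier in this file, roughly (sketch from memory of the same tree, not part of this diff):

    static void __put_compound_page(struct page *page)
    {
            compound_page_dtor *dtor;

            /* Pull the head page off the LRU, then run the compound
             * destructor (free_compound_page() for THP). */
            __page_cache_release(page);
            dtor = get_compound_page_dtor(page);
            (*dtor)(page);
    }
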
@@ -162,6 +185,18 @@ bool __get_page_tail(struct page *page)
        struct page *page_head = compound_trans_head(page);
 
        if (likely(page != page_head && get_page_unless_zero(page_head))) {
+
+               /* See the comment in put_compound_page(). */
+               if (PageSlab(page_head)) {
+                       if (likely(PageTail(page))) {
+                               __get_page_tail_foll(page, false);
+                               return true;
+                       } else {
+                               put_page(page_head);
+                               return false;
+                       }
+               }
+
                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
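
The new slab branch pins the tail page without the compound_lock. __get_page_tail_foll() lives in mm/internal.h and, as I recall it, amounts to the sketch below; the exact assertions are approximate. The head reference is already held via get_page_unless_zero() above, hence get_page_head == false here:

    static inline void __get_page_tail_foll(struct page *page,
                                            bool get_page_head)
    {
            /* Tail pins are counted in page->_mapcount; the head's
             * _count is only bumped when the caller asks for it. */
            VM_BUG_ON(!PageTail(page));
            if (get_page_head)
                    atomic_inc(&page->first_page->_count);
            atomic_inc(&page->_mapcount);
    }
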
@@ -202,11 +237,12 @@ void put_pages_list(struct list_head *pages)
 EXPORT_SYMBOL(put_pages_list);
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
-                               void (*move_fn)(struct page *page, void *arg),
-                               void *arg)
+       void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+       void *arg)
 {
        int i;
        struct zone *zone = NULL;
+       struct lruvec *lruvec;
        unsigned long flags = 0;
 
        for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }
 
-               (*move_fn)(page, arg);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
+               (*move_fn)(page, lruvec, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
        pagevec_reinit(pvec);
 }
 
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+                                void *arg)
 {
        int *pgmoved = arg;
 
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-                                                  page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
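
pagevec_move_tail_fn() now takes the lruvec from the walker instead of re-deriving it per page. Its sole caller in this file is untouched by the patch and, as it reads in this era's tree (from memory, so approximate), looks like this:

    static void pagevec_move_tail(struct pagevec *pvec)
    {
            int pgmoved = 0;

            /* Rotate each page to the tail of its inactive list and
             * account all the moves as PGROTATED in one go. */
            pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
            __count_vm_events(PGROTATED, pgmoved);
    }
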
@@ -276,41 +310,30 @@ void rotate_reclaimable_page(struct page *page)
        }
 }
 
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
                                     int file, int rotated)
 {
-       struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
-       struct zone_reclaim_stat *memcg_reclaim_stat;
-
-       memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+       struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
-
-       if (!memcg_reclaim_stat)
-               return;
-
-       memcg_reclaim_stat->recent_scanned[file]++;
-       if (rotated)
-               memcg_reclaim_stat->recent_rotated[file]++;
 }
 
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+                           void *arg)
 {
-       struct zone *zone = page_zone(page);
-
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
-               del_page_from_lru_list(zone, page, lru);
 
+               del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
-               add_page_to_lru_list(zone, page, lru);
-               __count_vm_event(PGACTIVATE);
+               add_page_to_lru_list(page, lruvec, lru);
 
-               update_page_reclaim_stat(zone, page, file, 1);
+               __count_vm_event(PGACTIVATE);
+               update_page_reclaim_stat(lruvec, file, 1);
        }
 }
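
update_page_reclaim_stat() collapses the old zone/memcg double bookkeeping into the one reclaim_stat embedded in whichever lruvec the page sits on. For reference, the structures involved look roughly like this in the era's include/linux/mmzone.h (abridged sketch, not from this diff):

    struct zone_reclaim_stat {
            /* Scanned/rotated counts, indexed by anon (0) vs file (1);
             * vmscan uses the ratios to balance list pressure. */
            unsigned long   recent_rotated[2];
            unsigned long   recent_scanned[2];
    };

    struct lruvec {
            struct list_head                lists[NR_LRU_LISTS];
            struct zone_reclaim_stat        reclaim_stat;
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR
            struct zone                     *zone;
    #endif
    };
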
 
@@ -347,7 +370,7 @@ void activate_page(struct page *page)
        struct zone *zone = page_zone(page);
 
        spin_lock_irq(&zone->lru_lock);
-       __activate_page(page, NULL);
+       __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
        spin_unlock_irq(&zone->lru_lock);
 }
 #endif
@@ -414,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 void add_page_to_unevictable_list(struct page *page)
 {
        struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
 
        spin_lock_irq(&zone->lru_lock);
+       lruvec = mem_cgroup_page_lruvec(page, zone);
        SetPageUnevictable(page);
        SetPageLRU(page);
-       add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+       add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
 }
 
@@ -443,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
 * be written out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+                             void *arg)
 {
        int lru, file;
        bool active;
-       struct zone *zone = page_zone(page);
 
        if (!PageLRU(page))
                return;
@@ -460,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                return;
 
        active = PageActive(page);
-
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);
-       del_page_from_lru_list(zone, page, lru + active);
+
+       del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
-       add_page_to_lru_list(zone, page, lru);
+       add_page_to_lru_list(page, lruvec, lru);
 
        if (PageWriteback(page) || PageDirty(page)) {
                /*
@@ -476,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                 */
                SetPageReclaim(page);
        } else {
-               struct lruvec *lruvec;
                /*
                 * The page's writeback ended while it was in the pagevec,
                 * so move the page to the tail of the inactive list.
                 */
-               lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }
 
        if (active)
                __count_vm_event(PGDEACTIVATE);
-       update_page_reclaim_stat(zone, page, file, 0);
+       update_page_reclaim_stat(lruvec, file, 0);
 }
 
 /*
@@ -588,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
+       struct lruvec *lruvec;
        unsigned long uninitialized_var(flags);
 
        for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
+
+                       lruvec = mem_cgroup_page_lruvec(page, zone);
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
-                       del_page_from_lru_list(zone, page, page_off_lru(page));
+                       del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }
 
                list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-                      struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+                      struct lruvec *lruvec)
 {
        int uninitialized_var(active);
        enum lru_list lru;
@@ -659,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
-       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+       VM_BUG_ON(NR_CPUS != 1 &&
+                 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
        SetPageLRU(page_tail);
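
lruvec_zone() is the back-link that lets this assertion drop the explicit zone argument. Roughly (sketch matching the struct above; recalled, not shown in this diff):

    static inline struct zone *lruvec_zone(struct lruvec *lruvec)
    {
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR
            /* Per-memcg lruvecs carry an explicit back-pointer. */
            return lruvec->zone;
    #else
            /* Otherwise the lruvec is embedded in its zone. */
            return container_of(lruvec, struct zone, lruvec);
    #endif
    }
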
 
@@ -688,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
-               add_page_to_lru_list(zone, page_tail, lru);
+               add_page_to_lru_list(page_tail, lruvec, lru);
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }
 
        if (!PageUnevictable(page))
-               update_page_reclaim_stat(zone, page_tail, file, active);
+               update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+                                void *arg)
 {
        enum lru_list lru = (enum lru_list)arg;
-       struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);
 
@@ -712,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
        SetPageLRU(page);
        if (active)
                SetPageActive(page);
-       add_page_to_lru_list(zone, page, lru);
-       update_page_reclaim_stat(zone, page, file, active);
+       add_page_to_lru_list(page, lruvec, lru);
+       update_page_reclaim_stat(lruvec, file, active);
 }
 
 /*
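
With the callback signature widened, page additions go through the same walker; the exported entry point later in this file simply passes the target list through the opaque arg (shown for context, from memory of the same tree):

    void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
    {
            VM_BUG_ON(is_unevictable_lru(lru));

            /* The lru index travels through the void *arg cookie. */
            pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
    }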