Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9149444..e01ded3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -11,6 +11,8 @@
  *  Multiqueue VM started 5.8.00, Rik van Riel.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
@@ -43,6 +45,7 @@
 #include <linux/sysctl.h>
 #include <linux/oom.h>
 #include <linux/prefetch.h>
+#include <linux/printk.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -83,6 +86,9 @@ struct scan_control {
        /* Scan (total_size >> priority) pages at once */
        int priority;
 
+       /* anon vs. file LRUs scanning "ratio" */
+       int swappiness;
+
        /*
         * The memory cgroup that hit its limit and as a result is the
         * primary target of this reclaim invocation.
@@ -477,7 +483,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
-                               printk("%s: orphaned page\n", __func__);
+                               pr_info("%s: orphaned page\n", __func__);
                                return PAGE_CLEAN;
                        }
                }
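
The two hunks above move vmscan.c to the pr_fmt()/pr_info() convention: with pr_fmt defined before the includes, every pr_* message is automatically prefixed with KBUILD_MODNAME. A minimal userspace model of that convention (printf stands in for printk and "vmscan" for KBUILD_MODNAME; these stand-ins are illustrative, not taken from the patch):

#include <stdio.h>

/* Stand-ins for the kernel's KBUILD_MODNAME and printk(KERN_INFO ...). */
#define pr_fmt(fmt) "vmscan" ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("%s: orphaned page\n", "pageout");	/* vmscan: pageout: orphaned page */
	return 0;
}

Run, this prints "vmscan: pageout: orphaned page", i.e. the prefixed format the converted pr_info() call above would produce.
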
@@ -1567,20 +1573,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                 * If dirty pages are scanned that are not queued for IO, it
                 * implies that flushers are not keeping up. In this case, flag
                 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-                * pages from reclaim context. It will forcibly stall in the
-                * next check.
+                * pages from reclaim context.
                 */
                if (nr_unqueued_dirty == nr_taken)
                        zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
                /*
-                * In addition, if kswapd scans pages marked marked for
-                * immediate reclaim and under writeback (nr_immediate), it
-                * implies that pages are cycling through the LRU faster than
+                * If kswapd scans pages marked for immediate
+                * reclaim and under writeback (nr_immediate), it implies
+                * that pages are cycling through the LRU faster than
                 * they are written so also forcibly stall.
                 */
-               if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
-                   current_may_throttle())
+               if (nr_immediate && current_may_throttle())
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
 
@@ -1845,13 +1849,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
 }
 
-static int vmscan_swappiness(struct scan_control *sc)
-{
-       if (global_reclaim(sc))
-               return vm_swappiness;
-       return mem_cgroup_swappiness(sc->target_mem_cgroup);
-}
-
 enum scan_balance {
        SCAN_EQUAL,
        SCAN_FRACT,
@@ -1912,7 +1909,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
         * using the memory controller's swap limit feature would be
         * too expensive.
         */
-       if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
+       if (!global_reclaim(sc) && !sc->swappiness) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -1922,7 +1919,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
         * system is close to OOM, scan both anon and file equally
         * (unless the swappiness setting disagrees with swapping).
         */
-       if (!sc->priority && vmscan_swappiness(sc)) {
+       if (!sc->priority && sc->swappiness) {
                scan_balance = SCAN_EQUAL;
                goto out;
        }
@@ -1965,7 +1962,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = vmscan_swappiness(sc);
+       anon_prio = sc->swappiness;
        file_prio = 200 - anon_prio;
 
        /*
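
The swappiness value, now read from sc->swappiness, still feeds the same anon/file priority split: anon_prio = swappiness and file_prio = 200 - swappiness, so both LRUs see equal pressure at swappiness 100. A toy calculation of that split (the real get_scan_count() further scales these by recent scan/rotation ratios, omitted here):

#include <stdio.h>

static void scan_priorities(int swappiness, int *anon_prio, int *file_prio)
{
	*anon_prio = swappiness;		/* was vmscan_swappiness(sc) */
	*file_prio = 200 - swappiness;
}

int main(void)
{
	int anon, file;

	scan_priorities(60, &anon, &file);	/* default vm.swappiness */
	printf("swappiness 60:  anon=%d file=%d\n", anon, file);	/* 60 / 140 */

	scan_priorities(100, &anon, &file);
	printf("swappiness 100: anon=%d file=%d\n", anon, file);	/* 100 / 100 */
	return 0;
}
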
@@ -2265,6 +2262,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 
                        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
+                       sc->swappiness = mem_cgroup_swappiness(memcg);
                        shrink_lruvec(lruvec, sc);
 
                        /*
@@ -2731,6 +2729,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
                .may_swap = !noswap,
                .order = 0,
                .priority = 0,
+               .swappiness = mem_cgroup_swappiness(memcg),
                .target_mem_cgroup = memcg,
        };
        struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
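
Both call sites fill the new scan_control.swappiness field up front: shrink_zone() sets it per memcg inside its iteration, and mem_cgroup_shrink_node_zone() sets it in the initializer, which is what makes the vmscan_swappiness() helper removed earlier redundant. A self-contained sketch of the shape of that refactor, with toy types in place of the kernel's:

/* Toy types only; field and function names do not match the kernel's. */
struct mem_cgroup_model {
	int swappiness;				/* per-cgroup memory.swappiness */
};

struct scan_control_model {
	int priority;
	int swappiness;				/* anon vs. file scanning "ratio" */
	struct mem_cgroup_model *target_mem_cgroup;
};

static int vm_swappiness_model = 60;		/* global /proc/sys/vm/swappiness */

static int memcg_swappiness(const struct mem_cgroup_model *memcg)
{
	return memcg ? memcg->swappiness : vm_swappiness_model;
}

static void shrink_one_memcg(struct scan_control_model *sc,
			     struct mem_cgroup_model *memcg)
{
	/* Captured once here; get_scan_count() then just reads sc->swappiness. */
	sc->swappiness = memcg_swappiness(memcg);
	/* ... shrink_lruvec(lruvec, sc) would follow in the real code ... */
}
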
@@ -3372,7 +3371,10 @@ static int kswapd(void *p)
                }
        }
 
+       tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
        current->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+
        return 0;
 }