diff --git a/mm/vmscan.c b/mm/vmscan.c
index c917720..faa0a08 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
                                struct scan_control *sc, enum lru_list lru)
 {
        if (!scanning_global_lru(sc))
-               return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
 
        return zone_page_state(zone, NR_LRU_BASE + lru);
 }
@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+                                    struct shrink_control *sc,
+                                    unsigned long nr_to_scan)
+{
+       sc->nr_to_scan = nr_to_scan;
+       return (*shrinker->shrink)(shrinker, sc);
+}
+
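For reference, a minimal sketch (editorial, not part of this patch) of a shrinker written against the convention do_shrinker_shrink() wraps: a nr_to_scan of 0 only asks the callback to report its object count, anything else asks it to free up to that many objects, and -1 means "cannot make progress now". The helpers example_prune() and example_count() are hypothetical.

    static int example_shrink(struct shrinker *s, struct shrink_control *sc)
    {
            if (sc->nr_to_scan) {
                    if (!(sc->gfp_mask & __GFP_FS))
                            return -1;              /* cannot recurse into the FS */
                    example_prune(sc->nr_to_scan);  /* hypothetical: free objects */
            }
            return example_count();                 /* hypothetical: objects left */
    }

    static struct shrinker example_shrinker = {
            .shrink = example_shrink,
            .seeks  = DEFAULT_SEEKS,
    };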
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -222,25 +230,29 @@ EXPORT_SYMBOL(unregister_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-                       unsigned long lru_pages)
+unsigned long shrink_slab(struct shrink_control *shrink,
+                         unsigned long nr_pages_scanned,
+                         unsigned long lru_pages)
 {
        struct shrinker *shrinker;
        unsigned long ret = 0;
 
-       if (scanned == 0)
-               scanned = SWAP_CLUSTER_MAX;
+       if (nr_pages_scanned == 0)
+               nr_pages_scanned = SWAP_CLUSTER_MAX;
 
-       if (!down_read_trylock(&shrinker_rwsem))
-               return 1;       /* Assume we'll be able to shrink next time */
+       if (!down_read_trylock(&shrinker_rwsem)) {
+               /* Assume we'll be able to shrink next time */
+               ret = 1;
+               goto out;
+       }
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass;
 
-               max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-               delta = (4 * scanned) / shrinker->seeks;
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
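To make the pressure arithmetic above concrete, a standalone sketch with made-up numbers (editorial, not part of the patch): 1024 LRU pages scanned, a cache reporting 50000 objects with seeks = 2, and 100000 LRU pages in total.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long delta;
            unsigned long nr_pages_scanned = 1024, lru_pages = 100000;
            unsigned long max_pass = 50000, seeks = 2;

            delta = (4 * nr_pages_scanned) / seeks; /* 2048 */
            delta *= max_pass;                      /* 102400000 */
            delta /= lru_pages + 1;                 /* do_div() in the kernel */
            printf("%llu objects queued\n", delta); /* 1023, about 2% of the cache */
            return 0;
    }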
@@ -267,9 +279,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-                       shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-                                                               gfp_mask);
+                       nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+                       shrink_ret = do_shrinker_shrink(shrinker, shrink,
+                                                       this_scan);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -283,6 +295,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
+out:
+       cond_resched();
        return ret;
 }
 
@@ -1202,13 +1216,16 @@ int isolate_lru_page(struct page *page)
 {
        int ret = -EBUSY;
 
+       VM_BUG_ON(!page_count(page));
+
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
 
                spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page) && get_page_unless_zero(page)) {
+               if (PageLRU(page)) {
                        int lru = page_lru(page);
                        ret = 0;
+                       get_page(page);
                        ClearPageLRU(page);
 
                        del_page_from_lru_list(zone, page, lru);
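A sketch of the convention this hunk tightens (editorial, loosely modeled on the migration callers; example_migrate_one() is hypothetical): the caller must already hold its own page reference, which the new VM_BUG_ON() asserts, and isolate_lru_page() then takes an extra one for the isolation itself.

    static void example_migrate_one(struct page *page)  /* caller holds a ref */
    {
            if (isolate_lru_page(page) == 0) {      /* 0: isolated, +1 ref taken */
                    /* page is off the LRU here; operate on it, then... */
                    putback_lru_page(page);         /* re-add, drops the +1 ref */
            }
            /* the caller's own reference is dropped by the caller, as before */
    }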
@@ -1700,26 +1717,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                      unsigned long *nr_saved_scan)
-{
-       unsigned long nr;
-
-       *nr_saved_scan += nr_to_scan;
-       nr = *nr_saved_scan;
-
-       if (nr >= SWAP_CLUSTER_MAX)
-               *nr_saved_scan = 0;
-       else
-               nr = 0;
-
-       return nr;
-}
-
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1738,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
+       int force_scan = 0;
+
+
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+       if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+               /* kswapd does zone balancing and needs to scan this zone */
+               if (scanning_global_lru(sc) && current_is_kswapd())
+                       force_scan = 1;
+               /* a memcg may have a small limit and needs to avoid priority drops */
+               if (!scanning_global_lru(sc))
+                       force_scan = 1;
+       }
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1748,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                goto out;
        }
 
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
@@ -1819,8 +1827,23 @@ out:
                        scan >>= priority;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
-               nr[l] = nr_scan_try_batch(scan,
-                                         &reclaim_stat->nr_saved_scan[l]);
+
+               /*
+                * If the zone or memcg is small, nr[l] can be 0.  This would
+                * mean no scanning at this priority, which drops the priority
+                * further.  Global direct reclaim can go on to the next zone,
+                * so it tends not to have problems.  Global kswapd does zone
+                * balancing and needs to scan this small amount.  With memcg,
+                * a priority drop can cause large latencies, so it is better
+                * to scan a small amount here.  See force_scan above.
+                */
+               if (!scan && force_scan) {
+                       if (file)
+                               scan = SWAP_CLUSTER_MAX;
+                       else if (!noswap)
+                               scan = SWAP_CLUSTER_MAX;
+               }
+               nr[l] = scan;
        }
 }
 
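A standalone illustration of the rounding problem the force_scan logic addresses (editorial, made-up numbers): a memcg with 2000 reclaimable pages scanned at priority 8.

    #include <stdio.h>

    #define SWAP_CLUSTER_MAX 32UL

    int main(void)
    {
            unsigned long anon_plus_file = 2000, priority = 8;
            unsigned long scan = 500 >> priority;   /* one LRU list of 500 pages: 1 */

            scan = scan * 30 / 100;                 /* fraction/denominator math: 0 */
            if ((anon_plus_file >> priority) < SWAP_CLUSTER_MAX && !scan)
                    scan = SWAP_CLUSTER_MAX;        /* force_scan path */
            printf("scan target: %lu\n", scan);     /* 32 instead of a stall at 0 */
            return 0;
    }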
@@ -1960,11 +1983,14 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan, then give up on it.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
+       unsigned long total_scanned = 0;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1981,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                                continue;       /* Let kswapd poll it */
                }
 
+               nr_soft_scanned = 0;
+               nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       sc->order, sc->gfp_mask,
+                                                       &nr_soft_scanned);
+               sc->nr_reclaimed += nr_soft_reclaimed;
+               total_scanned += nr_soft_scanned;
+
                shrink_zone(priority, zone, sc);
        }
+
+       return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2027,7 +2062,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  *             else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       struct shrink_control *shrink)
 {
        int priority;
        unsigned long total_scanned = 0;
@@ -2046,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               shrink_zones(priority, zonelist, sc);
+               total_scanned += shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -2061,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                lru_pages += zone_reclaimable_pages(zone);
                        }
 
-                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+                       shrink_slab(shrink, sc->nr_scanned, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
@@ -2133,12 +2169,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
 
        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
                                gfp_mask);
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
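The same pairing recurs at every entry point that follows; as an editorial sketch, the shape is:

    struct shrink_control shrink = {
            .gfp_mask = sc.gfp_mask,    /* keep in sync: shrinkers see only this */
    };

    nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

Bundling the mask into a struct presumably lets later changes add fields without touching every shrinker and call site again.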
@@ -2150,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone)
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned)
 {
        struct scan_control sc = {
+               .nr_scanned = 0,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
@@ -2161,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .order = 0,
                .mem_cgroup = mem,
        };
+
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
@@ -2179,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
+       *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
 }
 
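A sketch of consuming the new out-parameter (editorial; the in-tree consumer is presumably mem_cgroup_soft_limit_reclaim() in memcontrol.c, whose own nr_scanned out-parameter appears in the shrink_zones() and balance_pgdat() hunks):

    unsigned long scanned = 0;
    unsigned long reclaimed;

    reclaimed = mem_cgroup_shrink_node_zone(mem, GFP_KERNEL, false,
                                            vm_swappiness, zone, &scanned);
    total_scanned += scanned;   /* scan effort now propagates to the caller */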
@@ -2189,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
+       int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
@@ -2198,17 +2242,27 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .order = 0,
                .mem_cgroup = mem_cont,
                .nodemask = NULL, /* we don't care about placement */
+               .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                               (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+       };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
        };
 
-       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
-       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       /*
+        * Unlike direct reclaim via alloc_pages(), memcg's reclaim does not
+        * care about which node pages are taken from, so the node where we
+        * start the scan does not need to be the current node.
+        */
+       nid = mem_cgroup_select_victim_node(mem_cont);
+
+       zonelist = NODE_DATA(nid)->node_zonelists;
 
        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
                                            sc.gfp_mask);
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2287,7 +2341,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
         * must be balanced
         */
        if (order)
-               return pgdat_balanced(pgdat, balanced, classzone_idx);
+               return !pgdat_balanced(pgdat, balanced, classzone_idx);
        else
                return !all_zones_ok;
 }
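An editorial stub test of the predicate sense this hunk fixes: sleeping_prematurely() answers "would sleeping now be premature?", so a balanced node must yield false; the negation was missing.

    #include <assert.h>
    #include <stdbool.h>

    static bool sleeping_prematurely_fixed(bool pgdat_is_balanced)
    {
            return !pgdat_is_balanced;      /* premature iff NOT balanced */
    }

    int main(void)
    {
            assert(!sleeping_prematurely_fixed(true));  /* balanced: sleep is fine */
            assert(sleeping_prematurely_fixed(false));  /* unbalanced: keep working */
            return 0;
    }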
@@ -2323,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
@@ -2336,6 +2392,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                .order = order,
                .mem_cgroup = NULL,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
 loop_again:
        total_scanned = 0;
        sc.nr_reclaimed = 0;
@@ -2412,11 +2471,15 @@ loop_again:
 
                        sc.nr_scanned = 0;
 
+                       nr_soft_scanned = 0;
                        /*
                         * Call soft limit reclaim before calling shrink_zone.
-                        * For now we ignore the return value
                         */
-                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       order, sc.gfp_mask,
+                                                       &nr_soft_scanned);
+                       sc.nr_reclaimed += nr_soft_reclaimed;
+                       total_scanned += nr_soft_scanned;
 
                        /*
                         * We put equal pressure on every zone, unless
@@ -2435,8 +2498,7 @@ loop_again:
                                        end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-                                               lru_pages);
+                       nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
 
@@ -2788,7 +2850,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .swappiness = vm_swappiness,
                .order = 0,
        };
-       struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
+       struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
        struct task_struct *p = current;
        unsigned long nr_reclaimed;
 
@@ -2797,7 +2862,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        p->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
@@ -2972,6 +3037,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .swappiness = vm_swappiness,
                .order = order,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
        unsigned long nr_slab_pages0, nr_slab_pages1;
 
        cond_resched();
@@ -3013,7 +3081,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                        unsigned long lru_pages = zone_reclaimable_pages(zone);
 
                        /* No reclaimable slab or very low memory pressure */
-                       if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+                       if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
                                break;
 
                        /* Freed enough memory */