mm: vmscan: do not writeback filesystem pages in direct reclaim
[pandora-kernel.git] mm/vmscan.c
index 9fdfce7..10f9c59 100644
@@ -865,6 +865,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageDirty(page)) {
                        nr_dirty++;
 
+                       /*
+                        * Only kswapd can writeback filesystem pages to
+                        * avoid risk of stack overflow
+                        */
+                       if (page_is_file_cache(page) && !current_is_kswapd()) {
+                               inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
+                               goto keep_locked;
+                       }
+
                        if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
                        if (!may_enter_fs)
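
For reference, the two helpers the new check depends on live outside this file; a minimal sketch of their behaviour at the time of this patch (from include/linux/swap.h and include/linux/mm_inline.h respectively), shown only for orientation and not part of the change:

	/* True when the current task is one of the kswapd threads. */
	static inline int current_is_kswapd(void)
	{
		return current->flags & PF_KSWAPD;
	}

	/* File-backed pages are the ones that are not swap backed. */
	static inline int page_is_file_cache(struct page *page)
	{
		return !PageSwapBacked(page);
	}
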
@@ -1012,23 +1021,27 @@ keep_lumpy:
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, int mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 {
+       bool all_lru_mode;
        int ret = -EINVAL;
 
        /* Only take pages on the LRU. */
        if (!PageLRU(page))
                return ret;
 
+       all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
+               (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
+
        /*
         * When checking the active state, we need to be sure we are
         * dealing with comparable boolean values.  Take the logical not
         * of each.
         */
-       if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+       if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
                return ret;
 
-       if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
+       if (!all_lru_mode && !!page_is_file_cache(page) != file)
                return ret;
 
        /*
@@ -1041,6 +1054,12 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 
        ret = -EBUSY;
 
+       if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
+               return ret;
+
+       if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+               return ret;
+
        if (likely(get_page_unless_zero(page))) {
                /*
                 * Be careful not to clear PageLRU until after we're
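
The isolate_mode_t type and the ISOLATE_* flags tested above come from a companion change to include/linux/mmzone.h that is not shown in this diff; the definitions assumed here are roughly:

	typedef unsigned __bitwise__ isolate_mode_t;

	#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)	/* isolate inactive pages */
	#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)	/* isolate active pages */
	#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)	/* isolate only clean file pages */
	#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)	/* isolate only unmapped file pages */

This replaces the old ISOLATE_INACTIVE/ISOLATE_ACTIVE/ISOLATE_BOTH integer modes with independently combinable bits.
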
@@ -1076,7 +1095,8 @@ int __isolate_lru_page(struct page *page, int mode, int file)
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
-               unsigned long *scanned, int order, int mode, int file)
+               unsigned long *scanned, int order, isolate_mode_t mode,
+               int file)
 {
        unsigned long nr_taken = 0;
        unsigned long nr_lumpy_taken = 0;
@@ -1201,8 +1221,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 static unsigned long isolate_pages_global(unsigned long nr,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
-                                       int mode, struct zone *z,
-                                       int active, int file)
+                                       isolate_mode_t mode,
+                                       struct zone *z, int active, int file)
 {
        int lru = LRU_BASE;
        if (active)
@@ -1448,6 +1468,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_taken;
        unsigned long nr_anon;
        unsigned long nr_file;
+       isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1458,15 +1479,21 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        }
 
        set_reclaim_mode(priority, sc, false);
+       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
+               reclaim_mode |= ISOLATE_ACTIVE;
+
        lru_add_drain();
+
+       if (!sc->may_unmap)
+               reclaim_mode |= ISOLATE_UNMAPPED;
+       if (!sc->may_writepage)
+               reclaim_mode |= ISOLATE_CLEAN;
+
        spin_lock_irq(&zone->lru_lock);
 
        if (scanning_global_lru(sc)) {
-               nr_taken = isolate_pages_global(nr_to_scan,
-                       &page_list, &nr_scanned, sc->order,
-                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
-                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
-                       zone, 0, file);
+               nr_taken = isolate_pages_global(nr_to_scan, &page_list,
+                       &nr_scanned, sc->order, reclaim_mode, zone, 0, file);
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1475,12 +1502,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        } else {
-               nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
-                       &page_list, &nr_scanned, sc->order,
-                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
-                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
-                       zone, sc->mem_cgroup,
-                       0, file);
+               nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
+                       &nr_scanned, sc->order, reclaim_mode, zone,
+                       sc->mem_cgroup, 0, file);
                /*
                 * mem_cgroup_isolate_pages() keeps track of
                 * scanned pages on its own.
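
As an illustrative (hypothetical) example of the flag composition above: a lumpy-reclaim pass whose scan_control has may_unmap == 0 and may_writepage == 0 ends up isolating with all the restrictions in place:

	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;	/* base mode */

	reclaim_mode |= ISOLATE_ACTIVE;		/* RECLAIM_MODE_LUMPYRECLAIM: take both LRU lists */
	reclaim_mode |= ISOLATE_UNMAPPED;	/* !sc->may_unmap: skip mapped pages */
	reclaim_mode |= ISOLATE_CLEAN;		/* !sc->may_writepage: skip dirty/writeback pages */

	/* __isolate_lru_page() then returns -EBUSY for mapped, dirty or writeback pages. */
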
@@ -1582,19 +1606,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        unsigned long nr_rotated = 0;
+       isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
 
        lru_add_drain();
+
+       if (!sc->may_unmap)
+               reclaim_mode |= ISOLATE_UNMAPPED;
+       if (!sc->may_writepage)
+               reclaim_mode |= ISOLATE_CLEAN;
+
        spin_lock_irq(&zone->lru_lock);
        if (scanning_global_lru(sc)) {
                nr_taken = isolate_pages_global(nr_pages, &l_hold,
                                                &pgscanned, sc->order,
-                                               ISOLATE_ACTIVE, zone,
+                                               reclaim_mode, zone,
                                                1, file);
                zone->pages_scanned += pgscanned;
        } else {
                nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
                                                &pgscanned, sc->order,
-                                               ISOLATE_ACTIVE, zone,
+                                               reclaim_mode, zone,
                                                sc->mem_cgroup, 1, file);
                /*
                 * mem_cgroup_isolate_pages() keeps track of
@@ -1795,12 +1826,19 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        enum lru_list l;
        int noswap = 0;
        bool force_scan = false;
-       unsigned long nr_force_scan[2];
 
-       /* kswapd does zone balancing and needs to scan this zone */
+       /*
+        * If the zone or memcg is small, nr[l] can be 0.  This
+        * results in no scanning on this priority and a potential
+        * priority drop.  Global direct reclaim can go to the next
+        * zone and tends to have no problems. Global kswapd is for
+        * zone balancing and it needs to scan a minimum amount. When
+        * reclaiming for a memcg, a priority drop can cause high
+        * latencies, so it's better to scan a minimum amount there as
+        * well.
+        */
        if (scanning_global_lru(sc) && current_is_kswapd())
                force_scan = true;
-       /* memcg may have small limit and need to avoid priority drop */
        if (!scanning_global_lru(sc))
                force_scan = true;
 
@@ -1810,8 +1848,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
-               nr_force_scan[0] = 0;
-               nr_force_scan[1] = SWAP_CLUSTER_MAX;
                goto out;
        }
 
@@ -1828,8 +1864,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
-                       nr_force_scan[0] = SWAP_CLUSTER_MAX;
-                       nr_force_scan[1] = 0;
                        goto out;
                }
        }
@@ -1878,11 +1912,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
-       if (force_scan) {
-               unsigned long scan = SWAP_CLUSTER_MAX;
-               nr_force_scan[0] = div64_u64(scan * ap, denominator);
-               nr_force_scan[1] = div64_u64(scan * fp, denominator);
-       }
 out:
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
@@ -1891,20 +1920,10 @@ out:
                scan = zone_nr_lru_pages(zone, sc, l);
                if (priority || noswap) {
                        scan >>= priority;
+                       if (!scan && force_scan)
+                               scan = SWAP_CLUSTER_MAX;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
-
-               /*
-                * If zone is small or memcg is small, nr[l] can be 0.
-                * This results no-scan on this priority and priority drop down.
-                * For global direct reclaim, it can visit next zone and tend
-                * not to have problems. For global kswapd, it's for zone
-                * balancing and it need to scan a small amounts. When using
-                * memcg, priority drop can cause big latency. So, it's better
-                * to scan small amount. See may_noscan above.
-                */
-               if (!scan && force_scan)
-                       scan = nr_force_scan[file];
                nr[l] = scan;
        }
 }
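
The nr_force_scan[] precomputation removed above and the in-loop bump to SWAP_CLUSTER_MAX are meant to be equivalent; a worked example with made-up numbers:

	/*
	 * Hypothetical: 2000 file pages on a memcg LRU, priority 12,
	 * fraction[1] = 1, denominator = 3.
	 *
	 * Before:  scan = 2000 >> 12 = 0;
	 *          nr_force_scan[1] = SWAP_CLUSTER_MAX * fp / denominator = 10;
	 *          nr[l] = 10 via the "!scan && force_scan" fallback.
	 *
	 * After:   scan = 2000 >> 12 = 0;
	 *          force_scan bumps scan to SWAP_CLUSTER_MAX (32);
	 *          nr[l] = 32 * fraction[1] / denominator = 10.
	 *
	 * Same result, without carrying per-type force-scan targets around.
	 */
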
@@ -1983,12 +2002,14 @@ static void shrink_zone(int priority, struct zone *zone,
        enum lru_list l;
        unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+       struct blk_plug plug;
 
 restart:
        nr_reclaimed = 0;
        nr_scanned = sc->nr_scanned;
        get_scan_count(zone, sc, nr, priority);
 
+       blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(l) {
@@ -2012,6 +2033,7 @@ restart:
                if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
+       blk_finish_plug(&plug);
        sc->nr_reclaimed += nr_reclaimed;
 
        /*
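
The plugging added to shrink_zone() batches the I/O submitted while the LRU lists are walked; the block-layer API follows the usual on-stack pattern, roughly:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* queue requests on a per-task plug list */
	/* ... submit a batch of I/O, e.g. pageout() during the LRU scan ... */
	blk_finish_plug(&plug);		/* flush the plugged requests to the block driver */
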