vmscan.c: fix invalid strict_strtoul() check in write_scan_unevictable_node()
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b68a934..ac644fe 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -495,15 +495,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        return PAGE_ACTIVATE;
                }
 
-               /*
-                * Wait on writeback if requested to. This happens when
-                * direct reclaiming a large contiguous area and the
-                * first attempt to free a range of pages fails.
-                */
-               if (PageWriteback(page) &&
-                   (sc->reclaim_mode & RECLAIM_MODE_SYNC))
-                       wait_on_page_writeback(page);
-
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
@@ -759,7 +750,10 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct zone *zone,
-                                     struct scan_control *sc)
+                                     struct scan_control *sc,
+                                     int priority,
+                                     unsigned long *ret_nr_dirty,
+                                     unsigned long *ret_nr_writeback)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
@@ -767,6 +761,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        unsigned long nr_dirty = 0;
        unsigned long nr_congested = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long nr_writeback = 0;
 
        cond_resched();
 
@@ -803,13 +798,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                if (PageWriteback(page)) {
+                       nr_writeback++;
                        /*
-                        * Synchronous reclaim is performed in two passes,
-                        * first an asynchronous pass over the list to
-                        * start parallel writeback, and a second synchronous
-                        * pass to wait for the IO to complete.  Wait here
-                        * for any page for which writeback has already
-                        * started.
+                        * Synchronous reclaim cannot queue pages for
+                        * writeback due to the possibility of stack overflow,
+                        * but if it encounters a page under writeback, wait
+                        * for the IO to complete.
                         */
                        if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
                            may_enter_fs)
@@ -865,6 +859,25 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageDirty(page)) {
                        nr_dirty++;
 
+                       /*
+                        * Only kswapd can write back filesystem pages, to
+                        * avoid the risk of stack overflow, but even kswapd
+                        * does not write back unless under significant pressure.
+                        */
+                       if (page_is_file_cache(page) &&
+                                       (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+                               /*
+                                * Immediately reclaim when written back.
+                        * Similar in principle to deactivate_page()
+                                * except we already have the page isolated
+                                * and know it's dirty
+                                */
+                               inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+                               SetPageReclaim(page);
+
+                               goto keep_locked;
+                       }
+
                        if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
                        if (!may_enter_fs)
@@ -999,6 +1012,8 @@ keep_lumpy:
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
+       *ret_nr_dirty += nr_dirty;
+       *ret_nr_writeback += nr_writeback;
        return nr_reclaimed;
 }
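
The dirty-page branch added above encodes a small policy: dirty file-backed pages are never written back from direct reclaim, and kswapd only writes them back once priority has dropped below DEF_PRIORITY - 2. A minimal standalone sketch of that decision, with plain booleans standing in for page_is_file_cache()/current_is_kswapd() and DEF_PRIORITY assumed to be 12; the helper name and the userspace harness are illustrative, not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12		/* assumed value, as in this kernel */

/*
 * Stand-in for the new check in shrink_page_list(): a dirty file page is
 * skipped (and marked PageReclaim) unless we are kswapd *and* priority has
 * fallen below DEF_PRIORITY - 2.
 */
static bool may_write_back_dirty_file_page(bool is_kswapd, int priority)
{
	return is_kswapd && priority < DEF_PRIORITY - 2;
}

int main(void)
{
	/* direct reclaim no longer writes back file pages at any priority */
	printf("direct reclaim, prio 2: %d\n", may_write_back_dirty_file_page(false, 2));
	/* kswapd under light pressure defers to the flusher threads */
	printf("kswapd, prio 11:        %d\n", may_write_back_dirty_file_page(true, 11));
	/* kswapd under significant pressure may still call pageout() */
	printf("kswapd, prio 9:         %d\n", may_write_back_dirty_file_page(true, 9));
	return 0;
}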
 
@@ -1405,7 +1420,7 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 }
 
 /*
- * Returns true if the caller should wait to clean dirty/writeback pages.
+ * Returns true if a direct reclaim should wait on pages under writeback.
  *
  * If we are direct reclaiming for contiguous pages and we do not reclaim
  * everything in the list, try again and wait for writeback IO to complete.
@@ -1459,6 +1474,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_taken;
        unsigned long nr_anon;
        unsigned long nr_file;
+       unsigned long nr_dirty = 0;
+       unsigned long nr_writeback = 0;
        isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1511,12 +1528,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        spin_unlock_irq(&zone->lru_lock);
 
-       nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
+                                               &nr_dirty, &nr_writeback);
 
        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                set_reclaim_mode(priority, sc, true);
-               nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+               nr_reclaimed += shrink_page_list(&page_list, zone, sc,
+                                       priority, &nr_dirty, &nr_writeback);
        }
 
        local_irq_disable();
@@ -1526,6 +1545,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
 
+       /*
+        * If reclaim is isolating dirty pages under writeback, it implies
+        * that the long-lived page allocation rate is exceeding the page
+        * laundering rate. Either the global limits are not being effective
+        * at throttling processes due to the page distribution throughout
+        * zones or there is heavy usage of a slow backing device. The
+        * only option is to throttle from reclaim context which is not ideal
+        * as there is no guarantee the dirtying process is throttled in the
+        * same way balance_dirty_pages() manages.
+        *
+        * This scales the number of dirty pages that must be under writeback
+        * before throttling depending on priority. It is a simple backoff
+        * function that has the most effect in the range DEF_PRIORITY to
+        * DEF_PRIORITY-2 which is the priority at which reclaim is
+        * considered to be in trouble.
+        *
+        * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
+        * DEF_PRIORITY-1  50% must be PageWriteback
+        * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
+        * ...
+        * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
+        *                     isolated page is PageWriteback
+        */
+       if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+               wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+
        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
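
The throttling check added at the end of shrink_inactive_list() is a shift-based backoff: the share of isolated pages that must be PageWriteback before the task sleeps in wait_iff_congested() halves with each priority level. A standalone sketch of the arithmetic behind the table in the comment above, assuming DEF_PRIORITY is 12 and SWAP_CLUSTER_MAX is 32 as in this kernel (userspace harness, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY		12	/* assumed, as in this kernel */
#define SWAP_CLUSTER_MAX	32UL	/* assumed, as in this kernel */

/* Mirrors: nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY - priority)) */
static bool should_throttle(unsigned long nr_writeback, unsigned long nr_taken,
			    int priority)
{
	return nr_writeback &&
	       nr_writeback >= (nr_taken >> (DEF_PRIORITY - priority));
}

int main(void)
{
	unsigned long nr_taken = SWAP_CLUSTER_MAX;
	int priority;

	/*
	 * For 32 isolated pages the threshold is 32 at DEF_PRIORITY (100%),
	 * 16 at DEF_PRIORITY-1 (50%), 8 at DEF_PRIORITY-2 (25%), and 0 by
	 * DEF_PRIORITY-6, where a single PageWriteback page already throttles.
	 */
	for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--)
		printf("priority %2d: threshold %2lu, single writeback page throttles: %d\n",
		       priority, nr_taken >> (DEF_PRIORITY - priority),
		       should_throttle(1, nr_taken, priority));
	return 0;
}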
@@ -1817,12 +1862,19 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        enum lru_list l;
        int noswap = 0;
        bool force_scan = false;
-       unsigned long nr_force_scan[2];
 
-       /* kswapd does zone balancing and needs to scan this zone */
+       /*
+        * If the zone or memcg is small, nr[l] can be 0.  This
+        * results in no scanning on this priority and a potential
+        * priority drop.  Global direct reclaim can go to the next
+        * zone and tends to have no problems. Global kswapd is for
+        * zone balancing and it needs to scan a minimum amount. When
+        * reclaiming for a memcg, a priority drop can cause high
+        * latencies, so it's better to scan a minimum amount there as
+        * well.
+        */
        if (scanning_global_lru(sc) && current_is_kswapd())
                force_scan = true;
-       /* memcg may have small limit and need to avoid priority drop */
        if (!scanning_global_lru(sc))
                force_scan = true;
 
@@ -1832,8 +1884,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
-               nr_force_scan[0] = 0;
-               nr_force_scan[1] = SWAP_CLUSTER_MAX;
                goto out;
        }
 
@@ -1850,8 +1900,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
-                       nr_force_scan[0] = SWAP_CLUSTER_MAX;
-                       nr_force_scan[1] = 0;
                        goto out;
                }
        }
@@ -1900,11 +1948,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
-       if (force_scan) {
-               unsigned long scan = SWAP_CLUSTER_MAX;
-               nr_force_scan[0] = div64_u64(scan * ap, denominator);
-               nr_force_scan[1] = div64_u64(scan * fp, denominator);
-       }
 out:
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
@@ -1913,20 +1956,10 @@ out:
                scan = zone_nr_lru_pages(zone, sc, l);
                if (priority || noswap) {
                        scan >>= priority;
+                       if (!scan && force_scan)
+                               scan = SWAP_CLUSTER_MAX;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
-
-               /*
-                * If zone is small or memcg is small, nr[l] can be 0.
-                * This results no-scan on this priority and priority drop down.
-                * For global direct reclaim, it can visit next zone and tend
-                * not to have problems. For global kswapd, it's for zone
-                * balancing and it need to scan a small amounts. When using
-                * memcg, priority drop can cause big latency. So, it's better
-                * to scan small amount. See may_noscan above.
-                */
-               if (!scan && force_scan)
-                       scan = nr_force_scan[file];
                nr[l] = scan;
        }
 }
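
With the nr_force_scan[] array gone, the per-LRU target is computed in one place: shift the LRU size by the priority, apply the SWAP_CLUSTER_MAX floor if force_scan is set, then scale by the anon/file fraction. A standalone sketch of that arithmetic (a plain division stands in for div64_u64(); the LRU size and fraction values are made-up examples):

#include <stdint.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* assumed, as in this kernel */

/* Stand-in for the per-LRU calculation in get_scan_count()'s out: loop. */
static unsigned long scan_target(unsigned long lru_pages, int priority,
				 int force_scan, uint64_t fraction,
				 uint64_t denominator)
{
	unsigned long scan = lru_pages >> priority;

	if (!scan && force_scan)
		scan = SWAP_CLUSTER_MAX;	/* avoid a zero-scan pass */
	return (unsigned long)(scan * fraction / denominator);
}

int main(void)
{
	/*
	 * Example: a small memcg with 2000 file pages at priority 12.
	 * 2000 >> 12 == 0, so without force_scan nothing would be scanned
	 * and the priority would drop; with force_scan the 32-page floor
	 * is scaled by the (made-up) file fraction 3/4 instead.
	 */
	printf("without force_scan: %lu\n", scan_target(2000, 12, 0, 3, 4));	/* 0 */
	printf("with force_scan:    %lu\n", scan_target(2000, 12, 1, 3, 4));	/* 24 */
	return 0;
}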
@@ -2005,12 +2038,14 @@ static void shrink_zone(int priority, struct zone *zone,
        enum lru_list l;
        unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+       struct blk_plug plug;
 
 restart:
        nr_reclaimed = 0;
        nr_scanned = sc->nr_scanned;
        get_scan_count(zone, sc, nr, priority);
 
+       blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(l) {
@@ -2034,6 +2069,7 @@ restart:
                if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
+       blk_finish_plug(&plug);
        sc->nr_reclaimed += nr_reclaimed;
 
        /*
@@ -2712,6 +2748,8 @@ out:
 
                        /* If balanced, clear the congested flag */
                        zone_clear_flag(zone, ZONE_CONGESTED);
+                       if (i <= *classzone_idx)
+                               balanced += zone->present_pages;
                }
        }
 
@@ -2785,7 +2823,9 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
 static int kswapd(void *p)
 {
        unsigned long order, new_order;
+       unsigned balanced_order;
        int classzone_idx, new_classzone_idx;
+       int balanced_classzone_idx;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
 
@@ -2816,7 +2856,9 @@ static int kswapd(void *p)
        set_freezable();
 
        order = new_order = 0;
+       balanced_order = 0;
        classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
+       balanced_classzone_idx = classzone_idx;
        for ( ; ; ) {
                int ret;
 
@@ -2825,7 +2867,8 @@ static int kswapd(void *p)
                 * new request of a similar or harder type will succeed soon
                 * so consider going to sleep based on the order we already reclaimed at
                 */
-               if (classzone_idx >= new_classzone_idx && order == new_order) {
+               if (balanced_classzone_idx >= new_classzone_idx &&
+                                       balanced_order == new_order) {
                        new_order = pgdat->kswapd_max_order;
                        new_classzone_idx = pgdat->classzone_idx;
                        pgdat->kswapd_max_order =  0;
@@ -2840,9 +2883,12 @@ static int kswapd(void *p)
                        order = new_order;
                        classzone_idx = new_classzone_idx;
                } else {
-                       kswapd_try_to_sleep(pgdat, order, classzone_idx);
+                       kswapd_try_to_sleep(pgdat, balanced_order,
+                                               balanced_classzone_idx);
                        order = pgdat->kswapd_max_order;
                        classzone_idx = pgdat->classzone_idx;
+                       new_order = order;
+                       new_classzone_idx = classzone_idx;
                        pgdat->kswapd_max_order = 0;
                        pgdat->classzone_idx = pgdat->nr_zones - 1;
                }
@@ -2857,7 +2903,9 @@ static int kswapd(void *p)
                 */
                if (!ret) {
                        trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
-                       order = balance_pgdat(pgdat, order, &classzone_idx);
+                       balanced_classzone_idx = classzone_idx;
+                       balanced_order = balance_pgdat(pgdat, order,
+                                               &balanced_classzone_idx);
                }
        }
        return 0;
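
kswapd now tracks the order and classzone index that balance_pgdat() actually achieved separately from the ones it was asked for, and the two comparisons in the loop decide whether to pick up a fresh request or to sleep on what was achieved. A standalone sketch of just those comparisons (plain C, illustrative function names, not the kernel loop itself):

#include <stdbool.h>
#include <stdio.h>

/*
 * Did the last balancing (what balance_pgdat() actually achieved) satisfy the
 * request that is still pending?  Mirrors the test that now gates fetching a
 * fresh kswapd_max_order/classzone_idx from the pgdat.
 */
static bool previous_balancing_satisfied(int balanced_order, int balanced_classzone_idx,
					 int new_order, int new_classzone_idx)
{
	return balanced_classzone_idx >= new_classzone_idx &&
	       balanced_order == new_order;
}

/* Is the pending wakeup a harder request than the one currently worked on? */
static bool harder_request(int order, int classzone_idx,
			   int new_order, int new_classzone_idx)
{
	return order < new_order || classzone_idx > new_classzone_idx;
}

int main(void)
{
	/*
	 * Example: kswapd was woken for an order-3 allocation but
	 * balance_pgdat() gave up and returned order 0.  The pending order-3
	 * request is not treated as satisfied, so it is not overwritten by a
	 * fresh kswapd_max_order, and an order-3 wakeup still counts as
	 * harder than the order-0 work just finished.
	 */
	printf("satisfied: %d\n", previous_balancing_satisfied(0, 2, 3, 2));	/* 0 */
	printf("harder:    %d\n", harder_request(0, 2, 3, 2));			/* 1 */
	return 0;
}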
@@ -3472,8 +3520,8 @@ static ssize_t write_scan_unevictable_node(struct sys_device *dev,
        unsigned long res;
        unsigned long req = strict_strtoul(buf, 10, &res);
 
-       if (!req)
-               return 1;       /* zero is no-op */
+       if (req || !res)
+               return 1; /* Invalid input or zero is no-op */
 
        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
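
The check named in the commit title relies on strict_strtoul() returning 0 on success and a negative errno on failure, so the old "if (!req)" bailed out on every successfully parsed value and fell through on garbage. A standalone sketch of the corrected logic, with a userspace strtoul() wrapper standing in for strict_strtoul() (illustrative only; the kernel helper's exact parsing rules differ slightly):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough userspace stand-in for strict_strtoul(): 0 on success, -EINVAL on error. */
static int parse_ul(const char *buf, unsigned long *res)
{
	char *end;

	errno = 0;
	*res = strtoul(buf, &end, 10);
	if (errno || end == buf || *end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	const char *inputs[] = { "0", "1", "junk" };
	unsigned long res;
	int i;

	for (i = 0; i < 3; i++) {
		int req = parse_ul(inputs[i], &res);

		/* the fixed check: ignore parse errors and an explicit zero */
		if (req || !res) {
			printf("\"%s\": ignored (invalid or no-op)\n", inputs[i]);
			continue;
		}
		printf("\"%s\": would scan unevictable pages\n", inputs[i]);
	}
	return 0;
}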