net: Fix dev dev_ethtool_get_rx_csum() for forced NETIF_F_RXCSUM
[pandora-kernel.git] / mm / vmscan.c

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 148c6e6..f73b865 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -358,7 +358,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
 static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
-       lock_page_nosync(page);
+       lock_page(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
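
The first hunk is mechanical: lock_page_nosync() existed only to avoid
calling the mapping's sync_page() while waiting for the page lock, and
with sync_page() gone from this kernel it is identical to plain
lock_page(), hence the rename. The lock/recheck pattern it wraps is
worth spelling out; below is a minimal user-space model of it (all
types and names are illustrative stand-ins, not the kernel's):

    #include <pthread.h>

    struct address_space { int error; };    /* models mapping_set_error() state */

    struct page {
            pthread_mutex_t lock;           /* models the PG_locked bit */
            struct address_space *mapping;  /* cleared by concurrent truncation */
    };

    static void handle_write_error_model(struct address_space *mapping,
                                         struct page *page, int error)
    {
            pthread_mutex_lock(&page->lock);
            /* Recheck under the lock: truncation may have detached the
             * page from @mapping after the writeback error was seen. */
            if (page->mapping == mapping)
                    mapping->error = error;
            pthread_mutex_unlock(&page->lock);
    }

    int main(void)
    {
            struct address_space as = { .error = 0 };
            struct page pg = { .lock = PTHREAD_MUTEX_INITIALIZER,
                               .mapping = &as };

            handle_write_error_model(&as, &pg, -5 /* -EIO */);
            return as.error == -5 ? 0 : 1;
    }
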
@@ -514,7 +514,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 
                freepage = mapping->a_ops->freepage;
 
-               __remove_from_page_cache(page);
+               __delete_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
                mem_cgroup_uncharge_cache_page(page);
 
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;
 
-       /*
-        * If we failed to reclaim and have scanned the full list, stop.
-        * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-        *       faster but obviously would be less likely to succeed
-        *       allocation. If this is desirable, use GFP_REPEAT to decide
-        *       if both reclaimed and scanned should be checked or just
-        *       reclaimed
-        */
-       if (!nr_reclaimed && !nr_scanned)
-               return false;
+       /* Consider stopping depending on scan and reclaim activity */
+       if (sc->gfp_mask & __GFP_REPEAT) {
+               /*
+                * For __GFP_REPEAT allocations, stop reclaiming if the
+                * full LRU list has been scanned and we are still failing
+                * to reclaim pages. This full LRU scan is potentially
+                * expensive but a __GFP_REPEAT caller really wants to succeed
+                */
+               if (!nr_reclaimed && !nr_scanned)
+                       return false;
+       } else {
+               /*
+                * For non-__GFP_REPEAT allocations which can presumably
+                * fail without consequence, stop if we failed to reclaim
+                * any pages from the last SWAP_CLUSTER_MAX number of
+                * pages that were scanned. This returns to the
+                * caller faster, at the risk that reclaim/compaction
+                * and the resulting allocation attempt fail.
+                */
+               if (!nr_reclaimed)
+                       return false;
+       }
 
        /*
         * If we have not reclaimed enough pages for compaction and the
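
The rewritten check makes the stop condition depend on how badly the
caller needs the allocation to succeed. Here is a compact user-space
model of just this decision (the flag value is a stand-in for
__GFP_REPEAT; returning true means "this check alone does not stop
reclaim", mirroring the inverted returns in the hunk):

    #include <stdbool.h>

    #define MODEL_GFP_REPEAT (1u << 0)  /* stand-in for __GFP_REPEAT */

    static bool keep_reclaiming(unsigned int gfp_mask,
                                unsigned long nr_reclaimed,
                                unsigned long nr_scanned)
    {
            if (gfp_mask & MODEL_GFP_REPEAT)
                    /* Persistent caller: give up only after a full LRU
                     * scan that reclaimed nothing at all. */
                    return nr_reclaimed || nr_scanned;
            /* Ordinary caller: give up as soon as a scan batch
             * reclaims nothing, even though pages were scanned. */
            return nr_reclaimed != 0;
    }

    int main(void)
    {
            /* A scan batch that reclaimed nothing: the __GFP_REPEAT
             * caller presses on, the ordinary caller stops. */
            return (keep_reclaiming(MODEL_GFP_REPEAT, 0, 512) &&
                    !keep_reclaiming(0, 0, 512)) ? 0 : 1;
    }
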
@@ -1882,12 +1894,12 @@ static void shrink_zone(int priority, struct zone *zone,
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        enum lru_list l;
-       unsigned long nr_reclaimed;
+       unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-       unsigned long nr_scanned = sc->nr_scanned;
 
 restart:
        nr_reclaimed = 0;
+       nr_scanned = sc->nr_scanned;
        get_scan_count(zone, sc, nr, priority);
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
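
Re-sampling nr_scanned on every pass through restart: matters because
should_continue_reclaim() above wants the scan activity of only the
pass that just finished; with the old one-time snapshot the cumulative
count would keep growing and the "reclaimed nothing, scanned nothing"
exit could never fire on a later pass. Further down shrink_zone(),
outside this hunk, the snapshot is consumed roughly like this:

            /* re-evaluate with the scan count of just the pass that
             * ended, not the cumulative total for this invocation */
            if (should_continue_reclaim(zone, nr_reclaimed,
                                    sc->nr_scanned - nr_scanned, sc))
                    goto restart;
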
@@ -2385,9 +2397,9 @@ loop_again:
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
-                       int compaction;
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
+                       unsigned long balance_gap;
 
                        if (!populated_zone(zone))
                                continue;
@@ -2404,11 +2416,20 @@ loop_again:
                        mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
                        /*
-                        * We put equal pressure on every zone, unless one
-                        * zone has way too many pages free already.
+                        * We put equal pressure on every zone, unless
+                        * one zone has way too many pages free
+                        * already. The "too many pages" is defined
+                        * as the high wmark plus a "gap" where the
+                        * gap is either the low watermark or 1%
+                        * of the zone, whichever is smaller.
                         */
+                       balance_gap = min(low_wmark_pages(zone),
+                               (zone->present_pages +
+                                       KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+                               KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        if (!zone_watermark_ok_safe(zone, order,
-                                       8*high_wmark_pages(zone), end_zone, 0))
+                                       high_wmark_pages(zone) + balance_gap,
+                                       end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
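
The new target is far lower than the old 8 * high_wmark: the gap is
capped at the smaller of the low watermark and 1% of the zone, with the
1% rounded up. Assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 (matching
the "1%" in the comment), a standalone worked example with made-up zone
numbers:

    #include <stdio.h>

    #define KSWAPD_ZONE_BALANCE_GAP_RATIO 100  /* "1% of the zone" */

    int main(void)
    {
            /* Example values only: a 1GB zone of 4K pages and an
             * assumed low watermark of 1024 pages. */
            unsigned long present_pages = 262144;
            unsigned long low_wmark = 1024;

            /* Round the 1% up, exactly as the hunk does. */
            unsigned long one_percent =
                    (present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                    KSWAPD_ZONE_BALANCE_GAP_RATIO;      /* 2622 pages */
            unsigned long balance_gap =
                    low_wmark < one_percent ? low_wmark : one_percent;

            /* kswapd now reclaims a zone until high_wmark + balance_gap
             * pages are free, instead of until 8 * high_wmark. */
            printf("balance_gap = %lu pages\n", balance_gap);  /* 1024 */
            return 0;
    }
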
@@ -2416,24 +2437,9 @@ loop_again:
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
 
-                       compaction = 0;
-                       if (order &&
-                           zone_watermark_ok(zone, 0,
-                                              high_wmark_pages(zone),
-                                             end_zone, 0) &&
-                           !zone_watermark_ok(zone, order,
-                                              high_wmark_pages(zone),
-                                              end_zone, 0)) {
-                               compact_zone_order(zone,
-                                                  order,
-                                                  sc.gfp_mask, false,
-                                                  COMPACT_MODE_KSWAPD);
-                               compaction = 1;
-                       }
-
                        if (zone->all_unreclaimable)
                                continue;
-                       if (!compaction && nr_slab == 0 &&
+                       if (nr_slab == 0 &&
                            !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*