diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1618058..518540a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -62,6 +63,8 @@ struct scan_control {
        int swap_cluster_max;
 
        int swappiness;
+
+       int all_unreclaimable;
 };
 
 /*
@@ -368,24 +371,49 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-
+               inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
 
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
-
        /*
-        * The non-racy check for busy page.  It is critical to check
-        * PageDirty _after_ making sure that the page is freeable and
-        * not in use by anybody.       (pagecache + us == 2)
+        * The non-racy check for a busy page.
+        *
+        * Must be careful with the order of the tests. When someone has
+        * a ref to the page, it may be possible that they dirty it then
+        * drop the reference. So if PageDirty is tested before page_count
+        * here, then the following race may occur:
+        *
+        * get_user_pages(&page);
+        * [user mapping goes away]
+        * write_to(page);
+        *                              !PageDirty(page)    [good]
+        * SetPageDirty(page);
+        * put_page(page);
+        *                              !page_count(page)   [good, discard it]
+        *
+        * [oops, our write_to data is lost]
+        *
+        * Reversing the order of the tests ensures such a situation cannot
+        * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+        * load is not satisfied before that of page->_count.
+        *
+        * Note that if SetPageDirty is always performed via set_page_dirty,
+        * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
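
A minimal sketch of the check ordering the new comment argues for, assuming the usual continuation of remove_mapping() just below this hunk (the smp_rmb() and the PageDirty test are not visible in the context window shown here):

        write_lock_irq(&mapping->tree_lock);
        if (unlikely(page_count(page) != 2))    /* pagecache + us, nobody else */
                goto cannot_free;
        smp_rmb();      /* don't let the ->flags load pass the ->_count load */
        if (unlikely(PageDirty(page)))          /* re-dirtied under a stale ref */
                goto cannot_free;
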
@@ -695,6 +723,25 @@ done:
        return nr_reclaimed;
 }
 
+/*
+ * We are about to scan this zone at a certain priority level.  If that priority
+ * level is smaller (ie: more urgent) than the previous priority, then note
+ * that priority level within the zone.  This is done so that when the next
+ * process comes in to scan this zone, it will immediately start out at this
+ * priority level rather than having to build up its own scanning priority.
+ * Here, this priority affects only the reclaim-mapped threshold.
+ */
+static inline void note_zone_scanning_priority(struct zone *zone, int priority)
+{
+       if (priority < zone->prev_priority)
+               zone->prev_priority = priority;
+}
+
+static inline int zone_is_near_oom(struct zone *zone)
+{
+       return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+}
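
In other words, a zone counts as near OOM once it has been scanned roughly three times over without making headway, at which point shrink_active_list() below skips the swap_tendency heuristic entirely. A standalone illustration with made-up numbers (not kernel code):

        /* Hypothetical zone: 12000 active + 4000 inactive LRU pages.
         * (12000 + 4000) * 3 = 48000, so once pages_scanned reaches 48000
         * the zone is treated as near OOM and mapped pages are reclaimed
         * unconditionally (force_reclaim_mapped in the hunks below).
         * For comparison, kswapd only declares a zone all_unreclaimable
         * after scanning it six times over (see the *6 change further down). */
        static int near_oom(unsigned long pages_scanned,
                            unsigned long nr_active, unsigned long nr_inactive)
        {
                return pages_scanned >= (nr_active + nr_inactive) * 3;
        }
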
+
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -713,7 +760,7 @@ done:
  * But we had to alter page->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                               struct scan_control *sc)
+                               struct scan_control *sc, int priority)
 {
        unsigned long pgmoved;
        int pgdeactivate = 0;
@@ -730,11 +777,14 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long distress;
                long swap_tendency;
 
+               if (zone_is_near_oom(zone))
+                       goto force_reclaim_mapped;
+
                /*
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
                 */
-               distress = 100 >> zone->prev_priority;
+               distress = 100 >> min(zone->prev_priority, priority);
 
                /*
                 * The point of this algorithm is to decide when to start
@@ -765,6 +815,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * memory onto the inactive list.
                 */
                if (swap_tendency >= 100)
+force_reclaim_mapped:
                        reclaim_mapped = 1;
        }
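
For context, distress feeds the swap_tendency calculation that sits between these hunks and is unchanged by this patch; the sketch below is from memory of kernels of this vintage, so treat the names and the exact mapped_ratio expression as assumptions to check against the surrounding source:

        mapped_ratio = (global_page_state(NR_FILE_MAPPED) +
                        global_page_state(NR_ANON_PAGES)) * 100 / vm_total_pages;
        /* With this patch, distress = 100 >> min(zone->prev_priority, priority),
         * so a single deep-priority pass is enough to push the sum over 100. */
        swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
        if (swap_tendency >= 100)
                reclaim_mapped = 1;     /* also reached via force_reclaim_mapped */
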
 
@@ -885,7 +936,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                        nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc);
+                       shrink_active_list(nr_to_scan, zone, sc, priority);
                }
 
                if (nr_inactive) {
@@ -925,6 +976,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
        unsigned long nr_reclaimed = 0;
        int i;
 
+       sc->all_unreclaimable = 1;
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
@@ -934,13 +986,13 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;
 
-               zone->temp_priority = priority;
-               if (zone->prev_priority > priority)
-                       zone->prev_priority = priority;
+               note_zone_scanning_priority(zone, priority);
 
                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
+               sc->all_unreclaimable = 0;
+
                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
        return nr_reclaimed;
@@ -984,7 +1036,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;
 
-               zone->temp_priority = DEF_PRIORITY;
                lru_pages += zone->nr_active + zone->nr_inactive;
        }
 
@@ -1019,16 +1070,28 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
                /* Take a nap, wait for some writeback to complete */
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
        }
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (!sc.all_unreclaimable)
+               ret = 1;
 out:
+       /*
+        * Now that we've scanned all the zones at this priority level, note
+        * that level within the zone so that the next thread which performs
+        * scanning of this zone will immediately start out at this priority
+        * level.  This affects only the decision whether or not to bring
+        * mapped pages onto the inactive list.
+        */
+       if (priority < 0)
+               priority = 0;
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];
 
                if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                        continue;
 
-               zone->prev_priority = zone->temp_priority;
+               zone->prev_priority = priority;
        }
        return ret;
 }
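
The point of sc.all_unreclaimable is to stop a zero return from here being read as "out of memory" when reclaim never got to scan anything. A rough, assumed sketch of the caller's side in the page allocator (not part of this diff):

        did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
        if (did_some_progress) {
                /* Reclaim made progress, or at least saw reclaimable zones:
                 * retry the allocation rather than declaring OOM. */
        } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
                /* Only invoke the OOM killer once reclaim reports that every
                 * eligible zone really was unreclaimable. */
                out_of_memory(zonelist, gfp_mask, order);
        }
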
@@ -1068,6 +1131,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
        };
+       /*
+        * temp_priority is used to remember the scanning priority at which
+        * each zone was successfully refilled to free_pages == pages_high.
+        */
+       int temp_priority[MAX_NR_ZONES];
 
 loop_again:
        total_scanned = 0;
@@ -1075,11 +1143,8 @@ loop_again:
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
-       for (i = 0; i < pgdat->nr_zones; i++) {
-               struct zone *zone = pgdat->node_zones + i;
-
-               zone->temp_priority = DEF_PRIORITY;
-       }
+       for (i = 0; i < pgdat->nr_zones; i++)
+               temp_priority[i] = DEF_PRIORITY;
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
@@ -1140,10 +1205,9 @@ scan:
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               end_zone, 0))
                                all_zones_ok = 0;
-                       zone->temp_priority = priority;
-                       if (zone->prev_priority > priority)
-                               zone->prev_priority = priority;
+                       temp_priority[i] = priority;
                        sc.nr_scanned = 0;
+                       note_zone_scanning_priority(zone, priority);
                        nr_reclaimed += shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -1153,7 +1217,7 @@ scan:
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 4)
+                                   (zone->nr_active + zone->nr_inactive) * 6)
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
@@ -1171,7 +1235,7 @@ scan:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1183,10 +1247,15 @@ scan:
                        break;
        }
 out:
+       /*
+        * Note within each zone the priority level at which this zone was
+        * brought into a happy state, so that the next thread which scans this
+        * zone will start out at that priority level.
+        */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               zone->prev_priority = zone->temp_priority;
+               zone->prev_priority = temp_priority[i];
        }
        if (!all_zones_ok) {
                cond_resched();
@@ -1315,7 +1384,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
                        if (zone->nr_scan_active >= nr_pages || pass > 3) {
                                zone->nr_scan_active = 0;
                                nr_to_scan = min(nr_pages, zone->nr_active);
-                               shrink_active_list(nr_to_scan, zone, sc);
+                               shrink_active_list(nr_to_scan, zone, sc, prio);
                        }
                }
 
@@ -1361,7 +1430,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for_each_zone(zone)
                lru_pages += zone->nr_active + zone->nr_inactive;
 
-       nr_slab = global_page_state(NR_SLAB);
+       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
@@ -1415,7 +1484,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               blk_congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(WRITE, HZ / 10);
                }
 
                lru_pages = 0;
@@ -1510,7 +1579,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_ZONE (1<<0)    /* Run shrink_cache on the zone */
 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)    /* Swap pages out during reclaim */
-#define RECLAIM_SLAB (1<<3)    /* Do a global slab shrink if the zone is out of memory */
 
 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
@@ -1525,6 +1593,12 @@ int zone_reclaim_mode __read_mostly;
  */
 int sysctl_min_unmapped_ratio = 1;
 
+/*
+ * If the number of slab pages in a zone grows beyond this percentage then
+ * slab reclaim needs to occur.
+ */
+int sysctl_min_slab_ratio = 5;
+
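
The sysctl is a percentage of the zone's pages; the translation into zone->min_slab_pages happens in the companion mm/page_alloc.c change, which is not part of this diff, so the sketch below is an assumption about that side:

        /* Assumed setup on the page_alloc side: with the default of 5,
         * reclaimable slab may grow to 5% of the zone's pages before
         * zone_reclaim() starts shaking the slab for that zone. */
        for_each_zone(zone)
                zone->min_slab_pages = (zone->present_pages *
                                        sysctl_min_slab_ratio) / 100;
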
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1544,6 +1618,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
        };
+       unsigned long slab_reclaimable;
 
        disable_swap_token();
        cond_resched();
@@ -1556,29 +1631,44 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       /*
-        * Free memory by calling shrink zone with increasing priorities
-        * until we have enough memory freed.
-        */
-       priority = ZONE_RECLAIM_PRIORITY;
-       do {
-               nr_reclaimed += shrink_zone(priority, zone, &sc);
-               priority--;
-       } while (priority >= 0 && nr_reclaimed < nr_pages);
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+               zone_page_state(zone, NR_FILE_MAPPED) >
+               zone->min_unmapped_pages) {
+               /*
+                * Free memory by calling shrink_zone() with increasing
+                * priorities until we have enough memory freed.
+                */
+               priority = ZONE_RECLAIM_PRIORITY;
+               do {
+                       note_zone_scanning_priority(zone, priority);
+                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       priority--;
+               } while (priority >= 0 && nr_reclaimed < nr_pages);
+       }
 
-       if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (slab_reclaimable > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
-                * many pages were freed in this zone. So we just shake the slab
-                * a bit and then go off node for this particular allocation
-                * despite possibly having freed enough memory to allocate in
-                * this zone.  If we freed local memory then the next
-                * allocations will be local again.
+                * many pages were freed in this zone. So we take the current
+                * number of slab pages and shake the slab until it is reduced
+                * by the same nr_pages that we used for reclaiming unmapped
+                * pages.
                 *
-                * shrink_slab will free memory on all zones and may take
-                * a long time.
+                * Note that shrink_slab will free memory on all zones and may
+                * take a long time.
+                */
+               while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
+                               slab_reclaimable - nr_pages)
+                       ;
+
+               /*
+                * Update nr_reclaimed by the number of slab pages we
+                * reclaimed from this zone.
                 */
-               shrink_slab(sc.nr_scanned, gfp_mask, order);
+               nr_reclaimed += slab_reclaimable -
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
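
A worked example of the loop above, with made-up numbers: if NR_SLAB_RECLAIMABLE for the zone starts at 10000 pages and the request asked for nr_pages = 32, shrink_slab() is called repeatedly until either it returns 0 (no further progress) or the zone's reclaimable slab drops below 10000 - 32 = 9968; whatever actually went away is then credited to nr_reclaimed, so the caller sees slab reclaim and page reclaim the same way.
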
@@ -1592,7 +1682,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int node_id;
 
        /*
-        * Zone reclaim reclaims unmapped file backed pages.
+        * Zone reclaim reclaims unmapped file backed pages and
+        * slab pages if we are over the defined limits.
         *
         * A small portion of unmapped file backed pages is needed for
         * file I/O otherwise pages read by file I/O will be immediately
@@ -1601,7 +1692,9 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * unmapped file backed pages.
         */
        if (zone_page_state(zone, NR_FILE_PAGES) -
-           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
+           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
+           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
+                       <= zone->min_slab_pages)
                return 0;
 
        /*
@@ -1621,7 +1714,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
-       node_id = zone->zone_pgdat->node_id;
+       node_id = zone_to_nid(zone);
        mask = node_to_cpumask(node_id);
        if (!cpus_empty(mask) && node_id != numa_node_id())
                return 0;
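
Pulling the gates in this function together, a condensed restatement as a hypothetical standalone helper (illustration only; the names are made up and the real checks are the ones shown above):

        static int worth_trying_zone_reclaim(unsigned long file_pages,
                                             unsigned long file_mapped,
                                             unsigned long slab_reclaimable,
                                             unsigned long min_unmapped_pages,
                                             unsigned long min_slab_pages,
                                             int zone_node, int local_node,
                                             int node_has_cpus)
        {
                /* Enough unmapped pagecache or enough reclaimable slab? */
                if (file_pages - file_mapped <= min_unmapped_pages &&
                    slab_reclaimable <= min_slab_pages)
                        return 0;
                /* Only reclaim from a CPU on the zone's own node, unless the
                 * node has no CPUs at all (e.g. a memory-only node). */
                if (node_has_cpus && zone_node != local_node)
                        return 0;
                return 1;
        }
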