[PATCH] invalidate: remove_mapping() fix
[pandora-kernel.git] mm/vmscan.c
index 0960846..af73c14 100644
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -62,6 +63,8 @@ struct scan_control {
        int swap_cluster_max;
 
        int swappiness;
+
+       int all_unreclaimable;
 };
 
 /*
@@ -215,7 +218,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
-                       mod_page_state(slabs_scanned, this_scan);
+                       count_vm_events(SLABS_SCANNED, this_scan);
                        total_scan -= this_scan;
 
                        cond_resched();
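For reference, a sketch of the vmstat event API this series converts to (declared in include/linux/vmstat.h); the event names and counts below are only illustrative:

        count_vm_event(ALLOCSTALL);                     /* bump a single event once */
        count_vm_events(SLABS_SCANNED, this_scan);      /* bump an event by this_scan */
        /* __count_* variants skip the irq-save and rely on the caller's context */
        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);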
@@ -368,24 +371,49 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-
+               inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
 
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (!mapping)
-               return 0;               /* truncate got there first */
+       BUG_ON(!PageLocked(page));
+       BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
-
        /*
-        * The non-racy check for busy page.  It is critical to check
-        * PageDirty _after_ making sure that the page is freeable and
-        * not in use by anybody.       (pagecache + us == 2)
+        * The non-racy check for a busy page.
+        *
+        * Must be careful with the order of the tests. When someone has
+        * a ref to the page, it may be possible that they dirty it then
+        * drop the reference. So if PageDirty is tested before page_count
+        * here, then the following race may occur:
+        *
+        * get_user_pages(&page);
+        * [user mapping goes away]
+        * write_to(page);
+        *                              !PageDirty(page)    [good]
+        * SetPageDirty(page);
+        * put_page(page);
+        *                              !page_count(page)   [good, discard it]
+        *
+        * [oops, our write_to data is lost]
+        *
+        * Reversing the order of the tests ensures such a situation cannot
+        * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+        * load is not satisfied before that of page->_count.
+        *
+        * Note that if SetPageDirty is always performed via set_page_dirty,
+        * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
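A sketch of the full check ordering the new comment requires; the smp_rmb() and the PageDirty() test fall outside the lines shown in this hunk, so treat this as orientation rather than patch content:

        if (unlikely(page_count(page) != 2))
                goto cannot_free;
        smp_rmb();      /* order the ->_count load before the ->flags load */
        if (unlikely(PageDirty(page)))
                goto cannot_free;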
@@ -440,7 +468,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (TestSetPageLocked(page))
                        goto keep;
 
-               BUG_ON(PageActive(page));
+               VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
@@ -547,7 +575,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto free_it;
                }
 
-               if (!remove_mapping(mapping, page))
+               if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;
 
 free_it:
@@ -564,12 +592,12 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
                __pagevec_release_nonlru(&freed_pvec);
-       mod_page_state(pgactivate, pgactivate);
+       count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
 
@@ -603,7 +631,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               BUG_ON(!PageLRU(page));
+               VM_BUG_ON(!PageLRU(page));
 
                list_del(&page->lru);
                target = src;
@@ -659,11 +687,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {
-                       __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-                       __mod_page_state(kswapd_steal, nr_freed);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+                       __count_vm_events(KSWAPD_STEAL, nr_freed);
                } else
-                       __mod_page_state_zone(zone, pgscan_direct, nr_scan);
-               __mod_page_state_zone(zone, pgsteal, nr_freed);
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
                if (nr_taken == 0)
                        goto done;
@@ -674,7 +702,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
-                       BUG_ON(PageLRU(page));
+                       VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
@@ -695,6 +723,11 @@ done:
        return nr_reclaimed;
 }
 
+static inline int zone_is_near_oom(struct zone *zone)
+{
+       return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -730,6 +763,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long distress;
                long swap_tendency;
 
+               if (zone_is_near_oom(zone))
+                       goto force_reclaim_mapped;
+
                /*
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
@@ -765,6 +801,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * memory onto the inactive list.
                 */
                if (swap_tendency >= 100)
+force_reclaim_mapped:
                        reclaim_mapped = 1;
        }
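A worked example of the new near-OOM shortcut, with made-up numbers:

        /*
         * Hypothetical zone: nr_active = 100000, nr_inactive = 200000.
         * zone_is_near_oom() fires once pages_scanned reaches
         * 3 * (100000 + 200000) = 900000; from that point the
         * swap_tendency calculation is skipped and reclaim_mapped is
         * forced to 1 via the force_reclaim_mapped label.
         */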
 
@@ -797,9 +834,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
@@ -827,9 +864,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -841,11 +878,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                }
        }
        zone->nr_active += pgmoved;
-       spin_unlock(&zone->lru_lock);
 
-       __mod_page_state_zone(zone, pgrefill, pgscanned);
-       __mod_page_state(pgdeactivate, pgdeactivate);
-       local_irq_enable();
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
+       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       spin_unlock_irq(&zone->lru_lock);
 
        pagevec_release(&pvec);
 }
@@ -926,6 +962,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
        unsigned long nr_reclaimed = 0;
        int i;
 
+       sc->all_unreclaimable = 1;
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
@@ -942,6 +979,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
+               sc->all_unreclaimable = 0;
+
                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
        return nr_reclaimed;
@@ -977,7 +1016,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                .swappiness = vm_swappiness,
        };
 
-       inc_page_state(allocstall);
+       count_vm_event(ALLOCSTALL);
 
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
@@ -1022,6 +1061,9 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (!sc.all_unreclaimable)
+               ret = 1;
 out:
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];
@@ -1074,7 +1116,7 @@ loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
-       inc_page_state(pageoutrun);
+       count_vm_event(PAGEOUTRUN);
 
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
@@ -1154,7 +1196,7 @@ scan:
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 4)
+                                   (zone->nr_active + zone->nr_inactive) * 6)
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
@@ -1362,7 +1404,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for_each_zone(zone)
                lru_pages += zone->nr_active + zone->nr_inactive;
 
-       nr_slab = read_page_state(nr_slab);
+       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
@@ -1504,10 +1546,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1515,7 +1553,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_ZONE (1<<0)    /* Run shrink_cache on the zone */
 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)    /* Swap pages out during reclaim */
-#define RECLAIM_SLAB (1<<3)    /* Do a global slab shrink if the zone is out of memory */
 
 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
@@ -1524,6 +1561,18 @@ int zone_reclaim_mode __read_mostly;
  */
 #define ZONE_RECLAIM_PRIORITY 4
 
+/*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
+/*
+ * If the number of slab pages in a zone grows beyond this percentage then
+ * slab reclaim needs to occur.
+ */
+int sysctl_min_slab_ratio = 5;
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
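The per-zone thresholds used further down (zone->min_unmapped_pages, zone->min_slab_pages) are derived from these ratios outside this file; a sketch of that derivation, assuming the usual setup in mm/page_alloc.c:

        /* roughly how the sysctls translate into per-zone page counts */
        zone->min_unmapped_pages = (zone->present_pages *
                                    sysctl_min_unmapped_ratio) / 100;
        zone->min_slab_pages = (zone->present_pages *
                                sysctl_min_slab_ratio) / 100;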
@@ -1543,6 +1592,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
        };
+       unsigned long slab_reclaimable;
 
        disable_swap_token();
        cond_resched();
@@ -1555,29 +1605,43 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       /*
-        * Free memory by calling shrink zone with increasing priorities
-        * until we have enough memory freed.
-        */
-       priority = ZONE_RECLAIM_PRIORITY;
-       do {
-               nr_reclaimed += shrink_zone(priority, zone, &sc);
-               priority--;
-       } while (priority >= 0 && nr_reclaimed < nr_pages);
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+               zone_page_state(zone, NR_FILE_MAPPED) >
+               zone->min_unmapped_pages) {
+               /*
+                * Free memory by calling shrink zone with increasing
+                * priorities until we have enough memory freed.
+                */
+               priority = ZONE_RECLAIM_PRIORITY;
+               do {
+                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       priority--;
+               } while (priority >= 0 && nr_reclaimed < nr_pages);
+       }
 
-       if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (slab_reclaimable > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
-                * many pages were freed in this zone. So we just shake the slab
-                * a bit and then go off node for this particular allocation
-                * despite possibly having freed enough memory to allocate in
-                * this zone.  If we freed local memory then the next
-                * allocations will be local again.
+                * many pages were freed in this zone. So we take the current
+                * number of slab pages and shake the slab until it is reduced
+                * by the same nr_pages that we used for reclaiming unmapped
+                * pages.
                 *
-                * shrink_slab will free memory on all zones and may take
-                * a long time.
+                * Note that shrink_slab will free memory on all zones and may
+                * take a long time.
+                */
+               while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
+                               slab_reclaimable - nr_pages)
+                       ;
+
+               /*
+                * Update nr_reclaimed by the number of slab pages we
+                * reclaimed from this zone.
                 */
-               shrink_slab(sc.nr_scanned, gfp_mask, order);
+               nr_reclaimed += slab_reclaimable -
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
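A worked example of the slab shake loop's exit condition, with made-up numbers:

        /*
         * Hypothetical: slab_reclaimable = 12000 pages, nr_pages = 32.
         * shrink_slab() is called repeatedly until it returns 0 or
         * NR_SLAB_RECLAIMABLE drops to 12000 - 32 = 11968 or below.
         * If the counter ends up at 11950, nr_reclaimed is credited
         * with 12000 - 11950 = 50 slab pages.
         */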
@@ -1591,18 +1655,20 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int node_id;
 
        /*
-        * Do not reclaim if there are not enough reclaimable pages in this
-        * zone that would satify this allocations.
-        *
-        * All unmapped pagecache pages are reclaimable.
+        * Zone reclaim reclaims unmapped file backed pages and
+        * slab pages if we are over the defined limits.
         *
-        * Both counters may be temporarily off a bit so we use
-        * SWAP_CLUSTER_MAX as the boundary. It may also be good to
-        * leave a few frequently used unmapped pagecache pages around.
+        * A small portion of unmapped file backed pages is needed for
+        * file I/O otherwise pages read by file I/O will be immediately
+        * thrown out if the zone is overallocated. So we do not reclaim
+        * if less than a specified percentage of the zone is used by
+        * unmapped file backed pages.
         */
        if (zone_page_state(zone, NR_FILE_PAGES) -
-               zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
-                       return 0;
+           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
+           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
+                       <= zone->min_slab_pages)
+               return 0;
 
        /*
         * Avoid concurrent zone reclaims, do not reclaim in a zone that does
@@ -1621,7 +1687,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
-       node_id = zone->zone_pgdat->node_id;
+       node_id = zone_to_nid(zone);
        mask = node_to_cpumask(node_id);
        if (!cpus_empty(mask) && node_id != numa_node_id())
                return 0;