mm: page allocator: calculate a better estimate of NR_FREE_PAGES when memory is low...
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9bd339e..b2d21e0 100644
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
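The two hunks above (together with the free_one_page() hunk below) move the NR_FREE_PAGES update to after the pages have actually been merged back into the buddy lists, so the counter can no longer run ahead of what is really allocatable while zone->lock is held. The counter itself is kept as a per-CPU differential: small deltas sit in a per-cpu diff array and are folded into the global atomic only once they cross a threshold, which is where the drift this series compensates for comes from. For reference, a sketch paraphrased from the mm/vmstat.c updater of this era (not part of this patch; field names such as vm_stat_diff and stat_threshold are taken from that file):

    void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                               int delta)
    {
            struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
            s8 *p = pcp->vm_stat_diff + item;
            long x = delta + *p;

            /*
             * Fold into the global counter only when the per-cpu delta
             * crosses the threshold; until then the global NR_FREE_PAGES
             * can be stale by up to num_online_cpus() * stat_threshold.
             */
            if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                    zone_page_state_add(x, zone, item);
                    x = 0;
            }
            *p = x;
    }
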
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages may go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
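zone_nr_free_pages() is the drift-aware helper this series introduces on the include/linux/mmzone.h side; its body is not shown in this file's hunks. A sketch consistent with the change (percpu_drift_mark and zone_page_state_snapshot() come from the companion header/vmstat hunks and should be read as assumptions here, not as code from this file):

    static inline unsigned long zone_nr_free_pages(struct zone *zone)
    {
            unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

            /*
             * When free pages are below the drift mark and kswapd is
             * awake (i.e. not sitting on its waitqueue), the cheap
             * per-cpu counter may be inaccurate enough to breach the
             * min watermark, so pay for an exact snapshot instead.
             */
            if (nr_free_pages < zone->percpu_drift_mark &&
                            !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
                    return zone_page_state_snapshot(zone, NR_FREE_PAGES);

            return nr_free_pages;
    }

With this in place, zone_watermark_ok() only pays the cost of an exact count when memory is low and reclaim is already running; the fast path is unchanged.
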
@@ -1738,7 +1739,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct page *page;
 
        /* Acquire the OOM killer lock for the zones in zonelist */
-       if (!try_set_zone_oom(zonelist, gfp_mask)) {
+       if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
                schedule_timeout_uninterruptible(1);
                return NULL;
        }
@@ -1759,6 +1760,9 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
+               /* The OOM killer does not needlessly kill tasks for lowmem */
+               if (high_zoneidx < ZONE_NORMAL)
+                       goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
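high_zoneidx is the highest zone the request may be satisfied from, derived from the GFP flags by the callers in this file; a lowmem-bound request therefore has high_zoneidx < ZONE_NORMAL, and killing an arbitrary task would mostly free highmem/normal pages rather than the lowmem being asked for. A minimal illustration (the __alloc_pages_nodemask() context is assumed):

    /* in the allocator entry point */
    enum zone_type high_zoneidx = gfp_zone(gfp_mask);

    /*
     * e.g. a GFP_DMA request resolves to ZONE_DMA, which is below
     * ZONE_NORMAL, so the new check above bails out before invoking
     * the OOM killer.
     */
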
@@ -2052,15 +2056,23 @@ rebalance:
                        if (page)
                                goto got_pg;
 
-                       /*
-                        * The OOM killer does not trigger for high-order
-                        * ~__GFP_NOFAIL allocations so if no progress is being
-                        * made, there are no other options and retrying is
-                        * unlikely to help.
-                        */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
-                                               !(gfp_mask & __GFP_NOFAIL))
-                               goto nopage;
+                       if (!(gfp_mask & __GFP_NOFAIL)) {
+                               /*
+                                * The oom killer is not called for high-order
+                                * allocations that may fail, so if no progress
+                                * is being made, there are no other options and
+                                * retrying is unlikely to help.
+                                */
+                               if (order > PAGE_ALLOC_COSTLY_ORDER)
+                                       goto nopage;
+                               /*
+                                * The oom killer is not called for lowmem
+                                * allocations to prevent needlessly killing
+                                * innocent tasks.
+                                */
+                               if (high_zoneidx < ZONE_NORMAL)
+                                       goto nopage;
+                       }
 
                        goto restart;
                }
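Note that both bail-outs are now guarded by !(gfp_mask & __GFP_NOFAIL): a caller that cannot tolerate failure keeps looping via restart instead of reaching nopage. A hypothetical pair of callers to make the difference concrete (not part of the patch):

    /* may now return NULL through nopage: costly order, no progress */
    struct page *maybe = alloc_pages(GFP_KERNEL,
                                     PAGE_ALLOC_COSTLY_ORDER + 1);

    /* retries inside the allocator until it succeeds; never NULL */
    struct page *pinned = alloc_pages(GFP_KERNEL | __GFP_NOFAIL,
                                      PAGE_ALLOC_COSTLY_ORDER + 1);
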
@@ -2412,7 +2424,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
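show_free_areas() now reports the same drift-aware estimate the watermark check uses, so diagnostic output matches allocator decisions under pressure. The exact fallback behind it sums the per-cpu deltas back into the global value; a sketch paraphrased from the companion vmstat change (names are assumptions here):

    static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                            enum zone_stat_item item)
    {
            long x = atomic_long_read(&zone->vm_stat[item]);
    #ifdef CONFIG_SMP
            int cpu;

            /* add back the deltas still parked in each CPU's diff */
            for_each_online_cpu(cpu)
                    x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

            if (x < 0)
                    x = 0;
    #endif
            return x;
    }
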
@@ -4089,8 +4101,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
 
-               zone->prev_priority = DEF_PRIORITY;
-
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);