page-allocator: maintain rolling count of pages to free from the PCP
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b1c39e..6877e22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -525,32 +525,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
 {
        int migratetype = 0;
+       int batch_free = 0;
 
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
 
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count--) {
+       while (count) {
                struct page *page;
                struct list_head *list;
 
                /*
-                * Remove pages from lists in a round-robin fashion. This spinning
-                * around potentially empty lists is bloody awful, alternatives that
-                * don't suck are welcome
+                * Remove pages from lists in a round-robin fashion. A
+                * batch_free count is maintained that is incremented when an
+                * empty list is encountered.  This is so more pages are freed
+                * off fuller lists instead of spinning excessively around empty
+                * lists
                 */
                do {
+                       batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));
 
-               page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_one_page list manipulates */
-               list_del(&page->lru);
-               trace_mm_page_pcpu_drain(page, 0, migratetype);
-               __free_one_page(page, zone, 0, migratetype);
+               do {
+                       page = list_entry(list->prev, struct page, lru);
+                       /* must delete as __free_one_page list manipulates */
+                       list_del(&page->lru);
+                       __free_one_page(page, zone, 0, migratetype);
+                       trace_mm_page_pcpu_drain(page, 0, migratetype);
+               } while (--count && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
 }
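
The hunk above can be exercised outside the kernel. The following is a minimal standalone userspace sketch of the same rolling batch_free drain: struct page, the list helpers, drain_bulk() and the MIGRATE_PCPTYPES value of 3 are simplified stand-ins chosen for illustration, not the kernel's definitions. It shows how every empty list skipped during the round-robin search bumps batch_free, so that correspondingly more pages are taken off the next non-empty list.

/*
 * Standalone sketch of the rolling batch_free drain in the patch above.
 * struct page, the list helpers and MIGRATE_PCPTYPES are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define MIGRATE_PCPTYPES 3

struct page {
	int pfn;
	struct page *prev, *next;	/* stand-in for the lru list_head */
};

/* one circular list head per migratetype, mirroring pcp->lists[] */
struct list {
	struct page head;
};

static void list_init(struct list *l)
{
	l->head.prev = l->head.next = &l->head;
}

static int list_empty(struct list *l)
{
	return l->head.next == &l->head;
}

static void list_add_tail(struct list *l, struct page *p)
{
	p->prev = l->head.prev;
	p->next = &l->head;
	l->head.prev->next = p;
	l->head.prev = p;
}

static void list_del(struct page *p)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
}

/*
 * Drain 'count' pages the way the patched free_pcppages_bulk() does:
 * each empty list skipped while searching bumps batch_free, and that many
 * pages are then taken off the next non-empty list before the search resumes.
 */
static void drain_bulk(struct list *lists, int count)
{
	int migratetype = 0;
	int batch_free = 0;

	while (count) {
		struct list *list;
		struct page *page;

		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &lists[migratetype];
		} while (list_empty(list));

		do {
			/* take from the tail, like list->prev in the patch */
			page = list->head.prev;
			list_del(page);
			printf("freed pfn %d from migratetype %d\n",
			       page->pfn, migratetype);
			free(page);
		} while (--count && --batch_free && !list_empty(list));
	}
}

int main(void)
{
	struct list lists[MIGRATE_PCPTYPES];
	int i;

	for (i = 0; i < MIGRATE_PCPTYPES; i++)
		list_init(&lists[i]);

	/* fill lists 0 and 2 only, so the search must step past list 1 */
	for (i = 0; i < 6; i++) {
		struct page *p = malloc(sizeof(*p));

		p->pfn = i;
		list_add_tail(&lists[(i % 2) * 2], p);
	}

	drain_bulk(lists, 6);
	return 0;
}

With only lists 0 and 2 populated, the output shows the search stepping past the empty list and batch_free growing, so more pages come off the lists that still hold something rather than the loop spinning around the empty one.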