[PATCH] zone_reclaim: dynamic slab reclaim
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea49878..cf913bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -448,9 +448,11 @@ static void free_pages_bulk(struct zone *zone, int count,
 
 static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-       LIST_HEAD(list);
-       list_add(&page->lru, &list);
-       free_pages_bulk(zone, 1, &list, order);
+       spin_lock(&zone->lock);
+       zone->all_unreclaimable = 0;
+       zone->pages_scanned = 0;
+       __free_one_page(page, zone, order);
+       spin_unlock(&zone->lock);
 }
 
 static void __free_pages_ok(struct page *page, unsigned int order)
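
For context, free_pages_bulk() does roughly the following under zone->lock;
the new free_one_page() inlines the single-page case directly, avoiding the
LIST_HEAD round trip while still resetting the reclaim state so kswapd sees
the zone as viable again (a sketch of the helper as it stands in this tree,
not part of the patch):

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		/* __free_one_page() manipulates page->lru, so unlink first */
		page = list_entry(list->prev, struct page, lru);
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);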
@@ -631,7 +633,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 #ifdef CONFIG_NUMA
 /*
  * Called from the slab reaper to drain pagesets on a particular node that
- * belong to the currently executing processor.
+ * belongs to the currently executing processor.
  * Note that this function must be called with the thread pinned to
  * a single processor.
  */
@@ -645,6 +647,9 @@ void drain_node_pages(int nodeid)
                struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
                struct per_cpu_pageset *pset;
 
+               if (!populated_zone(zone))
+                       continue;
+
                pset = zone_pcp(zone, smp_processor_id());
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
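
populated_zone() simply tests zone->present_pages, so the loop no longer
touches per-cpu pagesets of zones that have no memory behind them (a sketch
of the helper from include/linux/mmzone.h of this era):

	static inline int populated_zone(struct zone *zone)
	{
		return (!!zone->present_pages);
	}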
@@ -887,35 +892,37 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
        struct zone **z = zonelist->zones;
        struct page *page = NULL;
        int classzone_idx = zone_idx(*z);
+       struct zone *zone;
 
        /*
         * Go through the zonelist once, looking for a zone with enough free.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
        do {
+               zone = *z;
                if (unlikely((gfp_mask & __GFP_THISNODE) &&
-                       (*z)->zone_pgdat != zonelist->zones[0]->zone_pgdat))
+                       zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                               !cpuset_zone_allowed(*z, gfp_mask))
+                               !cpuset_zone_allowed(zone, gfp_mask))
                        continue;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
                        if (alloc_flags & ALLOC_WMARK_MIN)
-                               mark = (*z)->pages_min;
+                               mark = zone->pages_min;
                        else if (alloc_flags & ALLOC_WMARK_LOW)
-                               mark = (*z)->pages_low;
+                               mark = zone->pages_low;
                        else
-                               mark = (*z)->pages_high;
-                       if (!zone_watermark_ok(*z, order, mark,
+                               mark = zone->pages_high;
+                       if (!zone_watermark_ok(zone, order, mark,
                                    classzone_idx, alloc_flags))
                                if (!zone_reclaim_mode ||
-                                   !zone_reclaim(*z, gfp_mask, order))
+                                   !zone_reclaim(zone, gfp_mask, order))
                                        continue;
                }
 
-               page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
+               page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
                if (page) {
                        break;
                }
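
Caching *z in a local zone pointer avoids re-reading the zonelist entry at
every use. When the watermark test fails, the zone is only skipped once
zone_reclaim() has also failed; the vmscan.c half of this series gates
zone_reclaim() on the new per-zone thresholds, roughly as follows (a sketch,
not shown on this page):

	int zone_reclaim(struct zone *zone, gfp_t gfp_mask, int order)
	{
		/*
		 * Only reclaim when there is enough unmapped file-backed
		 * page cache or reclaimable slab in the zone to matter.
		 */
		if (zone_page_state(zone, NR_FILE_PAGES) -
		    zone_page_state(zone, NR_FILE_MAPPED) <=
					zone->min_unmapped_pages &&
		    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <=
					zone->min_slab_pages)
			return 0;
		/* ... otherwise proceed with the actual reclaim pass ... */
	}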
@@ -1297,7 +1304,8 @@ void show_free_areas(void)
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
                nr_free_pages(),
-               global_page_state(NR_SLAB),
+               global_page_state(NR_SLAB_RECLAIMABLE) +
+                       global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
                global_page_state(NR_PAGETABLE));
 
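
Splitting NR_SLAB into NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE lets
zone reclaim target only slabs that shrinkers can actually free; summing
both here keeps the reported slab total unchanged. global_page_state() just
reads the global vmstat counter (a sketch from include/linux/vmstat.h of
this era):

	static inline unsigned long global_page_state(enum zone_stat_item item)
	{
		long x = atomic_long_read(&vm_stat[item]);
	#ifdef CONFIG_SMP
		if (x < 0)
			x = 0;
	#endif
		return x;
	}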
@@ -1995,8 +2003,9 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                zone->spanned_pages = size;
                zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
-               zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+               zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
                                                / 100;
+               zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                spin_lock_init(&zone->lock);
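
For a sense of scale, assuming the sysctl_min_slab_ratio default of 5 from
the vmscan.c half of this series:

	/*
	 * Example: realsize = 262144 pages (1GB of 4KB pages)
	 *   min_slab_pages = 262144 * 5 / 100 = 13107 pages (~51MB)
	 * zone_reclaim() starts shrinking slabs once reclaimable slab
	 * in the zone exceeds this threshold.
	 */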
@@ -2306,10 +2315,26 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
                return rc;
 
        for_each_zone(zone)
-               zone->min_unmapped_ratio = (zone->present_pages *
+               zone->min_unmapped_pages = (zone->present_pages *
                                sysctl_min_unmapped_ratio) / 100;
        return 0;
 }
+
+int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
+       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+       struct zone *zone;
+       int rc;
+
+       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       if (rc)
+               return rc;
+
+       for_each_zone(zone)
+               zone->min_slab_pages = (zone->present_pages *
+                               sysctl_min_slab_ratio) / 100;
+       return 0;
+}
 #endif
 
 /*
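
The new handler is reached through a ctl_table entry added in the
kernel/sysctl.c part of this series; presumably along these lines, with the
VM_MIN_SLAB ctl_name and the 0..100 clamp assumed by analogy with
min_unmapped_ratio:

	{
		.ctl_name	= VM_MIN_SLAB,	/* assumed enum value */
		.procname	= "min_slab_ratio",
		.data		= &sysctl_min_slab_ratio,
		.maxlen		= sizeof(sysctl_min_slab_ratio),
		.mode		= 0644,
		.proc_handler	= &sysctl_min_slab_ratio_sysctl_handler,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,		/* clamp to 0..100 */
		.extra2		= &one_hundred,
	},

Writing to /proc/sys/vm/min_slab_ratio then re-derives every zone's
min_slab_pages through the for_each_zone() loop above, so the threshold
tracks the new ratio immediately.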