mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
author    Marek Szyprowski <m.szyprowski@samsung.com>
          Wed, 25 Jan 2012 11:49:24 +0000 (12:49 +0100)
committer Grazvydas Ignotas <notasas@gmail.com>
          Tue, 26 Feb 2013 17:51:17 +0000 (19:51 +0200)
alloc_contig_range() performs memory allocation, so it should also keep
the memory watermarks at the correct level. This commit adds a call to
*_slowpath-style reclaim to free enough pages so that the final
collection of contiguous pages from the freelists will not starve the
system.
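
For context, a minimal caller sketch follows. The demo_* helpers are
hypothetical; alloc_contig_range(), free_contig_range() and MIGRATE_CMA come
from the CMA patchset this commit extends, and the sketch assumes the
upstream alloc_contig_range(start, end, migratetype) signature. The reclaim
added by this commit fires inside alloc_contig_range(), just before the
freelists are grabbed.

/*
 * Hypothetical helpers: allocate/release nr_pages physically contiguous
 * pages from a MIGRATE_CMA region starting at pfn.
 */
static struct page *demo_alloc_contig(unsigned long pfn,
				      unsigned long nr_pages)
{
	/*
	 * __reclaim_pages() now runs inside alloc_contig_range(), so the
	 * freelist grab cannot push the zone below its watermarks.
	 */
	if (alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA))
		return NULL;
	return pfn_to_page(pfn);
}

static void demo_free_contig(unsigned long pfn, unsigned long nr_pages)
{
	free_contig_range(pfn, nr_pages);
}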

[notasas@gmail.com: update out_of_memory args]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
include/linux/mmzone.h
mm/page_alloc.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3217b4c..132da3b 100644
@@ -63,8 +63,10 @@ enum {
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define cma_wmark_pages(zone)        zone->min_cma_pages
 #else
 #  define is_migrate_cma(migratetype) false
+#  define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -366,6 +368,13 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
+#endif
+#ifdef CONFIG_CMA
+       /*
+        * CMA needs to increase watermark levels during the allocation
+        * process to make sure that the system is not starved.
+        */
+       unsigned long           min_cma_pages;
 #endif
        struct free_area        free_area[MAX_ORDER];
 
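A note on the !CONFIG_CMA case: the stub under the #else above keeps non-CMA
builds free of overhead. A hedged illustration (the expanded statement is
taken from the mm/page_alloc.c hunk that follows):

/*
 * With CONFIG_CMA=n, cma_wmark_pages(zone) expands to the constant 0,
 * so a statement such as
 *
 *     zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
 *
 * becomes a no-op the compiler can discard, and struct zone carries no
 * min_cma_pages field at all.
 */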
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 333e9c5..6efc081 100644
@@ -5371,6 +5371,11 @@ static void __setup_per_zone_wmarks(void)
 
                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+               zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+               zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
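
To make the arithmetic above concrete, a worked example with illustrative
numbers (not taken from the source):

/*
 * Assume min_wmark_pages(zone) = 1024 pages and tmp = 1024. The base
 * watermarks are then LOW = 1024 + (1024 >> 2) = 1280 and
 * HIGH = 1024 + (1024 >> 1) = 1536. While a 2 MiB contiguous
 * allocation (512 pages at 4 KiB) is in flight, min_cma_pages = 512,
 * so MIN/LOW/HIGH are raised to 1536/1792/2048 until
 * __update_cma_watermarks(zone, -count) removes the bump again.
 */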
@@ -5968,6 +5973,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
        return ret > 0 ? 0 : ret;
 }
 
+/*
+ * Update the zone's CMA page counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->min_cma_pages += count;
+       spin_unlock_irqrestore(&zone->lock, flags);
+       setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger a memory pressure bump to reclaim some pages in order to be able
+ * to allocate 'count' pages in single page units. Does similar work to the
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+       int did_some_progress = 0;
+       int order = 1;
+
+       /*
+        * Increase the watermark levels to force kswapd to do its job and
+        * stabilise at the new watermark level.
+        */
+       __update_cma_watermarks(zone, count);
+
+       /* Obey watermarks as if the page was being allocated */
+       while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+               wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+               did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+                                                     NULL);
+               if (!did_some_progress) {
+                       /* Exhausted what can be done so it's blamo time */
+                       out_of_memory(zonelist, gfp_mask, order, NULL);
+               }
+       }
+
+       /* Restore original watermark levels. */
+       __update_cma_watermarks(zone, -count);
+
+       return count;
+}
+
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:     start PFN to allocate
@@ -6066,6 +6119,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                goto done;
        }
 
+       /*
+        * Reclaim enough pages to make sure that contiguous allocation
+        * will not starve the system.
+        */
+       __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end - start);
+
+       /* Grab isolated pages from freelists. */
        outer_end = isolate_freepages_range(outer_start, end);
        if (!outer_end) {
                ret = -EBUSY;
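
A closing note on ordering: the watermark bump is only a lever to make
reclaim build up 'count' pages of headroom; __reclaim_pages() restores the
original watermarks before isolate_freepages_range() takes the pages. A
condensed sketch of the resulting flow (simplified, error paths omitted):

/*
 * alloc_contig_range(start, end, migratetype) after this commit:
 *
 *   1. __alloc_contig_migrate_range()            - migrate movable
 *      pages out of [start, end)
 *   2. __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end - start)
 *      - temporarily raise the watermarks by end - start pages,
 *        reclaim until the raised low watermark is met, then restore;
 *        the reclaimed headroom stays on the freelists
 *   3. isolate_freepages_range(outer_start, end) - grab the isolated
 *      pages without dropping the zone below its watermarks
 */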