pandora-kernel.git: mm/page_alloc.c
index ad8cb2f..5961c93 100644
@@ -59,6 +59,7 @@
 #include <linux/prefetch.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
+#include <linux/nmi.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1241,9 +1242,14 @@ void drain_all_pages(void)
 
 #ifdef CONFIG_HIBERNATION
 
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT  (128*1024)
+
 void mark_free_pages(struct zone *zone)
 {
-       unsigned long pfn, max_zone_pfn;
+       unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
        unsigned long flags;
        int order, t;
        struct list_head *curr;
@@ -1258,6 +1264,11 @@ void mark_free_pages(struct zone *zone)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
 
+                       if (!--page_count) {
+                               touch_nmi_watchdog();
+                               page_count = WD_PAGE_COUNT;
+                       }
+
                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }
@@ -1267,8 +1278,13 @@ void mark_free_pages(struct zone *zone)
                        unsigned long i;
 
                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
-                       for (i = 0; i < (1UL << order); i++)
+                       for (i = 0; i < (1UL << order); i++) {
+                               if (!--page_count) {
+                                       touch_nmi_watchdog();
+                                       page_count = WD_PAGE_COUNT;
+                               }
                                swsusp_set_page_free(pfn_to_page(pfn + i));
+                       }
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
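The hunks above (together with the new <linux/nmi.h> include) guard mark_free_pages() against hard-lockup warnings: the function walks every pfn and every free list of a zone with zone->lock held and interrupts disabled, which on large-memory machines can run long enough to trip the NMI watchdog during hibernation. A counter is decremented once per page and touch_nmi_watchdog() is called each time WD_PAGE_COUNT pages have been visited. A minimal sketch of the same countdown pattern, with illustrative names (walk_items, do_one_item and BATCH are not from the kernel):

#include <linux/nmi.h>

#define BATCH   (128 * 1024)

/* Hypothetical per-item work standing in for the swsusp bookkeeping above. */
static void do_one_item(unsigned long i) { }

static void walk_items(unsigned long nr_items)
{
        unsigned long i, countdown = BATCH;

        for (i = 0; i < nr_items; i++) {
                /* Poke the hard-lockup detector every BATCH iterations. */
                if (!--countdown) {
                        touch_nmi_watchdog();
                        countdown = BATCH;
                }
                do_one_item(i);
        }
}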
@@ -1894,6 +1910,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
            debug_guardpage_minorder() > 0)
                return;
 
+       /*
+        * Walking all memory to count page types is very expensive and should
+        * be inhibited in non-blockable contexts.
+        */
+       if (!(gfp_mask & __GFP_WAIT))
+               filter |= SHOW_MEM_FILTER_PAGE_COUNT;
+
        /*
         * This documents exceptions given to allocations in certain
         * contexts that are allowed to allocate outside current's set
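This hunk cheapens allocation-failure reporting for callers that cannot sleep: when __GFP_WAIT is clear (e.g. GFP_ATOMIC), SHOW_MEM_FILTER_PAGE_COUNT is added to the filter so the subsequent memory dump skips the walk over all of memory that counts pages by type. A rough sketch of how a reporting path honours such a filter bit (illustrative only; the helper names are made up, this is not the kernel's show_mem()):

static void report_memory_state(unsigned int filter)
{
        print_per_zone_counters();              /* cheap: reads per-zone counters */

        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
                return;                         /* atomic caller: stop here */

        count_every_page_by_type();             /* expensive: touches every pfn */
}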
@@ -2200,7 +2223,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2209,20 +2232,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         * The caller may dip into page reserves a bit more if the caller
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
         */
        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-       if (!wait) {
+       if (atomic) {
                /*
-                * Not worth trying to allocate harder for
-                * __GFP_NOMEMALLOC even if it can't schedule.
+                * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+                * if it can't schedule.
                 */
-               if  (!(gfp_mask & __GFP_NOMEMALLOC))
+               if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                /*
-                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+                * comment for __cpuset_node_allowed_softwall().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
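Replacing the !wait test with atomic narrows which requests are treated as atomic: ALLOC_HARDER and the cpuset bypass are now granted only when the mask contains neither __GFP_WAIT nor __GFP_NO_KSWAPD. A worked example using the flag definitions of this kernel generation (GFP_ATOMIC is __GFP_HIGH here; treat the exact masks as illustrative):

/*
 *   gfp_mask                                  __GFP_WAIT  __GFP_NO_KSWAPD  atomic
 *   GFP_ATOMIC (== __GFP_HIGH)                    no            no          true
 *   GFP_KERNEL (__GFP_WAIT|__GFP_IO|__GFP_FS)     yes           no          false
 *   no __GFP_WAIT, but __GFP_NO_KSWAPD set        no            yes         false
 *
 * Only the first case gets ALLOC_HARDER and may ignore cpuset restrictions.
 * With the old !wait test the third case was treated like GFP_ATOMIC too,
 * letting opportunistic __GFP_NO_KSWAPD requests dip into the reserves.
 */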
@@ -4397,10 +4420,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
 {
        unsigned long usemapsize;
 
+       zonesize += zone_start_pfn & (pageblock_nr_pages-1);
        usemapsize = roundup(zonesize, pageblock_nr_pages);
        usemapsize = usemapsize >> pageblock_order;
        usemapsize *= NR_PAGEBLOCK_BITS;
@@ -4410,17 +4434,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 }
 
 static void __init setup_usemap(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long zonesize)
+                               struct zone *zone,
+                               unsigned long zone_start_pfn,
+                               unsigned long zonesize)
 {
-       unsigned long usemapsize = usemap_size(zonesize);
+       unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
        zone->pageblock_flags = NULL;
        if (usemapsize)
                zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
                                                                   usemapsize);
 }
 #else
-static inline void setup_usemap(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+                               unsigned long zone_start_pfn, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
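These two hunks size the pageblock-flags bitmap correctly for zones whose first pfn is not pageblock aligned: the misaligned lead-in of the first pageblock is added to zonesize before rounding, so the bitmap covers every pageblock the zone overlaps and flag lookups near the end of the zone stay inside the allocation. A worked example, assuming pageblock_nr_pages == 1024 (the real value depends on pageblock_order for the configuration):

/*
 *   zone_start_pfn = 1536, zonesize = 2048      (zone spans pfns 1536..3583)
 *
 *   old: roundup(2048, 1024)              = 2048  ->  bits for 2 pageblocks
 *   new: 2048 + (1536 & 1023)             = 2560
 *        roundup(2560, 1024)              = 3072  ->  bits for 3 pageblocks
 *
 * The zone actually overlaps three pageblocks (1024..2047, 2048..3071,
 * 3072..4095), so the old calculation came up one pageblock short.
 */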
@@ -4545,7 +4571,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                        continue;
 
                set_pageblock_order();
-               setup_usemap(pgdat, zone, size);
+               setup_usemap(pgdat, zone, zone_start_pfn, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
                BUG_ON(ret);
@@ -6195,6 +6221,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                zone->free_area[order].nr_free--;
                __mod_zone_page_state(zone, NR_FREE_PAGES,
                                      - (1UL << order));
+#ifdef CONFIG_HIGHMEM
+               if (PageHighMem(page))
+                       totalhigh_pages -= 1 << order;
+#endif
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);