Merge branch 'stable-3.2' into pandora-3.2

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2168489..1243ab5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
        struct mem_section *ms;
        struct page *page, *memmap;
 
-       if (!pfn_valid(start_pfn))
-               return;
-
        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);
 
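
Note: the pfn_valid() check is not dropped outright; the next hunk moves it
into the caller, register_page_bootmem_info_node(), where it is paired with
a node-ownership test.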
@@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
        end_pfn = pfn + pgdat->node_spanned_pages;
 
        /* register_section info */
-       for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
-               register_page_bootmem_info_section(pfn);
-
+       for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+               /*
+                * Some platforms can assign the same pfn to multiple nodes - on
+                * node0 as well as nodeN.  To avoid registering a pfn against
+                * multiple nodes we check that this pfn does not already
+                * reside in some other node.
+                */
+               if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+                       register_page_bootmem_info_section(pfn);
+       }
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
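
Why the caller-side test suffices: with sparsemem, pfn_to_nid() resolves the
owning node through the pfn's struct page, so a pfn spanned by both node0 and
nodeN is registered only by the node that actually owns it. A minimal sketch
of the sparsemem flavour (simplified; the real definition in
include/linux/mmzone.h is config-dependent):

	/* Sketch: resolve a pfn's node via its struct page (sparsemem case).
	 * Only meaningful for valid pfns, which is why the hunk above checks
	 * pfn_valid() first. */
	static inline int pfn_to_nid(unsigned long pfn)
	{
		return page_to_nid(pfn_to_page(pfn));
	}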
@@ -511,19 +515,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
-       if (need_zonelists_rebuild)
-               build_all_zonelists(zone);
-       else
-               zone_pcp_update(zone);
+       if (onlined_pages) {
+               node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+               if (need_zonelists_rebuild)
+                       build_all_zonelists(zone);
+               else
+                       zone_pcp_update(zone);
+       }
 
        mutex_unlock(&zonelists_mutex);
 
        init_per_zone_wmark_min();
 
-       if (onlined_pages) {
+       if (onlined_pages)
                kswapd_run(zone_to_nid(zone));
-               node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
-       }
 
        vm_total_pages = nr_free_pagecache_pages();
 
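
Two things change in this hunk: the zonelist rebuild and the node-state update
are skipped entirely when no page was actually onlined, and node_set_state()
now runs before the rebuild instead of after kswapd_run(). The tail of
online_pages() then reads roughly as follows (a condensed reconstruction of
the hunk, not the full function):

	if (onlined_pages) {
		/* Mark the node first, presumably so the rebuilt zonelists
		 * can take the newly populated node into account. */
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
		if (need_zonelists_rebuild)
			build_all_zonelists(zone);
		else
			zone_pcp_update(zone);
	}

	/* ... zonelists_mutex dropped, watermarks recomputed ... */

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));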
@@ -711,23 +716,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
  */
 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned long pfn;
+       unsigned long pfn, sec_end_pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn;
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
             pfn < end_pfn;
-            pfn += MAX_ORDER_NR_PAGES) {
-               i = 0;
-               /* This is just a CONFIG_HOLES_IN_ZONE check.*/
-               while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-                       i++;
-               if (i == MAX_ORDER_NR_PAGES)
+            pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+               /* Make sure the memory section is present first */
+               if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
-               page = pfn_to_page(pfn + i);
-               if (zone && page_zone(page) != zone)
-                       return 0;
-               zone = page_zone(page);
+               for (; pfn < sec_end_pfn && pfn < end_pfn;
+                    pfn += MAX_ORDER_NR_PAGES) {
+                       i = 0;
+                       /* This is just a CONFIG_HOLES_IN_ZONE check.*/
+                       while ((i < MAX_ORDER_NR_PAGES) &&
+                               !pfn_valid_within(pfn + i))
+                               i++;
+                       if (i == MAX_ORDER_NR_PAGES)
+                               continue;
+                       page = pfn_to_page(pfn + i);
+                       if (zone && page_zone(page) != zone)
+                               return 0;
+                       zone = page_zone(page);
+               }
        }
        return 1;
 }
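
The old loop stepped in MAX_ORDER_NR_PAGES strides and leaned on
pfn_valid_within(), which compiles to 1 unless CONFIG_HOLES_IN_ZONE is set;
on sparsemem that could let pfn_to_page() run against a pfn whose section has
no memmap at all. The rewrite aligns up to a section boundary, skips sections
that are not present, and only then probes within each section. A hedged
sketch of the section arithmetic involved (simplified from
include/linux/mmzone.h; exact forms vary by kernel version):

	/* Sparsemem carves memory into fixed-size sections, and presence is
	 * tracked per section, so a section is the granule we may skip. */
	#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
	#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
	#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)
	#define SECTION_ALIGN_UP(pfn) \
		(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)

present_section_nr() reports whether a section has a memmap, so the check
above runs before any pfn_to_page() is attempted on the range.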
@@ -809,7 +821,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                }
                /* this function returns # of failed pages */
                ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
-                                                               true, true);
+                                                       true, MIGRATE_SYNC);
                if (ret)
                        putback_lru_pages(&source);
        }
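
The last migrate_pages() argument changes from the old boolean "sync" flag to
the migrate_mode enum; MIGRATE_SYNC keeps hot-remove fully synchronous, as
before. A sketch of the enum this tracks (from include/linux/migrate_mode.h
in kernels of this vintage; comments paraphrased):

	enum migrate_mode {
		MIGRATE_ASYNC,		/* never block */
		MIGRATE_SYNC_LIGHT,	/* may block, but skip the costliest
					 * waits, e.g. ->writepage */
		MIGRATE_SYNC,		/* fully synchronous migration */
	};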
@@ -891,7 +903,7 @@ static int __ref offline_pages(unsigned long start_pfn,
        nr_pages = end_pfn - start_pfn;
 
        /* set above range as isolated */
-       ret = start_isolate_page_range(start_pfn, end_pfn);
+       ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        if (ret)
                goto out;
 
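
start_isolate_page_range() and its undo counterpart now take the migratetype
to restore once isolation ends, an interface change introduced alongside the
CMA work; passing MIGRATE_MOVABLE keeps the previous hot-remove behaviour.
The signatures are roughly (per include/linux/page-isolation.h of this era;
hedged):

	int start_isolate_page_range(unsigned long start_pfn,
				     unsigned long end_pfn,
				     unsigned migratetype);
	int undo_isolate_page_range(unsigned long start_pfn,
				    unsigned long end_pfn,
				    unsigned migratetype);

The two undo_isolate_page_range() call sites in the hunks below (the success
path and the MEM_CANCEL_OFFLINE rollback) pass the same MIGRATE_MOVABLE for
the same reason.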
@@ -956,7 +968,7 @@ repeat:
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -981,7 +993,7 @@ failed_removal:
                start_pfn, end_pfn);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
-       undo_isolate_page_range(start_pfn, end_pfn);
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 
 out:
        unlock_memory_hotplug();