 	struct mem_section *ms;
 	struct page *page, *memmap;

-	if (!pfn_valid(start_pfn))
-		return;
-
 	section_nr = pfn_to_section_nr(start_pfn);
 	ms = __nr_to_section(section_nr);

 	end_pfn = pfn + pgdat->node_spanned_pages;

 	/* register_section info */
-	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
-		register_page_bootmem_info_section(pfn);
-
+	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+		/*
+		 * Some platforms can assign the same pfn to multiple nodes - on
+		 * node0 as well as nodeN. To avoid registering a pfn against
+		 * multiple nodes we check that this pfn does not already
+		 * reside in some other node.
+		 */
+		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+			register_page_bootmem_info_section(pfn);
+	}
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
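
The first two hunks move the validity check out of register_page_bootmem_info_section() and tighten it in the caller: a section is only registered when its first pfn is valid and actually belongs to the node being scanned, which avoids registering the same section twice on platforms where one node's pfn range overlaps another's. Below is a rough user-space model of the reworked loop; pfn_valid(), pfn_to_nid(), register_section() and the PAGES_PER_SECTION value are illustrative stand-ins, not the kernel implementations.

#include <stdio.h>

#define PAGES_PER_SECTION	32768UL	/* assumed section size in pages */

static int pfn_valid(unsigned long pfn) { (void)pfn; return 1; }	/* stub: every pfn is backed */

static int pfn_to_nid(unsigned long pfn)	/* stub: pretend odd sections belong to node 1 */
{
	return (int)((pfn / PAGES_PER_SECTION) % 2);
}

static void register_section(unsigned long pfn, int node)
{
	printf("node %d: register section starting at pfn %lu\n", node, pfn);
}

int main(void)
{
	int node = 0;
	unsigned long pfn = 0, end_pfn = 4 * PAGES_PER_SECTION;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/* only register sections whose pages really belong to this node */
		if (pfn_valid(pfn) && pfn_to_nid(pfn) == node)
			register_section(pfn, node);
	}
	return 0;
}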
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
-	if (need_zonelists_rebuild)
-		build_all_zonelists(zone);
-	else
-		zone_pcp_update(zone);
+	if (onlined_pages) {
+		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+		if (need_zonelists_rebuild)
+			build_all_zonelists(zone);
+		else
+			zone_pcp_update(zone);
+	}

 	mutex_unlock(&zonelists_mutex);

 	init_per_zone_wmark_min();

-	if (onlined_pages) {
+	if (onlined_pages)
 		kswapd_run(zone_to_nid(zone));
-		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
-	}

 	vm_total_pages = nr_free_pagecache_pages();
 }
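
This hunk reorders the tail of online_pages(): the node is flagged N_HIGH_MEMORY before the zonelists are rebuilt, so the rebuild can see the newly populated node, and both steps (as well as the per-cpu page list update) are skipped entirely when no page was actually onlined; kswapd is still only started when onlined_pages is non-zero. A minimal model of that ordering follows; node_set_has_memory(), rebuild_zonelists() and update_pcp_lists() are hypothetical stand-ins, not kernel functions.

#include <stdbool.h>
#include <stdio.h>

static void node_set_has_memory(int nid) { printf("node %d now has memory\n", nid); }
static void rebuild_zonelists(void)      { printf("zonelists rebuilt\n"); }
static void update_pcp_lists(void)       { printf("per-cpu page lists updated\n"); }

static void finish_online(unsigned long onlined_pages, int nid,
			  bool need_zonelists_rebuild)
{
	if (!onlined_pages)
		return;			/* nothing came online: leave node state alone */

	node_set_has_memory(nid);	/* must happen before the rebuild */
	if (need_zonelists_rebuild)
		rebuild_zonelists();
	else
		update_pcp_lists();
}

int main(void)
{
	finish_online(512, 1, true);	/* new node: mark it, then rebuild zonelists */
	finish_online(0, 2, true);	/* onlining failed: no state change at all */
	return 0;
}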
 		/* this function returns # of failed pages */
 		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
 							true, true);
+							true, MIGRATE_SYNC);
 		if (ret)
 			putback_lru_pages(&source);
 	}
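
The last hunk follows the migrate_pages() interface change in which the final boolean "sync" argument became an enum migrate_mode; memory hot-remove keeps fully synchronous behaviour by passing MIGRATE_SYNC. The sketch below restates the three modes; the comments paraphrase include/linux/migrate_mode.h of that era, and migrate_mode_name() is only an illustrative helper.

#include <stdio.h>

enum migrate_mode {
	MIGRATE_ASYNC,		/* never block */
	MIGRATE_SYNC_LIGHT,	/* may block, but not on page writeback */
	MIGRATE_SYNC,		/* block as long as needed to migrate the page */
};

static const char *migrate_mode_name(enum migrate_mode mode)
{
	switch (mode) {
	case MIGRATE_ASYNC:		return "async";
	case MIGRATE_SYNC_LIGHT:	return "sync-light";
	case MIGRATE_SYNC:		return "sync";
	}
	return "unknown";
}

int main(void)
{
	/* memory hot-remove wants the strongest guarantee, hence MIGRATE_SYNC */
	printf("hot-remove migrates pages in %s mode\n",
	       migrate_mode_name(MIGRATE_SYNC));
	return 0;
}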