net/mlx4_en: Fix mixed PFC and Global pause user control requests
[pandora-kernel.git] / mm / sparse.c
index 61d7cde..42935b5 100644
@@ -353,29 +353,21 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                                 usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
+       if (!usemap) {
+               usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+               if (!usemap) {
+                       printk(KERN_WARNING "%s: allocation failed\n", __func__);
+                       return;
                }
-               return;
        }
 
-       usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
-                       check_usemap_section_nr(nodeid, usemap_map[pnum]);
-               }
-               return;
+       for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+               if (!present_section_nr(pnum))
+                       continue;
+               usemap_map[pnum] = usemap;
+               usemap += size;
+               check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
-
-       printk(KERN_WARNING "%s: allocation failed\n", __func__);
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
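The hunk above collapses the two duplicated fill loops into one: the function now tries the per-node section allocation first, falls back to alloc_bootmem_node() only when that fails, and then runs a single loop over the present sections. A side effect visible in the diff is that check_usemap_section_nr() now runs on both allocation paths, where previously only the bootmem path called it. A minimal userspace sketch of the consolidated control flow, assuming stand-in allocators (alloc_preferred() and alloc_fallback() are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_preferred(size_t size)
{
        (void)size;             /* pretend the preferred pool is exhausted */
        return NULL;
}

static void *alloc_fallback(size_t size)
{
        return calloc(1, size);
}

static int fill_table(char **map, int begin, int end, size_t size)
{
        char *buf = alloc_preferred(size * (end - begin));

        if (!buf) {
                buf = alloc_fallback(size * (end - begin));
                if (!buf) {
                        fprintf(stderr, "%s: allocation failed\n", __func__);
                        return -1;
                }
        }

        /* one fill loop serves both allocation paths */
        for (int i = begin; i < end; i++) {
                map[i] = buf;
                buf += size;
        }
        return 0;
}

int main(void)
{
        char *map[4] = { NULL };

        if (fill_table(map, 0, 4, 32) == 0)
                printf("map[0]=%p map[3]=%p\n", (void *)map[0], (void *)map[3]);
        free(map[0]);
        return 0;
}

The early return on failure replaces the old fall-through-and-warn structure, so the warning prints exactly once and the fill loop can assume a valid buffer.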
@@ -494,6 +486,9 @@ void __init sparse_init(void)
        struct page **map_map;
 #endif
 
+       /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+       set_pageblock_order();
+
        /*
         * map is using big page (aka 2M in x86 64 bit)
         * usemap is less one page (aka 24 bytes)
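The new set_pageblock_order() call runs before sparse_init() sizes anything. As the added comment notes, this matters for HUGETLB_PAGE_SIZE_VARIABLE, where pageblock_order is a runtime variable rather than a compile-time constant: the per-section usemap length is derived from it, so computing sizes while it still holds its default would allocate the wrong amount. A standalone sketch of that ordering dependency (the section shift and flag-bit count below are example values, not this tree's):

#include <stdio.h>

#define PFN_SECTION_SHIFT 15            /* example: 32768 pages per section */
#define NR_PAGEBLOCK_BITS 4

static unsigned int pageblock_order;    /* runtime value, 0 until set up */

static unsigned long usemap_bits(void)
{
        /* one group of flag bits per pageblock in the section */
        return (1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
        printf("sized before setup: %lu bits\n", usemap_bits());  /* inflated */
        pageblock_order = 9;            /* e.g. the huge-page order chosen at boot */
        printf("sized after setup:  %lu bits\n", usemap_bits());
        return 0;
}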
@@ -627,7 +622,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
        return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
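This hunk only renames the stub's parameter from page to memmap, but the rename is what keeps the two build variants of free_map_bootmem() interchangeable: callers are compiled against whichever definition the #ifdef selects, so both must present the same signature. A compilable toy version of the pattern, with SIMULATE_VMEMMAP standing in for the kernel config symbol:

#include <stdio.h>

struct page { unsigned long magic; };

#ifdef SIMULATE_VMEMMAP
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
        (void)memmap;                   /* stub variant: nothing to release */
        (void)nr_pages;
}
#else
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
        printf("releasing %lu pages at %p\n", nr_pages, (void *)memmap);
}
#endif

int main(void)
{
        struct page dummy[4] = { { 0 } };

        free_map_bootmem(dummy, 4);     /* identical call site either way */
        return 0;
}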
@@ -668,10 +663,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
                           get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic;
+       struct page *page = virt_to_page(memmap);
 
        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;
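Here the virt_to_page() conversion moves into free_map_bootmem() itself: the callee derives its struct page cursor from the memmap virtual address once, so callers no longer pre-convert. A userspace analog of the refactor, where virt_to_desc() plays the role of virt_to_page() over a mock descriptor table:

#include <stdio.h>

#define NPAGES 8
#define MOCK_PAGE_SIZE 4096

struct page { unsigned long magic; };

static struct page descriptors[NPAGES];
static unsigned char memory[NPAGES * MOCK_PAGE_SIZE];

/* stand-in for virt_to_page(): map an address to its page descriptor */
static struct page *virt_to_desc(void *addr)
{
        return &descriptors[((unsigned char *)addr - memory) / MOCK_PAGE_SIZE];
}

static void free_map(void *memmap, unsigned long nr_pages)
{
        struct page *page = virt_to_desc(memmap);       /* converted once, here */

        for (unsigned long i = 0; i < nr_pages; i++, page++)
                printf("page %lu magic=%#lx\n", i, page->magic);
}

int main(void)
{
        for (int i = 0; i < NPAGES; i++)
                descriptors[i].magic = 0xdead0000UL + i;

        free_map(&memory[2 * MOCK_PAGE_SIZE], 3);       /* caller passes the address */
        return 0;
}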
@@ -720,13 +716,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
         */
 
        if (memmap) {
-               struct page *memmap_page;
-               memmap_page = virt_to_page(memmap);
-
                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;
 
-               free_map_bootmem(memmap_page, nr_pages);
+               free_map_bootmem(memmap, nr_pages);
        }
 }
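And this is the caller side of the same refactor: free_section_usemap() drops its local memmap_page and passes memmap straight through, keeping the address-to-descriptor conversion next to the loop that actually walks the pages. The retained nr_pages expression is just the size of one section's memmap rounded up to whole pages; a quick arithmetic check with typical x86_64 constants (illustrative values, not taken from this tree):

#include <stdio.h>

#define MOCK_PAGE_SIZE 4096UL
#define MOCK_PAGE_SHIFT 12
#define MOCK_PAGE_ALIGN(x) (((x) + MOCK_PAGE_SIZE - 1) & ~(MOCK_PAGE_SIZE - 1))
#define PAGES_PER_SECTION 32768UL       /* 128 MiB section / 4 KiB pages */

struct page_mock { unsigned char bytes[64]; };  /* 64-byte struct page stand-in */

int main(void)
{
        unsigned long nr_pages =
                MOCK_PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page_mock))
                        >> MOCK_PAGE_SHIFT;

        /* 32768 descriptors * 64 bytes = 2 MiB of memmap -> 512 backing pages */
        printf("memmap pages per section: %lu\n", nr_pages);
        return 0;
}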