Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 9855ba3..719d476 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
  * Routines used by ia64 machines with contiguous (or virtually contiguous)
  * memory.
  */
-#include <linux/config.h>
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/mm.h>
@@ -27,7 +26,7 @@
 #include <asm/mca.h>
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -46,9 +45,15 @@ show_mem (void)
 
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
-       while (i-- > 0) {
-               if (!pfn_valid(i))
+       for (i = 0; i < max_mapnr; i++) {
+               if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+                       if (max_gap < LARGE_GAP)
+                               continue;
+                       i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
                        continue;
+               }
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
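
Aside: the rewritten loop above no longer tests every pfn in a hole one by one; when a large gap is present it jumps straight to the next valid pfn via vmemmap_find_next_valid_pfn(). A minimal user-space model of the skip pattern, assuming a toy layout; next_valid() and the valid[] table are illustrative stand-ins, not kernel code:

	#include <stdio.h>

	#define MAX_PFN 16

	/* toy pfn validity map with a hole in the middle (assumed layout) */
	static const int valid[MAX_PFN] = { 1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1 };

	/* stand-in for vmemmap_find_next_valid_pfn(): first valid pfn >= i */
	static unsigned long next_valid(unsigned long i)
	{
		while (i < MAX_PFN && !valid[i])
			i++;
		return i;
	}

	int main(void)
	{
		unsigned long i, total = 0;

		for (i = 0; i < MAX_PFN; i++) {
			if (!valid[i]) {
				/* skip the whole hole; -1 because the loop does i++ */
				i = next_valid(i) - 1;
				continue;
			}
			total++;
		}
		printf("%lu valid pfns\n", total);
		return 0;
	}
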
@@ -97,7 +102,7 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
  * Find a place to put the bootmap and return its starting address in
  * bootmap_start.  This address must be page-aligned.
  */
-int
+static int __init
 find_bootmap_location (unsigned long start, unsigned long end, void *arg)
 {
        unsigned long needed = *(unsigned long *)arg;
@@ -141,7 +146,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
  * Walk the EFI memory map and find usable memory for the system, taking
  * into account reserved areas.
  */
-void
+void __init
 find_memory (void)
 {
        unsigned long bootmap_size;
@@ -176,7 +181,7 @@ find_memory (void)
  *
  * Allocate and setup per-cpu data areas.
  */
-void *
+void * __cpuinit
 per_cpu_init (void)
 {
        void *cpu_data;
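
Aside: the annotation-only hunks above (static __init, void __init, void * __cpuinit) move these routines into sections the kernel can discard after boot or CPU bring-up. A rough user-space sketch of the underlying mechanism, assuming GCC or Clang; the section name ".init.demo", the my_init macro, and setup() are invented for illustration and are not the kernel's __init definition:

	#include <stdio.h>

	/* crude stand-in for the kernel's __init: place the code in a named
	 * section so a linker script (or the kernel) can free it later */
	#define my_init __attribute__((__section__(".init.demo")))

	static int my_init setup(void)
	{
		return 42;	/* one-time, boot-style initialization */
	}

	int main(void)
	{
		printf("setup() -> %d\n", setup());
		return 0;
	}
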
@@ -212,91 +217,54 @@ count_pages (u64 start, u64 end, void *arg)
        return 0;
 }
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
-       unsigned long *count = arg;
-
-       if (start < MAX_DMA_ADDRESS)
-               *count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
-       return 0;
-}
-#endif
-
 /*
  * Set up the page tables.
  */
 
-void
+void __init
 paging_init (void)
 {
        unsigned long max_dma;
-       unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-       unsigned long zholes_size[MAX_NR_ZONES];
-       unsigned long max_gap;
-#endif
-
-       /* initialize mem_map[] */
-
-       memset(zones_size, 0, sizeof(zones_size));
+       unsigned long nid = 0;
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
 
        num_physpages = 0;
        efi_memmap_walk(count_pages, &num_physpages);
 
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = max_dma;
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-       memset(zholes_size, 0, sizeof(zholes_size));
-
-       num_dma_physpages = 0;
-       efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
-       if (max_low_pfn < max_dma) {
-               zones_size[ZONE_DMA] = max_low_pfn;
-               zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
-       } else {
-               zones_size[ZONE_DMA] = max_dma;
-               zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
-               if (num_physpages > num_dma_physpages) {
-                       zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-                       zholes_size[ZONE_NORMAL] =
-                               ((max_low_pfn - max_dma) -
-                                (num_physpages - num_dma_physpages));
-               }
-       }
-
-       max_gap = 0;
+       efi_memmap_walk(register_active_ranges, &nid);
        efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
        if (max_gap < LARGE_GAP) {
                vmem_map = (struct page *) 0;
-               free_area_init_node(0, NODE_DATA(0), zones_size, 0,
-                                   zholes_size);
+               free_area_init_nodes(max_zone_pfns);
        } else {
                unsigned long map_size;
 
                /* allocate virtual_mem_map */
 
-               map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+               map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+                       sizeof(struct page));
                vmalloc_end -= map_size;
                vmem_map = (struct page *) vmalloc_end;
                efi_memmap_walk(create_mem_map_page_table, NULL);
 
-               NODE_DATA(0)->node_mem_map = vmem_map;
-               free_area_init_node(0, NODE_DATA(0), zones_size,
-                                   0, zholes_size);
+               /*
+                * alloc_node_mem_map makes an adjustment for mem_map
+                * which isn't compatible with vmem_map.
+                */
+               NODE_DATA(0)->node_mem_map = vmem_map +
+                       find_min_pfn_with_active_regions();
+               free_area_init_nodes(max_zone_pfns);
 
                printk("Virtual mem_map starts at 0x%p\n", mem_map);
        }
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
-       if (max_low_pfn < max_dma)
-               zones_size[ZONE_DMA] = max_low_pfn;
-       else {
-               zones_size[ZONE_DMA] = max_dma;
-               zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-       }
-       free_area_init(zones_size);
+       add_active_range(0, 0, max_low_pfn);
+       free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
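
Aside on the paging_init() rework: the per-zone zones_size[]/zholes_size[] arrays and the count_dma_pages() walker are gone. Zone extents are now expressed only as cumulative pfn limits in max_zone_pfns[] (ZONE_DMA up to max_dma, ZONE_NORMAL up to max_low_pfn), usable memory is registered as active ranges (register_active_ranges() / add_active_range()), and free_area_init_nodes() derives the hole accounting from those ranges itself. The vmem_map sizing likewise rounds max_low_pfn up to MAX_ORDER_NR_PAGES so the map covers whole buddy-allocator blocks. Below is a minimal standalone sketch of that contract; the range table, the zone limits, and pages_in_zone() are assumptions for illustration, not the mm core's implementation:

	#include <stdio.h>

	enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

	struct range { unsigned long start, end; };	/* [start, end) pfns */

	/* stand-in for ranges collected via add_active_range() */
	static const struct range active[] = { { 0, 100 }, { 300, 1024 } };

	/* present pages in [lo, hi): intersect the zone with active ranges */
	static unsigned long pages_in_zone(unsigned long lo, unsigned long hi)
	{
		unsigned long n = 0;
		size_t i;

		for (i = 0; i < sizeof(active) / sizeof(active[0]); i++) {
			unsigned long s = active[i].start > lo ? active[i].start : lo;
			unsigned long e = active[i].end < hi ? active[i].end : hi;
			if (e > s)
				n += e - s;
		}
		return n;
	}

	int main(void)
	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];
		unsigned long lo = 0;
		int z;

		max_zone_pfns[ZONE_DMA] = 512;		/* assumed DMA pfn limit */
		max_zone_pfns[ZONE_NORMAL] = 1024;	/* assumed max_low_pfn */

		for (z = 0; z < MAX_NR_ZONES; z++) {
			printf("zone %d: %lu present pages\n",
			       z, pages_in_zone(lo, max_zone_pfns[z]));
			lo = max_zone_pfns[z];
		}
		return 0;
	}

With holes derived from the registered active ranges, the zholes_size bookkeeping that this hunk deletes becomes unnecessary, which is also why count_dma_pages() could be removed above.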