x86, NUMA: Initialize and use remap allocator from setup_node_bootmem()
[pandora-kernel.git] arch/x86/mm/numa_32.c
index bde3906..fbd558f 100644
@@ -41,9 +41,6 @@
 #include <asm/bios_ebda.h>
 #include <asm/proto.h>
 
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-
 /*
  * numa interface - we expect the numa architecture specific code to have
  *                  populated the following initialisation.
@@ -104,32 +101,21 @@ extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
 
-unsigned long node_remap_size[MAX_NUMNODES];
 static void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-static unsigned long kva_start_pfn;
-static unsigned long kva_pages;
-
-int __cpuinit numa_cpu_node(int cpu)
-{
-       return apic->x86_32_numa_cpu_node(cpu);
-}
-
 /*
  * FLAT - support for basic PC memory model with discontig enabled, essentially
  *        a single node with all available processors in it with a flat
  *        memory map.
  */
-int __init get_memcfg_numa_flat(void)
+static int __init get_memcfg_numa_flat(void)
 {
        printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");
 
        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
        memblock_x86_register_active_regions(0, 0, max_pfn);
-       memory_present(0, 0, max_pfn);
-       node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
 
         /* Indicate there is one node available. */
        nodes_clear(node_online_map);
@@ -164,9 +150,8 @@ static void __init allocate_pgdat(int nid)
 {
        char buf[16];
 
-       if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
-               NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
-       else {
+       NODE_DATA(nid) = alloc_remap(nid, ALIGN(sizeof(pg_data_t), PAGE_SIZE));
+       if (!NODE_DATA(nid)) {
                unsigned long pgdat_phys;
                pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT,
@@ -182,25 +167,38 @@ static void __init allocate_pgdat(int nid)
 }
 
 /*
- * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
- * virtual address space (KVA) is reserved and portions of nodes are mapped
- * using it. This is to allow node-local memory to be allocated for
- * structures that would normally require ZONE_NORMAL. The memory is
- * allocated with alloc_remap() and callers should be prepared to allocate
- * from the bootmem allocator instead.
+ * Remap memory allocator
  */
 static unsigned long node_remap_start_pfn[MAX_NUMNODES];
 static void *node_remap_end_vaddr[MAX_NUMNODES];
 static void *node_remap_alloc_vaddr[MAX_NUMNODES];
-static unsigned long node_remap_offset[MAX_NUMNODES];
 
+/**
+ * alloc_remap - Allocate remapped memory
+ * @nid: NUMA node to allocate memory from
+ * @size: The size of allocation
+ *
+ * Allocate @size bytes from the remap area of NUMA node @nid.  The
+ * size of the remap area is predetermined by init_alloc_remap() and
+ * only the callers accounted for there should call this function.  For
+ * more info, please read the comment on top of init_alloc_remap().
+ *
+ * The caller must be ready to handle allocation failure from this
+ * function and fall back to regular memory allocator in such cases.
+ *
+ * CONTEXT:
+ * Single CPU early boot context.
+ *
+ * RETURNS:
+ * Pointer to the allocated memory on success, %NULL on failure.
+ */
 void *alloc_remap(int nid, unsigned long size)
 {
        void *allocation = node_remap_alloc_vaddr[nid];
 
        size = ALIGN(size, L1_CACHE_BYTES);
 
-       if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+       if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
                return NULL;
 
        node_remap_alloc_vaddr[nid] += size;
@@ -209,26 +207,6 @@ void *alloc_remap(int nid, unsigned long size)
        return allocation;
 }
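
As the kernel-doc above stresses, callers must be prepared for alloc_remap()
to return NULL; allocate_pgdat() earlier in this patch is the in-tree example
of that fallback.  A condensed sketch of the idiom, using only interfaces
that appear in this patch (node_alloc_early() itself is a hypothetical
helper, not part of the change):

	static void * __init node_alloc_early(int nid, unsigned long size)
	{
		/* try the per-node remap area first; NULL if absent or full */
		void *p = alloc_remap(nid, size);
		u64 pa;

		if (p)
			return p;

		/* fall back to the early memblock allocator in lowmem */
		pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					    max_low_pfn << PAGE_SHIFT,
					    size, PAGE_SIZE);
		if (pa == MEMBLOCK_ERROR)
			return NULL;
		memblock_x86_reserve_range(pa, pa + size, "NODE DATA");
		return phys_to_virt(pa);
	}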
 
-static void __init remap_numa_kva(void)
-{
-       void *vaddr;
-       unsigned long pfn;
-       int node;
-
-       for_each_online_node(node) {
-               printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
-               for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
-                       vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
-                       printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
-                               (unsigned long)vaddr,
-                               node_remap_start_pfn[node] + pfn);
-                       set_pmd_pfn((ulong) vaddr, 
-                               node_remap_start_pfn[node] + pfn, 
-                               PAGE_KERNEL_LARGE);
-               }
-       }
-}
-
 #ifdef CONFIG_HIBERNATION
 /**
  * resume_map_numa_kva - add KVA mapping to the temporary page tables created
@@ -240,15 +218,16 @@ void resume_map_numa_kva(pgd_t *pgd_base)
        int node;
 
        for_each_online_node(node) {
-               unsigned long start_va, start_pfn, size, pfn;
+               unsigned long start_va, start_pfn, nr_pages, pfn;
 
                start_va = (unsigned long)node_remap_start_vaddr[node];
                start_pfn = node_remap_start_pfn[node];
-               size = node_remap_size[node];
+               nr_pages = (node_remap_end_vaddr[node] -
+                           node_remap_start_vaddr[node]) >> PAGE_SHIFT;
 
                printk(KERN_DEBUG "%s: node %d\n", __func__, node);
 
-               for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+               for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
                        unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
                        pgd_t *pgd = pgd_base + pgd_index(vaddr);
                        pud_t *pud = pud_offset(pgd, vaddr);
@@ -264,132 +243,156 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
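
For reference, the PTRS_PER_PTE stride in the loop above advances one PMD at
a time: by the LARGE_PAGE_BYTES definition near the top of this file
(PTRS_PER_PTE * PAGE_SIZE), that is 4MB on non-PAE x86-32 (1024 entries *
4KB) or 2MB with PAE (512 * 4KB), matching the large-page mappings that
set_pmd_pfn() installs.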
 
-static __init unsigned long calculate_numa_remap_pages(void)
+/**
+ * init_alloc_remap - Initialize remap allocator for a NUMA node
+ * @nid: NUMA node to initialize remap allocator for
+ *
+ * NUMA nodes may end up without any lowmem.  As allocating pgdat and
+ * memmap on a different node with lowmem is inefficient, a special
+ * remap allocator is implemented which can be used by alloc_remap().
+ *
+ * For each node, the amount of memory which will be necessary for
+ * pgdat and memmap is calculated and two memory areas of that size are
+ * allocated - one in the node and the other in lowmem; then, the area
+ * in the node is remapped to the lowmem area.
+ *
+ * As pgdat and memmap must be allocated in lowmem anyway, this
+ * doesn't waste lowmem address space; however, the actual lowmem
+ * which gets remapped over is wasted.  The amount shouldn't be
+ * problematic on machines where this feature will be used.
+ *
+ * Initialization failure isn't fatal.  alloc_remap() is used
+ * opportunistically and the callers will fall back to other memory
+ * allocation mechanisms on failure.
+ */
+void __init init_alloc_remap(int nid, u64 start, u64 end)
 {
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long end_pfn = end >> PAGE_SHIFT;
+       unsigned long size, pfn;
+       u64 node_pa, remap_pa;
+       void *remap_va;
+
+       /*
+        * The acpi/srat node info can show hot-add memory zones where
+        * memory could be added but not currently present.
+        */
+       printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+              nid, start_pfn, end_pfn);
+
+       /* calculate the necessary space aligned to large page size */
+       size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
+       size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+       size = ALIGN(size, LARGE_PAGE_BYTES);
+
+       /* allocate node memory and the lowmem remap area */
+       node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
+       if (node_pa == MEMBLOCK_ERROR) {
+               pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
+                          size, nid);
+               return;
+       }
+       memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+
+       remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+                                         max_low_pfn << PAGE_SHIFT,
+                                         size, LARGE_PAGE_BYTES);
+       if (remap_pa == MEMBLOCK_ERROR) {
+               pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
+                          size, nid);
+               memblock_x86_free_range(node_pa, node_pa + size);
+               return;
+       }
+       memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+       remap_va = phys_to_virt(remap_pa);
+
+       /* perform actual remap */
+       for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
+               set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
+                           (node_pa >> PAGE_SHIFT) + pfn,
+                           PAGE_KERNEL_LARGE);
+
+       /* initialize remap allocator parameters */
+       node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
+       node_remap_start_vaddr[nid] = remap_va;
+       node_remap_end_vaddr[nid] = remap_va + size;
+       node_remap_alloc_vaddr[nid] = remap_va;
+
+       printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
+              nid, node_pa, node_pa + size, remap_va, remap_va + size);
+}
+
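To make the sizing arithmetic above concrete, a rough worked example with
illustrative figures (a 32-byte struct page and 4KB/4MB page sizes are
assumptions, not values taken from this patch):

	/*
	 * Node with 4GB of RAM = 1M pages:
	 *   memmap: 1M pages * 32 bytes           = 32MB
	 *   pgdat : ALIGN(sizeof(pg_data_t), 4KB) =  4KB (say)
	 *   total : ALIGN(32MB + 4KB, 4MB)        = 36MB
	 *
	 * The 36MB is reserved twice: once in the node, which actually
	 * holds the data, and once in lowmem, whose pages are remapped
	 * over and therefore wasted.  The lowmem virtual range would
	 * have been consumed by memmap and pgdat either way, so no
	 * extra address space is lost.
	 */
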
+static int get_memcfg_numaq(void)
+{
+#ifdef CONFIG_X86_NUMAQ
        int nid;
-       unsigned long size, reserve_pages = 0;
 
-       for_each_online_node(nid) {
-               u64 node_kva_target;
-               u64 node_kva_final;
-
-               /*
-                * The acpi/srat node info can show hot-add memroy zones
-                * where memory could be added but not currently present.
-                */
-               printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-                       nid, node_start_pfn[nid], node_end_pfn[nid]);
-               if (node_start_pfn[nid] > max_pfn)
-                       continue;
-               if (!node_end_pfn[nid])
-                       continue;
-               if (node_end_pfn[nid] > max_pfn)
-                       node_end_pfn[nid] = max_pfn;
-
-               /* ensure the remap includes space for the pgdat. */
-               size = node_remap_size[nid] + sizeof(pg_data_t);
-
-               /* convert size to large (pmd size) pages, rounding up */
-               size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
-               /* now the roundup is correct, convert to PAGE_SIZE pages */
-               size = size * PTRS_PER_PTE;
-
-               node_kva_target = round_down(node_end_pfn[nid] - size,
-                                                PTRS_PER_PTE);
-               node_kva_target <<= PAGE_SHIFT;
-               do {
-                       node_kva_final = memblock_find_in_range(node_kva_target,
-                                       ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-                                               ((u64)size)<<PAGE_SHIFT,
-                                               LARGE_PAGE_BYTES);
-                       node_kva_target -= LARGE_PAGE_BYTES;
-               } while (node_kva_final == MEMBLOCK_ERROR &&
-                        (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-               if (node_kva_final == MEMBLOCK_ERROR)
-                       panic("Can not get kva ram\n");
-
-               node_remap_size[nid] = size;
-               node_remap_offset[nid] = reserve_pages;
-               reserve_pages += size;
-               printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
-                                 " node %d at %llx\n",
-                               size, nid, node_kva_final>>PAGE_SHIFT);
-
-               /*
-                *  prevent kva address below max_low_pfn want it on system
-                *  with less memory later.
-                *  layout will be: KVA address , KVA RAM
-                *
-                *  we are supposed to only record the one less then max_low_pfn
-                *  but we could have some hole in high memory, and it will only
-                *  check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
-                *  to use it as free.
-                *  So memblock_x86_reserve_range here, hope we don't run out of that array
-                */
-               memblock_x86_reserve_range(node_kva_final,
-                             node_kva_final+(((u64)size)<<PAGE_SHIFT),
-                             "KVA RAM");
-
-               node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+       if (numa_off)
+               return 0;
+
+       if (numaq_numa_init() < 0) {
+               nodes_clear(numa_nodes_parsed);
+               remove_all_active_ranges();
+               return 0;
+       }
+
+       for_each_node_mask(nid, numa_nodes_parsed)
+               node_set_online(nid);
+       sort_node_map();
+       return 1;
+#else
+       return 0;
+#endif
+}
+
+static int get_memcfg_from_srat(void)
+{
+#ifdef CONFIG_ACPI_NUMA
+       int nid;
+
+       if (numa_off)
+               return 0;
+
+       if (x86_acpi_numa_init() < 0) {
+               nodes_clear(numa_nodes_parsed);
+               remove_all_active_ranges();
+               return 0;
        }
-       printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
-                       reserve_pages);
-       return reserve_pages;
+
+       for_each_node_mask(nid, numa_nodes_parsed)
+               node_set_online(nid);
+       sort_node_map();
+       return 1;
+#else
+       return 0;
+#endif
 }
 
-static void init_remap_allocator(int nid)
+static void get_memcfg_numa(void)
 {
-       node_remap_start_vaddr[nid] = pfn_to_kaddr(
-                       kva_start_pfn + node_remap_offset[nid]);
-       node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
-               (node_remap_size[nid] * PAGE_SIZE);
-       node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
-               ALIGN(sizeof(pg_data_t), PAGE_SIZE);
-
-       printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
-               (ulong) node_remap_start_vaddr[nid],
-               (ulong) node_remap_end_vaddr[nid]);
+       if (get_memcfg_numaq())
+               return;
+       if (get_memcfg_from_srat())
+               return;
+       get_memcfg_numa_flat();
 }
 
 void __init initmem_init(void)
 {
        int nid;
-       long kva_target_pfn;
-
-       /*
-        * When mapping a NUMA machine we allocate the node_mem_map arrays
-        * from node local memory.  They are then mapped directly into KVA
-        * between zone normal and vmalloc space.  Calculate the size of
-        * this space and use it to adjust the boundary between ZONE_NORMAL
-        * and ZONE_HIGHMEM.
-        */
 
        get_memcfg_numa();
        numa_init_array();
 
-       kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
-
-       kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-       do {
-               kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-                                       max_low_pfn<<PAGE_SHIFT,
-                                       kva_pages<<PAGE_SHIFT,
-                                       PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-               kva_target_pfn -= PTRS_PER_PTE;
-       } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
-       if (kva_start_pfn == MEMBLOCK_ERROR)
-               panic("Can not get kva space\n");
+       for_each_online_node(nid) {
+               u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
+               u64 end = min((u64)node_end_pfn[nid] << PAGE_SHIFT,
+                             (u64)max_pfn << PAGE_SHIFT);
 
-       printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
-               kva_start_pfn, max_low_pfn);
-       printk(KERN_INFO "max_pfn = %lx\n", max_pfn);
+               if (start < end)
+                       init_alloc_remap(nid, start, end);
+       }
 
-       /* avoid clash with initrd */
-       memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
-                     (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
-                    "KVA PG");
 #ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
@@ -409,12 +412,8 @@ void __init initmem_init(void)
 
        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(max_low_pfn));
-       for_each_online_node(nid) {
-               init_remap_allocator(nid);
-
+       for_each_online_node(nid)
                allocate_pgdat(nid);
-       }
-       remap_numa_kva();
 
        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(highstart_pfn));
@@ -457,3 +456,37 @@ int memory_add_physaddr_to_nid(u64 addr)
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
+/* temporary shim, will go away soon */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long end_pfn = end >> PAGE_SHIFT;
+
+       printk(KERN_DEBUG "nid %d start_pfn %08lx end_pfn %08lx\n",
+              nid, start_pfn, end_pfn);
+
+       if (start >= (u64)max_pfn << PAGE_SHIFT) {
+               printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
+                      start_pfn, end_pfn);
+               return 0;
+       }
+
+       node_set_online(nid);
+       memblock_x86_register_active_regions(nid, start_pfn,
+                                            min(end_pfn, max_pfn));
+
+       if (!node_has_online_mem(nid)) {
+               node_start_pfn[nid] = start_pfn;
+               node_end_pfn[nid] = end_pfn;
+       } else {
+               node_start_pfn[nid] = min(node_start_pfn[nid], start_pfn);
+               node_end_pfn[nid] = max(node_end_pfn[nid], end_pfn);
+       }
+       return 0;
+}
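
A short worked example of the range merging above (hypothetical SRAT input):
two entries for node 0 covering [0, 1GB) and [2GB, 3GB) leave
node_start_pfn[0] == 0 and node_end_pfn[0] == 0xc0000 (3GB >> PAGE_SHIFT);
the hole between them stays excluded because only the actual ranges are
registered through memblock_x86_register_active_regions().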
+
+/* temporary shim, will go away soon */
+void __init numa_set_distance(int from, int to, int distance)
+{
+       /* nada */
+}