/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
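/*
 * memnode bundles the physical-address-to-node hash: memnode_shift,
 * the memnodemap[] lookup table, and a small embedded map used when
 * the whole table fits in the same cache line as the shift.
 */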
struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost RAM (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;
	unsigned long addr, end;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
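/*
 * Worked example (illustrative): with shift == 24 each memnodemap[]
 * entry covers a 16MB chunk of physical memory, so a node spanning
 * 0x1000000-0x3000000 stamps its node number into entries 1 and 2.
 * phys_to_nid() can then resolve any physical address with a single
 * shift and table lookup.
 */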
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
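/*
 * Note: maps of up to 48 entries stay in memnode.embedded_map, which
 * shares a cache line with memnode_shift, so small configurations pay
 * no extra cache miss on phys_to_nid().  Anything larger is placed in
 * a cache-aligned region found via find_e820_area() above.
 */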
/*
 * The LSB of all start addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}
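/*
 * Example (illustrative): nodes starting at 0x0 and 0x10000000 (256MB)
 * OR together to 0x10000000, whose lowest set bit is bit 28, so a
 * shift of 28 (one map entry per 256MB) is the coarsest hash that
 * still keeps the nodes apart.  A start of 0 contributes no bits.
 */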
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE, "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}
#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
	       unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
			     bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}
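/*
 * Resulting layout: both the pg_data_t and the bootmem bitmap are
 * carved out of the node's own memory where possible, then reserved
 * above so the bootmem allocator never hands them out again.
 */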
/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate mem_map at the end to not fill up precious
	   <4GB memory. */
	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				     memmapsize, SMP_CACHE_BYTES,
				     round_down(limit - memmapsize, PAGE_SIZE),
				     limit);
#endif
}
void __init numa_init_array(void)
{
	int rr, i;

	/* There are unfortunately some poorly designed mainboards around
	   that only connect memory to a single CPU. This breaks the 1:1
	   cpu->node mapping. To avoid this, fill in the mapping for all
	   possible CPUs, as the number of CPUs is not known yet.
	   We round-robin over the existing nodes. */
	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
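/*
 * Example (illustrative): with nodes 0 and 1 online and four CPUs
 * still unmapped, the loop above assigns them nodes 0, 1, 0, 1 in
 * turn.
 */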
#ifdef CONFIG_NUMA_EMU
int numa_fake __initdata = 0;

/* NUMA emulation: carve memory into numa_fake equally sized nodes */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	struct bootnode nodes[MAX_NUMNODES];
	unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;

	/* Kludge needed for the hash function: round the node size
	   down to a power of two */
	if (hweight64(sz) > 1) {
		unsigned long x = 1;
		while ((x << 1) < sz)
			x <<= 1;
		if (x < sz/2)
			printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
		sz = x;
	}

	memset(&nodes, 0, sizeof(nodes));
	for (i = 0; i < numa_fake; i++) {
		nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
		if (i == numa_fake-1)
			sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
		nodes[i].end = nodes[i].start + sz;
		printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
		       i,
		       nodes[i].start, nodes[i].end,
		       (nodes[i].end - nodes[i].start) >> 20);
		node_set_online(i);
	}
	memnode_shift = compute_hash_shift(nodes, numa_fake);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
		return -1;
	}
	for_each_online_node(i) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
					     nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	numa_init_array();
	return 0;
}
#endif
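/*
 * Worked example (illustrative): booting a 4GB machine with
 * "numa=fake=3" gives sz = 1365MB, which the power-of-two kludge
 * rounds down to 1GB; nodes 0 and 1 get 1GB each and the last node
 * absorbs the remaining 2GB.
 */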
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

#ifdef CONFIG_NUMA_EMU
	if (numa_fake && !numa_emulation(start_pfn, end_pfn))
		return;
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node[cpu] = node;
}
unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}
#ifdef CONFIG_SPARSEMEM
static void __init arch_sparse_init(void)
{
	int i;

	for_each_online_node(i)
		memory_present(i, node_start_pfn(i), node_end_pfn(i));

	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif
void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;		/* below 16MB */
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;	/* below 4GB */
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	arch_sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5)) {
		numa_fake = simple_strtoul(opt+5, NULL, 0);
		if (numa_fake >= MAX_NUMNODES)
			numa_fake = MAX_NUMNODES;
	}
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}
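/*
 * Command line usage: "numa=off" disables NUMA, "numa=fake=4" splits
 * memory into four emulated nodes, "numa=noacpi" ignores the ACPI
 * SRAT, and "numa=hotadd=N" tunes how much memory may be set aside
 * for SRAT hot-add regions, as a percentage.
 */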
early_param("numa", numa_setup);
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (running a kernel compiled
 * for NUMA on a non-NUMA box), which is fine: cpu_to_node[]
 * has already been initialized in a round-robin manner by
 * numa_init_array() before this call, and that initialization
 * is good enough for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid[i];
		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);
#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state.
 * Should do that.
 */

int pfn_valid(unsigned long pfn)
{
	unsigned nid;

	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif