/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t cpu_nodes_parsed __initdata;
nodemask_t mem_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static int num_node_memblks __initdata;
static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;

struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
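
/*
 * Editor's note (not in the original source): node_memblk_range[] and
 * memblk_nodeid[] are parallel arrays; entry i records one physical memory
 * block [start, end) reported by the firmware parser and the NUMA node that
 * owns it, while numa_nodes[] keeps the overall [start, end) span per node.
 */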

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift, int *nodeids)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;
			memnodemap[addr >> shift] = nodeids[i];
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
				      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
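
/*
 * Editor's note, illustration only (not in the original source): the map has
 * memnodemapsize = (memtop >> shift) + 1 entries of s16.  For example, 64GB
 * of RAM with a shift of 30 (1GB granularity) needs 65 entries, i.e. 130
 * bytes; with a shift of 24 (16MB granularity) it would need 4097 entries,
 * about 8KB.  Whether the small case fits memnode.embedded_map depends on
 * ARRAY_SIZE(memnode.embedded_map) in <asm/mmzone_64.h>.
 */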

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}

static int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
				     int *nodeids)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
		shift);

	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
		printk(KERN_INFO "Your memory is not aligned you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Put it as high as possible; NODE_DATA() and related
	 * early per-node data will be placed in this range.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);

	return NULL;
}
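
/*
 * Editor's note (not in the original source): early_node_mem() first tries a
 * node-local memblock range above the DMA/DMA32 boundaries, and only if that
 * fails does it fall back to any mapped memory above 16MB, so a node's pgdat
 * may end up off-node; setup_node_bootmem() reports that case.
 */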

static __init int conflicting_memblks(unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < num_node_memblks; i++) {
		struct bootnode *nd = &node_memblk_range[i];

		if (nd->start == nd->end)
			continue;
		if (nd->end > start && nd->start < end)
			return memblk_nodeid[i];
		if (nd->end == end && nd->start == start)
			return memblk_nodeid[i];
	}
	return -1;
}

int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	int i;

	i = conflicting_memblks(start, end);
	if (i == nid) {
		printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
		       nid, start, end, numa_nodes[i].start, numa_nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
		       nid, start, end, i,
		       numa_nodes[i].start, numa_nodes[i].end);
		return -EINVAL;
	}

	node_memblk_range[num_node_memblks].start = start;
	node_memblk_range[num_node_memblks].end = end;
	memblk_nodeid[num_node_memblks] = nid;
	num_node_memblks++;
	return 0;
}

static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &numa_nodes[i];

	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	if (!end)
		return;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
		nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	unsigned long numaram, e820ram;
	int i;

	numaram = 0;
	for_each_node_mask(i, mem_nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(i, s, e);
		if ((long)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn -
		(memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1<<(20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(numaram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}
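
/*
 * Editor's note, worked example (not in the original source): with 4096MB of
 * e820 RAM, a node map covering 4095.5MB passes (less than 1MB missing),
 * while one covering only 4095MB misses a full 1MB and the parsed NUMA
 * configuration is rejected in favour of the next init method.
 */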

static int __init numa_register_memblks(void)
{
	int i;

	/*
	 * Join together blocks on the same node, holes between
	 * which don't overlap with memory on other nodes.
	 */
	for (i = 0; i < num_node_memblks; ++i) {
		int j, k;

		for (j = i + 1; j < num_node_memblks; ++j) {
			unsigned long start, end;

			if (memblk_nodeid[i] != memblk_nodeid[j])
				continue;
			start = min(node_memblk_range[i].end,
				    node_memblk_range[j].end);
			end = max(node_memblk_range[i].start,
				  node_memblk_range[j].start);
			for (k = 0; k < num_node_memblks; ++k) {
				if (memblk_nodeid[i] == memblk_nodeid[k])
					continue;
				if (start < node_memblk_range[k].end &&
				    end > node_memblk_range[k].start)
					break;
			}
			if (k < num_node_memblks)
				continue;
			start = min(node_memblk_range[i].start,
				    node_memblk_range[j].start);
			end = max(node_memblk_range[i].end,
				  node_memblk_range[j].end);
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       memblk_nodeid[i],
			       node_memblk_range[i].start,
			       node_memblk_range[i].end,
			       node_memblk_range[j].start,
			       node_memblk_range[j].end,
			       start, end);
			node_memblk_range[i].start = start;
			node_memblk_range[i].end = end;
			k = --num_node_memblks - j;
			memmove(memblk_nodeid + j, memblk_nodeid + j+1,
				k * sizeof(*memblk_nodeid));
			memmove(node_memblk_range + j, node_memblk_range + j+1,
				k * sizeof(*node_memblk_range));
			--j;
		}
	}

	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
					   memblk_nodeid);
	if (memnode_shift < 0) {
		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
		return -EINVAL;
	}

	for (i = 0; i < num_node_memblks; i++)
		memblock_x86_register_active_regions(memblk_nodeid[i],
				node_memblk_range[i].start >> PAGE_SHIFT,
				node_memblk_range[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!nodes_cover_memory(numa_nodes))
		return -EINVAL;

	init_memory_mapping_high();

	/* Finally register nodes. */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);

	/*
	 * Try again in case setup_node_bootmem() missed one due to missing
	 * bootmem.
	 */
	for_each_node_mask(i, node_possible_map)
		if (!node_online(i))
			setup_node_bootmem(i, numa_nodes[i].start,
					   numa_nodes[i].end);

	return 0;
}
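
/*
 * Editor's note, worked example (not in the original source): if node 0 owns
 * blocks [0, 2G) and [3G, 4G) and node 1 owns [2G, 3G), the two node 0 blocks
 * are left alone because the hole [2G, 3G) between them overlaps node 1's
 * memory.  Without node 1, the merge loop above would join them into a
 * single block [0, 4G).
 */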

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
	cmdline = str;
}

static int __init setup_physnodes(unsigned long start, unsigned long end)
{
	int ret = 0;
	int i;

	memset(physnodes, 0, sizeof(physnodes));

	for_each_node_mask(i, mem_nodes_parsed) {
		physnodes[i].start = numa_nodes[i].start;
		physnodes[i].end = numa_nodes[i].end;
	}

	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the
	 * mem= kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to
	 * cover the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}

static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
	int i;

	BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_fake_nodes(nodes, nr_nodes);
#endif
	if (!acpi && !amd)
		for (i = 0; i < nr_cpu_ids; i++)
			numa_set_node(i, 0);
}

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is
 * 0 if there is additional memory left for allocation past addr and -1
 * otherwise.  addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
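
/*
 * Editor's note, worked example (not in the original source): with *addr = 0,
 * size = 1GB and max_addr = 4GB, fake node nid covers [0, 1GB), *addr is
 * advanced to 1GB and 0 is returned; once a node's end would reach or pass
 * max_addr it is clamped there and -1 is returned, signalling the caller that
 * the physical node is exhausted.
 */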

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
				memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
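
/*
 * Editor's note, illustration only (not in the original source): on a machine
 * with two 8GB physical nodes and "numa=fake=4", size works out to roughly
 * 4GB (minus holes, rounded down to a multiple of FAKE_NODE_MIN_SIZE), and
 * the loop above carves two fake nodes out of each physical node in turn, so
 * the fake nodes stay interleaved across physical nodes rather than being
 * cut from one end of memory.
 */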

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);
	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
					       physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Setup the fake node that will be allocated as bootmem
			 * later.  If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
			unsigned long last_pfn, int acpi, int amd)
{
	static int nodeid[NR_NODE_MEMBLKS] __initdata;
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;
	int num_nodes;
	int i;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		u64 size;

		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, n);
	}

	if (num_nodes < 0)
		return num_nodes;

	for (i = 0; i < ARRAY_SIZE(nodeid); i++)
		nodeid[i] = i;

	memnode_shift = compute_hash_shift(nodes, num_nodes, nodeid);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered for
	 * the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	setup_physnodes(addr, max_addr);
	fake_physnodes(acpi, amd, num_nodes);
	numa_init_array();
	return 0;
}
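
/*
 * Editor's note, usage illustration (not in the original source):
 * "numa=fake=8" goes through split_nodes_interleave() and carves the machine
 * into 8 roughly equal fake nodes, while "numa=fake=512M" goes through
 * split_nodes_size_interleave() and creates as many 512MB fake nodes as fit
 * (the size is raised if 512MB would exceed MAX_NUMNODES nodes).
 */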

#endif /* CONFIG_NUMA_EMU */

static int dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, cpu_nodes_parsed);
	node_set(0, mem_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
	numa_nodes[0].start = 0;
	numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT;

	return 0;
}
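
/*
 * Editor's note (not in the original source): initmem_init() below tries the
 * configured init methods in order: ACPI SRAT (x86_acpi_numa_init), then the
 * AMD northbridge scan (amd_numa_init), then dummy_numa_init() as the
 * single-node fallback.  All parsed state is reset before each attempt and
 * the first method whose result survives the later sanity checks wins.
 */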

void __init initmem_init(void)
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };
	int i, j;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
		numa_init[1] = amd_numa_init;
#endif
	}

	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		if (!numa_init[i])
			continue;

		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);

		nodes_clear(cpu_nodes_parsed);
		nodes_clear(mem_nodes_parsed);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
		num_node_memblks = 0;
		memset(node_memblk_range, 0, sizeof(node_memblk_range));
		memset(memblk_nodeid, 0, sizeof(memblk_nodeid));
		memset(numa_nodes, 0, sizeof(numa_nodes));
		remove_all_active_ranges();

		if (numa_init[i]() < 0)
			continue;

		/* clean up the node list */
		for (j = 0; j < MAX_NUMNODES; j++)
			cutoff_node(j, 0, max_pfn << PAGE_SHIFT);

#ifdef CONFIG_NUMA_EMU
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
			return;
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
#endif
		/* Account for nodes with cpus and no memory */
		nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
		if (WARN_ON(nodes_empty(node_possible_map)))
			continue;

		if (numa_register_memblks() < 0)
			continue;

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);

			if (nid == NUMA_NO_NODE)
				continue;
			if (!node_online(nid))
				numa_clear_node(j);
		}
		numa_init_array();
		return;
	}
	BUG();
}

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * it's enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	unsigned long addr;
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	/*
	 * Use the starting address of the emulated node to find which physical
	 * node it is allocated on.
	 */
	addr = node_start_pfn(nid) << PAGE_SHIFT;
	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			break;

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid) {
		addr = node_start_pfn(nid) << PAGE_SHIFT;
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
	}
}

void __cpuinit numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
# else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	int i;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	for_each_online_node(i) {
		unsigned long addr;

		addr = node_start_pfn(i) << PAGE_SHIFT;
		if (addr < physnodes[node].start ||
		    addr >= physnodes[node].end)
			continue;
		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif	/* CONFIG_NUMA_EMU */