/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct numa_memblk {
	u64			start;
	u64			end;
	int			nid;
};

struct numa_meminfo {
	int			nr_blks;
	struct numa_memblk	blk[NR_NODE_MEMBLKS];
};

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t cpu_nodes_parsed __initdata;
nodemask_t mem_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static struct numa_meminfo numa_meminfo __initdata;

struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
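
/*
 * numa_meminfo collects the raw node ranges reported by the ACPI/AMD
 * parsers through numa_add_memblk().  compute_hash_shift() below turns it
 * into memnodemap[], a flat table indexed by
 * (physical address >> memnode_shift) that maps an address to its node id.
 */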

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns :
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < mi->nr_blks; i++) {
		addr = mi->blk[i].start;
		end = mi->blk[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;
			memnodemap[addr >> shift] = mi->blk[i].nid;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
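
/*
 * For example (illustrative values): with shift == 27 each memnodemap[]
 * entry covers 128MB, so a block [0x0, 0x10000000) belonging to node 0
 * fills memnodemap[0] and memnodemap[1] with 0.  A block that later claims
 * an already-assigned slot for a different node makes this return -1.
 */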

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < mi->nr_blks; i++) {
		start = mi->blk[i].start;
		end = mi->blk[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}
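
/*
 * For example, blocks bounded at 0x8000000 and 0x10000000 share no set bit
 * below bit 27, so 27 is the coarsest usable shift and memnodemapsize
 * becomes (memtop >> 27) + 1.
 */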

static int __init compute_hash_shift(const struct numa_meminfo *mi)
{
	int shift;

	shift = extract_lsb_from_nodes(mi);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(mi, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}
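
/*
 * Once memnode_shift and memnodemap[] are set up, phys_to_nid() (see
 * asm/mmzone_64.h) resolves an address with a single lookup, roughly:
 *
 *	nid = memnodemap[addr >> memnode_shift];
 */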

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Put the allocation as high as possible: it backs NODE_DATA, so
	 * keep it out of the DMA and DMA32 zones when we can.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);

	return NULL;
}
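
/*
 * Callers treat a NULL return as "no memory for this node"; for instance,
 * setup_node_bootmem() below simply skips a node whose NODE_DATA cannot be
 * placed anywhere.
 */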

int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	struct numa_meminfo *mi = &numa_meminfo;

	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
			   nid, start, end);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}
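
/*
 * The ACPI SRAT and AMD topology parsers feed detected ranges through this
 * helper; dummy_numa_init() below does the same for the single-node
 * fallback, e.g. numa_add_memblk(0, 0, max_pfn << PAGE_SHIFT).
 */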

static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	if (!end)
		return;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
	       nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}
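
/*
 * Note that early_node_mem() may fall back to another node's memory, so
 * NODE_DATA(nodeid) can end up off-node; the "NODE_DATA(%d) on node %d"
 * message above reports exactly that case.
 */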

static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = (u64)max_pfn << PAGE_SHIFT;
	int i, j, k;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start == bi->end) {
			numa_remove_memblk_from(i--, mi);
			continue;
		}

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			unsigned long start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
					       bi->nid, bi->start, bi->end,
					       bj->nid, bj->start, bj->end);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
					   bi->nid, bi->start, bi->end,
					   bj->start, bj->end);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = max(min(bi->start, bj->start), low);
			end = min(max(bi->end, bj->end), high);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       bi->nid, bi->start, bi->end, bj->start, bj->end,
			       start, end);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
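
/*
 * For example (illustrative addresses), two adjacent blocks reported for
 * node 0, [0x0,0x80000000) and [0x80000000,0x100000000), are merged into a
 * single block [0x0,0x100000000) as long as no other node's memory falls
 * inside the combined range.
 */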

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	unsigned long numaram, e820ram;
	int i;

	numaram = 0;
	for_each_node_mask(i, mem_nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(i, s, e);
		if ((long)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
				max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i;

	/* Account for nodes with cpus and no memory */
	nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	memnode_shift = compute_hash_shift(mi);
	if (memnode_shift < 0) {
		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
		return -EINVAL;
	}

	for (i = 0; i < mi->nr_blks; i++)
		memblock_x86_register_active_regions(mi->blk[i].nid,
					mi->blk[i].start >> PAGE_SHIFT,
					mi->blk[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!nodes_cover_memory(numa_nodes))
		return -EINVAL;

	init_memory_mapping_high();

	/* Finally register nodes. */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);

	/*
	 * Try again in case setup_node_bootmem() missed one due to missing
	 * bootmem.
	 */
	for_each_node_mask(i, node_possible_map)
		if (!node_online(i))
			setup_node_bootmem(i, numa_nodes[i].start,
					   numa_nodes[i].end);

	return 0;
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
	cmdline = str;
}

static int __init setup_physnodes(unsigned long start, unsigned long end)
{
	int ret = 0;
	int i;

	memset(physnodes, 0, sizeof(physnodes));

	for_each_node_mask(i, mem_nodes_parsed) {
		physnodes[i].start = numa_nodes[i].start;
		physnodes[i].end = numa_nodes[i].end;
	}

	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
	 * kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to cover
	 * the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}

static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
	int i;

	BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_fake_nodes(nodes, nr_nodes);
#endif
	if (!acpi && !amd)
		for (i = 0; i < nr_cpu_ids; i++)
			numa_set_node(i, 0);
}

/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
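
/*
 * For example, carving a 4GB fake node 2 out of a physical node that
 * currently starts at 0x100000000 logs "Faking node 2 at
 * 0000000100000000-0000000200000000 (4096MB)" and advances *addr to the
 * new node's end.
 */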

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
			       memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
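
/*
 * For example, "numa=fake=4" on a machine with 16GB of usable RAM on a
 * single physical node yields four fake nodes of roughly 4GB each, subject
 * to FAKE_NODE_MIN_SIZE rounding and the ZONE_DMA32 boundary handling
 * above.
 */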

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
		       size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);
	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
					       physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Setup the fake node that will be allocated as bootmem
			 * later.  If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
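
/*
 * For example, "numa=fake=512M" carves the usable address space into 512MB
 * fake nodes spread across the physical nodes, growing the per-node size
 * first if that many nodes would exceed MAX_NUMNODES.
 */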

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
			unsigned long last_pfn, int acpi, int amd)
{
	static struct numa_meminfo ei __initdata;
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;
	int num_nodes;
	int i;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		u64 size;

		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, n);
	}

	if (num_nodes < 0)
		return num_nodes;

	ei.nr_blks = num_nodes;
	for (i = 0; i < ei.nr_blks; i++) {
		ei.blk[i].start = nodes[i].start;
		ei.blk[i].end = nodes[i].end;
		ei.blk[i].nid = i;
	}

	memnode_shift = compute_hash_shift(&ei);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered for
	 * the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						     nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	setup_physnodes(addr, max_addr);
	fake_physnodes(acpi, amd, num_nodes);
	return 0;
}
#endif /* CONFIG_NUMA_EMU */

static int dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, cpu_nodes_parsed);
	node_set(0, mem_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
	numa_nodes[0].start = 0;
	numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT;

	return 0;
}
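
/*
 * initmem_init() tries the init routines in order of preference:
 * numa_init[0] = ACPI SRAT, numa_init[1] = AMD northbridge, and
 * numa_init[2] = the dummy single-node fallback above.  All parsing state
 * is reset before each attempt and the first routine to succeed wins.
 */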

void __init initmem_init(void)
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };
	int i, j;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
		numa_init[1] = amd_numa_init;
#endif
	}

	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		if (!numa_init[i])
			continue;

		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);

		nodes_clear(cpu_nodes_parsed);
		nodes_clear(mem_nodes_parsed);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
		memset(&numa_meminfo, 0, sizeof(numa_meminfo));
		memset(numa_nodes, 0, sizeof(numa_nodes));
		remove_all_active_ranges();

		if (numa_init[i]() < 0)
			continue;

		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
			continue;
#ifdef CONFIG_NUMA_EMU
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
			return;
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
#endif
		if (numa_register_memblks(&numa_meminfo) < 0)
			continue;

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);

			if (nid == NUMA_NO_NODE)
				continue;
			if (!node_online(nid))
				numa_clear_node(j);
		}
		return;
	}
	BUG();
}

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * it is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	unsigned long addr;
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	/*
	 * Use the starting address of the emulated node to find which physical
	 * node it is allocated on.
	 */
	addr = node_start_pfn(nid) << PAGE_SHIFT;
	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			break;

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid) {
		addr = node_start_pfn(nid) << PAGE_SHIFT;
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
	}
}

void __cpuinit numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
# else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	int i;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	for_each_online_node(i) {
		unsigned long addr;

		addr = node_start_pfn(i) << PAGE_SHIFT;
		if (addr < physnodes[node].start ||
		    addr >= physnodes[node].end)
			continue;
		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif	/* CONFIG_NUMA_EMU */