/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/proto.h>
#include <asm/amd_nb.h>
struct numa_meminfo {
	int nr_blks;
	struct numa_memblk blk[NR_NODE_MEMBLKS];
};
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t numa_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static struct numa_meminfo numa_meminfo __initdata;

static int numa_distance_cnt;
static u8 *numa_distance;
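/*
 * numa_distance is a flat numa_distance_cnt x numa_distance_cnt table of u8
 * distances; the distance from node @from to node @to is stored at index
 * [from * numa_distance_cnt + to].  It is allocated lazily by
 * numa_set_distance() and freed again by numa_reset_distance().
 */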
/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
{
	unsigned long addr, end;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < mi->nr_blks; i++) {
		addr = mi->blk[i].start;
		end = mi->blk[i].end;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		if (memnodemap[addr >> shift] != NUMA_NO_NODE)
			return -1;
		memnodemap[addr >> shift] = mi->blk[i].nid;
		addr += (1UL << shift);
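/*
 * Allocate memnodemap[]: the small map embedded in struct memnode is used
 * when it is big enough, otherwise a cache-aligned chunk is taken from
 * memblock and reserved.
 */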
static int __init allocate_cachealigned_memnodemap(void)
{
	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
				      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < mi->nr_blks; i++) {
		start = mi->blk[i].start;
		end = mi->blk[i].end;

	i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}
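/*
 * Illustrative example (not from the original source): with node ranges
 * [0, 4G) and [4G, 8G), every non-zero start/end boundary has its lowest set
 * bit at bit 32 or above, so the extracted shift is 32 and memnodemapsize
 * becomes (8G >> 32) + 1 = 3 entries; each memnodemap[] slot then covers 4G
 * of physical address space.
 */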
static int __init compute_hash_shift(const struct numa_meminfo *mi)
{
	int shift;

	shift = extract_lsb_from_nodes(mi);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
	       shift);

	if (populate_memnodemap(mi, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned, you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
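/*
 * early_node_mem() is the boot-time allocator used before bootmem is up.  It
 * steers the allocation above the DMA zone (and above DMA32 when the node
 * extends beyond it), tries a node-local memblock range first and then falls
 * back to searching all mapped memory.
 */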
static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Put it as high as possible: keep this early allocation out of the
	 * DMA and DMA32 zones, since NODE_DATA and friends do not need low
	 * memory.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);
	return NULL;
}
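/*
 * numa_add_memblk_to() appends the range [@start, @end) for @nid to @mi after
 * sanity-checking it; numa_add_memblk() is the public wrapper that targets
 * the global numa_meminfo table filled in by the SRAT, AMD and dummy parsers.
 */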
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
			   nid, start, end);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
	       nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}
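/*
 * numa_cleanup_meminfo() sanitizes the parsed memblk list: blocks are clamped
 * to the usable physical range, empty blocks are dropped, overlaps between
 * blocks on different nodes are treated as errors, and blocks on the same
 * node are merged when the combined range does not overlap memory on other
 * nodes.
 */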
static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = (u64)max_pfn << PAGE_SHIFT;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start == bi->end) {
			numa_remove_memblk_from(i--, mi);
			continue;
		}

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			unsigned long start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
					       bi->nid, bi->start, bi->end,
					       bj->nid, bj->start, bj->end);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
					   bi->nid, bi->start, bi->end,
					   bj->start, bj->end);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = max(min(bi->start, bj->start), low);
			end = min(max(bi->end, bj->end), high);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       bi->nid, bi->start, bi->end, bj->start, bj->end,
			       start, end);
			numa_remove_memblk_from(j--, mi);
		}
	}

	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
/*
 * Reset distance table.  The current table is freed.  The next
 * numa_set_distance() call will create a new one.
 */
static void __init numa_reset_distance(void)
{
	if (numa_distance_cnt) {
		size = numa_distance_cnt * numa_distance_cnt *
		       sizeof(numa_distance[0]);
		memblock_x86_free_range(__pa(numa_distance),
					__pa(numa_distance) + size);
		numa_distance_cnt = 0;
	}
	numa_distance = NULL;
}
/*
 * Set the distance from node @from to node @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance) {
		nodemask_t nodes_parsed;

		/* size the new table and allocate it */
		nodes_parsed = numa_nodes_parsed;
		numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

		for_each_node_mask(i, nodes_parsed)
			cnt = i;
		cnt++;
		size = cnt * cnt * sizeof(numa_distance[0]);

		phys = memblock_find_in_range(0,
					      (u64)max_pfn_mapped << PAGE_SHIFT,
					      size, PAGE_SIZE);
		if (phys == MEMBLOCK_ERROR) {
			pr_warning("NUMA: Warning: can't allocate distance table!\n");
			/* don't retry until explicitly reset */
			numa_distance = (void *)1LU;
			return;
		}
		memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

		numa_distance = __va(phys);
		numa_distance_cnt = cnt;

		/* fill with the default distances */
		for (i = 0; i < cnt; i++)
			for (j = 0; j < cnt; j++)
				numa_distance[i * cnt + j] = i == j ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
		printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
	}

	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
			    from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
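/*
 * __node_distance() is the SLIT-style lookup behind node_distance().  As an
 * illustrative example (values not from this file), a firmware table parser
 * could call numa_set_distance(0, 1, 20), after which __node_distance(0, 1)
 * returns 20, while pairs outside the table fall back to
 * LOCAL_DISTANCE/REMOTE_DISTANCE.
 */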
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	unsigned long numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
		unsigned long e = mi->blk[i].end >> PAGE_SHIFT;

		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((long)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
					max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
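/*
 * numa_register_memblks() turns the cleaned-up meminfo into live kernel
 * state: it computes the pfn->nid hash, registers each block as an active
 * memory range, verifies the nodes cover the e820 map, sets up the high
 * kernel mappings and finally calls setup_node_bootmem() for every possible
 * node.
 */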
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	memnode_shift = compute_hash_shift(mi);
	if (memnode_shift < 0) {
		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
		return -EINVAL;
	}

	for (i = 0; i < mi->nr_blks; i++)
		memblock_x86_register_active_regions(mi->blk[i].nid,
					mi->blk[i].start >> PAGE_SHIFT,
					mi->blk[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	init_memory_mapping_high();

	/*
	 * Finally register nodes.  Do it twice in case setup_node_bootmem
	 * missed one due to missing bootmem.
	 */
	for (i = 0; i < 2; i++) {
		for_each_node_mask(nid, node_possible_map) {
			u64 start = (u64)max_pfn << PAGE_SHIFT;
			u64 end = 0;

			if (node_online(nid))
				continue;

			for (j = 0; j < mi->nr_blks; j++) {
				if (nid != mi->blk[j].nid)
					continue;
				start = min(mi->blk[j].start, start);
				end = max(mi->blk[j].end, end);
			}

			setup_node_bootmem(nid, start, end);
#ifdef CONFIG_NUMA_EMU
static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
static char *emu_cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
	emu_cmdline = str;
}

static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
{
	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].nid == nid)
			return i;
	return -ENOENT;
}
/*
 * Sets up nid to range from @start to @end.  The return value is -errno if
 * something went wrong, 0 otherwise.
 */
static int __init emu_setup_memblk(struct numa_meminfo *ei,
				   struct numa_meminfo *pi,
				   int nid, int phys_blk, u64 size)
{
	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
	struct numa_memblk *pb = &pi->blk[phys_blk];

	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
		return -EINVAL;
	}

	ei->nr_blks++;
	eb->start = pb->start;
	eb->end = pb->start + size;
	eb->nid = nid;

	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
		emu_nid_to_phys[nid] = pb->nid;

	pb->start += size;
	if (pb->start >= pb->end) {
		WARN_ON_ONCE(pb->start > pb->end);
		numa_remove_memblk_from(phys_blk, pi);
	}

	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       eb->start, eb->end, (eb->end - eb->start) >> 20);
	return 0;
}
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
					 struct numa_meminfo *pi,
					 u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;

	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < pi->nr_blks; i++)
		node_set(pi->blk[i].nid, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;
			end = start + size;

			if (nid < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - start -
			       memblock_x86_hole_size(start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > limit) {
					end = limit;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end -
			    memblock_x86_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
					       phys_blk,
					       min(end, limit) - start);
/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}
/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
					      struct numa_meminfo *pi,
					      u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;

	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
		MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
			FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < pi->nr_blks; i++)
		node_set(pi->blk[i].nid, physnode_mask);

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 start, limit, end;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;

			end = find_end_of_node(start, limit, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end -
			    memblock_x86_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
					       phys_blk,
					       min(end, limit) - start);
/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
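/*
 * Typical uses (illustrative): "numa=fake=8" splits RAM into 8 evenly sized
 * emulated nodes, while "numa=fake=512M" carves it into 512MB nodes; both
 * forms interleave the fake nodes across the underlying physical nodes.
 */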
static bool __init numa_emulation(void)
{
	static struct numa_meminfo ei __initdata;
	static struct numa_meminfo pi __initdata;
	const u64 max_addr = max_pfn << PAGE_SHIFT;
	int phys_dist_cnt = numa_distance_cnt;
	u8 *phys_dist = NULL;

	memset(&ei, 0, sizeof(ei));

	for (i = 0; i < MAX_NUMNODES; i++)
		emu_nid_to_phys[i] = NUMA_NO_NODE;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
		u64 size;

		size = memparse(emu_cmdline, &emu_cmdline);
		ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
	} else {
		n = simple_strtoul(emu_cmdline, NULL, 0);
		ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
	}
	if (numa_cleanup_meminfo(&ei) < 0) {
		pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");

	/*
	 * Copy the original distance table.  It's temporary so no need to
	 * reserve it.
	 */
	size_t size = phys_dist_cnt * phys_dist_cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0,
				      (u64)max_pfn_mapped << PAGE_SHIFT,
				      size, PAGE_SIZE);
	if (phys == MEMBLOCK_ERROR) {
		pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");

	phys_dist = __va(phys);
	memcpy(phys_dist, numa_distance, size);
	/*
	 * Transform __apicid_to_node table to use emulated nids by
	 * reverse-mapping phys_nid.  The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
		if (__apicid_to_node[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
			if (__apicid_to_node[i] == emu_nid_to_phys[j])
				break;
		__apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
	}

	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = 0;
	/* transform distance table */
	numa_reset_distance();
	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++) {
			int physi = emu_nid_to_phys[i];
			int physj = emu_nid_to_phys[j];
			int dist;

			if (physi >= phys_dist_cnt || physj >= phys_dist_cnt)
				dist = physi == physj ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
			else
				dist = phys_dist[physi * phys_dist_cnt + physj];

			numa_set_distance(i, j, dist);
		}
	}
#endif /* CONFIG_NUMA_EMU */

static int dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);

	return 0;
}
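/*
 * initmem_init() tries the NUMA detection methods in order: ACPI SRAT (when
 * CONFIG_ACPI_NUMA), the AMD northbridge scan (when CONFIG_AMD_NUMA) and
 * finally the dummy single-node fallback above.  All NUMA state is reset
 * before each attempt, and the first parser whose meminfo survives cleanup
 * and registration wins.
 */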
void __init initmem_init(void)
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };

#ifdef CONFIG_ACPI_NUMA
	numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
	numa_init[1] = amd_numa_init;
#endif

	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);

		nodes_clear(numa_nodes_parsed);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
		memset(&numa_meminfo, 0, sizeof(numa_meminfo));
		remove_all_active_ranges();
		numa_reset_distance();

		if (numa_init[i]() < 0)
			continue;

		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
			continue;
#ifdef CONFIG_NUMA_EMU
		/*
		 * If requested, try emulation.  If emulation is not used,
		 * build identity emu_nid_to_phys[] for numa_add_cpu()
		 */
		if (!emu_cmdline || !numa_emulation())
			for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
				emu_nid_to_phys[j] = j;
#endif
		if (numa_register_memblks(&numa_meminfo) < 0)
			continue;

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);

			if (nid == NUMA_NO_NODE)
				continue;
			if (!node_online(nid))
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));
	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}
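/*
 * numa_cpu_node() resolves a cpu to its node via the early per-cpu apicid and
 * the __apicid_to_node[] table filled in by the firmware parsers (or remapped
 * by NUMA emulation above).
 */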
int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * CONFIG_NUMA_EMU is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	physnid = emu_nid_to_phys[nid];

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid)
		if (emu_nid_to_phys[nid] == physnid)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
# else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	struct cpumask *mask;
	int nid, physnid, i;

	nid = early_cpu_to_node(cpu);
	if (nid == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}

	physnid = emu_nid_to_phys[nid];

	for_each_online_node(i) {
		if (emu_nid_to_phys[i] != physnid)
			continue;

		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}
void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif	/* CONFIG_NUMA_EMU */