X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fx86_64%2Fmm%2Fsrat.c;h=502fce65e96ae246986e4b767b0e10897733b71c;hb=f17a2686b11453680e9662ef8bdc8d948d0dce18;hp=15ae9fcd65a700c9f9060860f0fce56f7f6f3fcf;hpb=32ea89ecb25789b1b7db28146558587a42f3b372;p=pandora-kernel.git

diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..502fce65e96a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -30,38 +30,21 @@
 static struct acpi_table_slit *acpi_slit;
 
 static nodemask_t nodes_parsed __initdata;
-static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 
 /* Too small nodes confuse the VM badly. Usually they result
    from BIOS bugs. */
 #define NODE_MIN_SIZE (4*1024*1024)
 
-static int node_to_pxm(int n);
-
-int pxm_to_node(int pxm)
-{
-	if ((unsigned)pxm >= 256)
-		return -1;
-	/* Extend 0xff to (int)-1 */
-	return (signed char)pxm2node[pxm];
-}
-
 static __init int setup_node(int pxm)
 {
-	unsigned node = pxm2node[pxm];
-	if (node == 0xff) {
-		if (nodes_weight(nodes_found) >= MAX_NUMNODES)
-			return -1;
-		node = first_unset_node(nodes_found);
-		node_set(node, nodes_found);
-		pxm2node[pxm] = node;
-	}
-	return pxm2node[pxm];
+	return acpi_map_pxm_to_node(pxm);
 }
 
 static __init int conflicting_nodes(unsigned long start, unsigned long end)
@@ -103,6 +86,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +138,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +176,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory upto hotadd_percent
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely fool proof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
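
The last hunk rewrites the clamping arithmetic in hotadd_enough_memory(): instead of deriving the page count from sizeof(struct page), the node is shrunk to the leftover byte range of the hot-add budget and its page count is taken from that range. Below is a minimal standalone sketch of that arithmetic only; PAGE_SIZE_SK, struct fake_page, struct node_range, clamp_hotadd and the sample numbers are illustrative stand-ins, not kernel symbols.

/*
 * Sketch of the corrected clamping done in hotadd_enough_memory() above.
 * All names and constants here are illustrative stand-ins, not taken
 * from the kernel tree.
 */
#include <stdio.h>

#define PAGE_SIZE_SK 4096UL

struct fake_page { char pad[64]; };		/* stand-in for struct page */

struct node_range { unsigned long start, end; };

/*
 * If accounting for this node would exceed the byte budget "allowed"
 * (of which "allocated" is already used), shrink the node to the
 * remaining range and recompute pages/mem from that range, mirroring
 * the fixed lines in the hunk.  Returns the bytes charged for the node.
 */
static unsigned long clamp_hotadd(struct node_range *nd,
				  unsigned long allocated,
				  unsigned long allowed)
{
	unsigned long pages = (nd->end - nd->start) / PAGE_SIZE_SK;
	unsigned long mem = pages * sizeof(struct fake_page);

	if (allocated + mem > allowed) {
		unsigned long range;

		if (allocated >= allowed)
			return 0;			/* budget exhausted */
		range = allowed - allocated;		/* leftover bytes */
		pages = range / PAGE_SIZE_SK;		/* fixed: size by range, */
		mem = pages * sizeof(struct fake_page);	/* not by sizeof(page)   */
		nd->end = nd->start + range;
	}
	return mem;
}

int main(void)
{
	struct node_range nd = { 0x10000000UL, 0x50000000UL };	/* 1 GB hot-add node */
	unsigned long used;

	used = clamp_hotadd(&nd, 8UL << 20, 10UL << 20);	/* 8 of 10 MB budget used */
	printf("node end clamped to %#lx, charged %lu bytes\n", nd.end, used);
	return 0;
}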