Merge branch 'upstream'
[pandora-kernel.git] / arch / x86_64 / kernel / setup.c
index da0bc3e..28895c0 100644
@@ -35,6 +35,7 @@
 #include <asm/processor.h>
 #include <linux/console.h>
 #include <linux/seq_file.h>
+#include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/pci.h>
 #include <linux/acpi.h>
@@ -42,6 +43,9 @@
 #include <linux/edd.h>
 #include <linux/mmzone.h>
 #include <linux/kexec.h>
+#include <linux/cpufreq.h>
+#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
@@ -60,6 +64,9 @@
 #include <asm/setup.h>
 #include <asm/mach_apic.h>
 #include <asm/numa.h>
+#include <asm/swiotlb.h>
+#include <asm/sections.h>
+#include <asm/gart-mapping.h>
 
 /*
  * Machine setup..
@@ -84,11 +91,6 @@ int bootloader_type;
 
 unsigned long saved_video_mode;
 
-#ifdef CONFIG_SWIOTLB
-int swiotlb;
-EXPORT_SYMBOL(swiotlb);
-#endif
-
 /*
  * Setup options
  */
@@ -103,7 +105,6 @@ struct edid_info edid_info;
 struct e820map e820;
 
 extern int root_mountflags;
-extern char _text, _etext, _edata, _end;
 
 char command_line[COMMAND_LINE_SIZE];
 
@@ -274,10 +275,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 {
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;
-
-       /* Save unparsed command line copy for /proc/cmdline */
-       memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
-       saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+       int userdef = 0;
 
        for (;;) {
                if (c != ' ') 
@@ -345,10 +343,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
                    !memcmp(from, "disableapic", 11))
                        disable_apic = 1;
 
-               if (!memcmp(from, "noapic", 6)) 
+               /* Don't confuse with noapictimer */
+               if (!memcmp(from, "noapic", 6) &&
+                       (from[6] == ' ' || from[6] == 0))
                        skip_ioapic_setup = 1;
 
-               if (!memcmp(from, "apic", 4)) { 
+               /* Make sure to not confuse with apic= */
+               if (!memcmp(from, "apic", 4) &&
+                       (from[4] == ' ' || from[4] == 0)) {
                        skip_ioapic_setup = 0;
                        ioapic_force = 1;
                }
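
The two hunks above tighten the option matching so that "noapic" is no longer mistaken for a prefix of "noapictimer" (and bare "apic" for "apic="). A minimal standalone sketch of that word-boundary check follows; match_option() is a hypothetical helper name, not a kernel function:

#include <stdio.h>
#include <string.h>

/* Match a whole option word at the start of the command line,
 * the same boundary test the hunk adds with from[6]/from[4]. */
static int match_option(const char *from, const char *opt)
{
	size_t len = strlen(opt);

	return !memcmp(from, opt, len) &&
	       (from[len] == ' ' || from[len] == '\0');
}

int main(void)
{
	printf("%d\n", match_option("noapictimer", "noapic"));		/* 0 */
	printf("%d\n", match_option("noapic root=/dev/sda1", "noapic"));	/* 1 */
	return 0;
}
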
@@ -356,16 +358,36 @@ static __init void parse_cmdline_early (char ** cmdline_p)
                if (!memcmp(from, "mem=", 4))
                        parse_memopt(from+4, &from); 
 
+               if (!memcmp(from, "memmap=", 7)) {
+                       /* exactmap option is for user-defined memory */
+                       if (!memcmp(from+7, "exactmap", 8)) {
+#ifdef CONFIG_CRASH_DUMP
+                               /* If we are doing a crash dump, we
+                                * still need to know the real mem
+                                * size before original memory map is
+                                * reset.
+                                */
+                               saved_max_pfn = e820_end_of_ram();
+#endif
+                               from += 8+7;
+                               end_pfn_map = 0;
+                               e820.nr_map = 0;
+                               userdef = 1;
+                       }
+                       else {
+                               parse_memmapopt(from+7, &from);
+                               userdef = 1;
+                       }
+               }
+
 #ifdef CONFIG_NUMA
                if (!memcmp(from, "numa=", 5))
                        numa_setup(from+5); 
 #endif
 
-#ifdef CONFIG_GART_IOMMU 
                if (!memcmp(from,"iommu=",6)) { 
                        iommu_setup(from+6); 
                }
-#endif
 
                if (!memcmp(from,"oops=panic", 10))
                        panic_on_oops = 1;
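
The mem=, memmap= and (further down) elfcorehdr= handlers all rely on the kernel's memparse()-style size syntax: a decimal or hex number with an optional K/M/G suffix. A hedged userspace approximation of that convention is sketched below; parse_size() is an illustrative stand-in, not the kernel function:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for memparse(): parse "64K", "512M" or "0x1000000",
 * return the value in bytes and advance *retptr past the parsed text. */
static unsigned long long parse_size(const char *s, char **retptr)
{
	unsigned long long val = strtoull(s, retptr, 0);	/* base 0: dec/hex/oct */

	switch (**retptr) {
	case 'G': case 'g':
		val <<= 10;
		/* fall through */
	case 'M': case 'm':
		val <<= 10;
		/* fall through */
	case 'K': case 'k':
		val <<= 10;
		(*retptr)++;
	}
	return val;
}

int main(void)
{
	char *end;

	printf("%llu\n", parse_size("512M", &end));	/* 536870912 */
	return 0;
}
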
@@ -394,6 +416,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
                }
 #endif
 
+#ifdef CONFIG_PROC_VMCORE
+               /* elfcorehdr= specifies the location of elf core header
+                * stored by the crashed kernel. This option will be passed
+                * by kexec loader to the capture kernel.
+                */
+               else if(!memcmp(from, "elfcorehdr=", 11))
+                       elfcorehdr_addr = memparse(from+11, &from);
+#endif
        next_char:
                c = *(from++);
                if (!c)
@@ -402,6 +432,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
                        break;
                *(to++) = c;
        }
+       if (userdef) {
+               printk(KERN_INFO "user-defined physical RAM map:\n");
+               e820_print_map("user");
+       }
        *to = '\0';
        *cmdline_p = command_line;
 }
@@ -412,7 +446,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long bootmap_size, bootmap;
 
-       memory_present(0, start_pfn, end_pfn);
        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
@@ -443,6 +476,8 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
      k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 }; 
 
+extern char __vsyscall_0;
+
 /* Replace instructions with better alternatives for this CPU type.
 
    This runs before SMP is initialized to avoid SMP problems with
@@ -454,11 +489,17 @@ void apply_alternatives(void *start, void *end)
        struct alt_instr *a; 
        int diff, i, k;
        for (a = start; (void *)a < end; a++) { 
+               u8 *instr;
+
                if (!boot_cpu_has(a->cpuid))
                        continue;
 
                BUG_ON(a->replacementlen > a->instrlen); 
-               __inline_memcpy(a->instr, a->replacement, a->replacementlen); 
+               instr = a->instr;
+               /* vsyscall code is not mapped yet. resolve it manually. */
+               if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
+                       instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+               __inline_memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen; 
 
                /* Pad the rest with nops */
@@ -466,7 +507,7 @@ void apply_alternatives(void *start, void *end)
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
-                       __inline_memcpy(a->instr + i, k8_nops[k], k); 
+                       __inline_memcpy(instr + i, k8_nops[k], k);
                } 
        }
 } 
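
The patched apply_alternatives() loop copies each replacement and then pads the leftover instruction bytes with the largest NOPs available, at most ASM_NOP_MAX at a time. A self-contained sketch of that padding strategy follows; the NOP table and the 2-byte cap are illustrative, not the kernel's k8_nops[] contents:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define NOP_MAX 2	/* illustrative cap, stands in for ASM_NOP_MAX */

static const unsigned char nop1[] = { 0x90 };		/* nop */
static const unsigned char nop2[] = { 0x66, 0x90 };	/* osp nop */
static const unsigned char *nops[NOP_MAX + 1] = { NULL, nop1, nop2 };

/* Copy a shorter replacement over an instrlen-byte site and pad the
 * tail with NOPs, mirroring the loop structure in apply_alternatives(). */
static void patch_site(unsigned char *instr, size_t instrlen,
		       const unsigned char *repl, size_t repllen)
{
	size_t i, k, diff = instrlen - repllen;

	memcpy(instr, repl, repllen);
	for (i = repllen; diff > 0; diff -= k, i += k) {
		k = diff > NOP_MAX ? NOP_MAX : diff;
		memcpy(instr + i, nops[k], k);
	}
}

int main(void)
{
	unsigned char site[8] = { 0 };
	const unsigned char repl[3] = { 0xb8, 0x01, 0x00 };	/* arbitrary demo bytes */
	size_t i;

	patch_site(site, sizeof(site), repl, sizeof(repl));
	for (i = 0; i < sizeof(site); i++)
		printf("%02x ", site[i]);
	printf("\n");	/* b8 01 00 66 90 66 90 90 */
	return 0;
}
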
@@ -571,6 +612,8 @@ void __init setup_arch(char **cmdline_p)
 
        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
+       zap_low_mappings(0);
+
 #ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -657,8 +700,6 @@ void __init setup_arch(char **cmdline_p)
        }
 #endif
 
-       sparse_init();
-
        paging_init();
 
        check_ioapic();
@@ -670,6 +711,8 @@ void __init setup_arch(char **cmdline_p)
        acpi_boot_init();
 #endif
 
+       init_cpu_to_node();
+
 #ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
@@ -793,7 +836,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 
        bits = 0;
-       while ((1 << bits) < c->x86_num_cores)
+       while ((1 << bits) < c->x86_max_cores)
                bits++;
 
        /* Low order bits define the core id (index of core in socket) */
@@ -823,10 +866,10 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
-       cpu_to_node[cpu] = node;
+       numa_set_node(cpu, node);
 
        printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
-                       cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
+                       cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
 #endif
 #endif
 }
@@ -834,7 +877,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
        int r;
-       int level;
 
 #ifdef CONFIG_SMP
        unsigned long value;
@@ -857,11 +899,6 @@ static int __init init_amd(struct cpuinfo_x86 *c)
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);
        
-       /* C-stepping K8? */
-       level = cpuid_eax(1);
-       if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
-               set_bit(X86_FEATURE_K8_C, &c->x86_capability);
-
        r = get_model_name(c);
        if (!r) { 
                switch (c->x86) { 
@@ -874,10 +911,14 @@ static int __init init_amd(struct cpuinfo_x86 *c)
        } 
        display_cacheinfo(c);
 
+       /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+       if (c->x86_power & (1<<8))
+               set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+
        if (c->extended_cpuid_level >= 0x80000008) {
-               c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-               if (c->x86_num_cores & (c->x86_num_cores - 1))
-                       c->x86_num_cores = 1;
+               c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+               if (c->x86_max_cores & (c->x86_max_cores - 1))
+                       c->x86_max_cores = 1;
 
                amd_detect_cmp(c);
        }
@@ -889,54 +930,44 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        u32     eax, ebx, ecx, edx;
-       int     index_msb, tmp;
+       int     index_msb, core_bits;
        int     cpu = smp_processor_id();
-       
+
+       cpuid(1, &eax, &ebx, &ecx, &edx);
+
+       c->apicid = phys_pkg_id(0);
+
        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;
 
-       cpuid(1, &eax, &ebx, &ecx, &edx);
        smp_num_siblings = (ebx & 0xff0000) >> 16;
-       
+
        if (smp_num_siblings == 1) {
                printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-       } else if (smp_num_siblings > 1) {
-               index_msb = 31;
-               /*
-                * At this point we only support two siblings per
-                * processor package.
-                */
+       } else if (smp_num_siblings > 1 ) {
+
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }
-               tmp = smp_num_siblings;
-               while ((tmp & 0x80000000 ) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+
+               index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id(index_msb);
-               
+
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);
 
-               smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-               tmp = smp_num_siblings;
-               index_msb = 31;
-               while ((tmp & 0x80000000) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+               index_msb = get_count_order(smp_num_siblings) ;
+
+               core_bits = get_count_order(c->x86_max_cores);
 
-               cpu_core_id[cpu] = phys_pkg_id(index_msb);
+               cpu_core_id[cpu] = phys_pkg_id(index_msb) &
+                                              ((1 << core_bits) - 1);
 
-               if (c->x86_num_cores > 1)
+               if (c->x86_max_cores > 1)
                        printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
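
The rewritten detect_ht() replaces the open-coded MSB search with get_count_order() and masks out the core bits of the APIC ID. A standalone sketch of the same arithmetic, assuming the usual layout where thread bits occupy the low end of the initial APIC ID and core bits sit above them (count_order() and decode_apicid() are illustrative names):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, mirroring what get_count_order() is used for here */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

/* Derive package and core IDs from an initial APIC ID the way the hunk
 * above does: shift away thread+core bits for the package ID, and mask
 * core_bits after shifting away the thread bits for the core ID. */
static void decode_apicid(unsigned int apicid, unsigned int siblings_per_pkg,
			  unsigned int cores_per_pkg)
{
	int pkg_shift   = count_order(siblings_per_pkg);
	int thread_bits = count_order(siblings_per_pkg / cores_per_pkg);
	int core_bits   = count_order(cores_per_pkg);

	unsigned int phys_proc_id = apicid >> pkg_shift;
	unsigned int cpu_core_id  = (apicid >> thread_bits) & ((1u << core_bits) - 1);

	printf("apicid %u -> package %u, core %u\n",
	       apicid, phys_proc_id, cpu_core_id);
}

int main(void)
{
	/* 2 cores x 2 threads per package: APIC ID 5 is package 1, core 0 */
	decode_apicid(5, 4, 2);
	return 0;
}
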
@@ -975,7 +1006,7 @@ static void srat_detect_node(void)
        node = apicid_to_node[hard_smp_processor_id()];
        if (node == NUMA_NO_NODE)
                node = 0;
-       cpu_to_node[cpu] = node;
+       numa_set_node(cpu, node);
 
        if (acpi_numa > 0)
                printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
@@ -993,13 +1024,20 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
+               /* CPUID workaround for Intel 0F34 CPU */
+               if (c->x86_vendor == X86_VENDOR_INTEL &&
+                   c->x86 == 0xF && c->x86_model == 0x3 &&
+                   c->x86_mask == 0x4)
+                       c->x86_phys_bits = 36;
        }
 
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
-       if (c->x86 >= 15)
+       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+           (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-       c->x86_num_cores = intel_num_cpu_cores(c);
+       set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+       c->x86_max_cores = intel_num_cpu_cores(c);
 
        srat_detect_node();
 }
@@ -1037,7 +1075,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
-       c->x86_num_cores = 1;
+       c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
@@ -1060,10 +1098,10 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
-               if (c->x86 == 0xf) {
+               if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
+               if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
-               } 
                if (c->x86_capability[0] & (1<<19)) 
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
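
This hunk also changes the extended-model handling: the extended model bits are now folded in for any family >= 6 instead of only family 0xf. A small standalone decoder showing the resulting interpretation of CPUID leaf 1 EAX; decode_signature() is an illustrative name, and the 0x00000f34 value matches the Intel 0F34 workaround earlier in the diff:

#include <stdio.h>

/* Decode family/model/stepping from CPUID leaf 1 EAX ("tfms") the way
 * early_identify_cpu() does after this change. */
static void decode_signature(unsigned int tfms)
{
	unsigned int family   = (tfms >> 8) & 0xf;
	unsigned int model    = (tfms >> 4) & 0xf;
	unsigned int stepping = tfms & 0xf;

	if (family == 0xf)
		family += (tfms >> 20) & 0xff;		/* extended family */
	if (family >= 0x6)
		model += ((tfms >> 16) & 0xf) << 4;	/* extended model */

	printf("family 0x%x, model 0x%x, stepping 0x%x\n",
	       family, model, stepping);
}

int main(void)
{
	decode_signature(0x00000f34);	/* family 0xf, model 0x3, stepping 0x4 */
	return 0;
}
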
@@ -1197,7 +1235,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-               NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+               NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
 
                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1225,7 +1263,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
                /* AMD-defined (#2) */
-               "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+               "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1236,7 +1274,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
-               "stc"
+               "stc",
+               NULL,
+               /* nothing */   /* constant_tsc - moved to flags */
        };
 
 
@@ -1262,8 +1302,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "stepping\t: unknown\n");
        
        if (cpu_has(c,X86_FEATURE_TSC)) {
+               unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
+               if (!freq)
+                       freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-                            cpu_khz / 1000, (cpu_khz % 1000));
+                            freq / 1000, (freq % 1000));
        }
 
        /* Cache size */
@@ -1271,13 +1314,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
        
 #ifdef CONFIG_SMP
-       if (smp_num_siblings * c->x86_num_cores > 1) {
+       if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
-               seq_printf(m, "siblings\t: %d\n",
-                               c->x86_num_cores * smp_num_siblings);
+               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
-               seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
 #endif 
 
@@ -1314,8 +1356,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                unsigned i;
                for (i = 0; i < 32; i++) 
                        if (c->x86_power & (1 << i)) {
-                               if (i < ARRAY_SIZE(x86_power_flags))
-                                       seq_printf(m, " %s", x86_power_flags[i]);
+                               if (i < ARRAY_SIZE(x86_power_flags) &&
+                                       x86_power_flags[i])
+                                       seq_printf(m, "%s%s",
+                                               x86_power_flags[i][0]?" ":"",
+                                               x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
@@ -1347,3 +1392,11 @@ struct seq_operations cpuinfo_op = {
        .stop = c_stop,
        .show = show_cpuinfo,
 };
+
+static int __init run_dmi_scan(void)
+{
+       dmi_scan_machine();
+       return 0;
+}
+core_initcall(run_dmi_scan);
+