Fix misreporting of #cores as #hyperthreads for Q9550
[pandora-kernel.git] / arch/x86/kernel/cpu/intel_64.c
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/topology.h>
#include <asm/numa_64.h>

#include "cpu.h"

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        /* Unmask CPUID levels if masked: */
        if (c->x86 == 6 && c->x86_model >= 15) {
                u64 misc_enable;

                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

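        /*
         * EAX[4:0] is the cache type; zero here means leaf 4 reports
         * nothing.  EAX[31:26] is the maximum number of addressable
         * processor core IDs in the physical package, minus one.
         */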
        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE || !node_online(node))
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
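                /*
                 * IA32_MISC_ENABLE bit 11 reads as 1 when BTS is
                 * unavailable and bit 12 when PEBS is unavailable,
                 * so a clear bit means the feature is present.
                 */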
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (cpu_has_bts)
                ds_init_intel(c);

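        /*
         * Family 15 (P4/NetBurst) effectively handles cache lines in
         * pairs, so use twice the clflush size for cache alignment.
         */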
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
};
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
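
For reference, a minimal user-space sketch (x86-64, gcc) of the same CPUID leaf 4 computation that intel_num_cpu_cores() performs; the local cpuid_count() helper and the program around it are illustrative only and are not part of the kernel file above.

#include <stdio.h>
#include <stdint.h>

/* Illustrative helper: execute CPUID with the given leaf and subleaf. */
static void cpuid_count(uint32_t leaf, uint32_t subleaf,
                        uint32_t *eax, uint32_t *ebx,
                        uint32_t *ecx, uint32_t *edx)
{
        asm volatile("cpuid"
                     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                     : "a" (leaf), "c" (subleaf));
}

int main(void)
{
        uint32_t eax, ebx, ecx, edx;

        /* Mirror the kernel's cpuid_level check: leaf 4 must exist. */
        cpuid_count(0, 0, &eax, &ebx, &ecx, &edx);
        if (eax < 4) {
                printf("CPUID leaf 4 not supported; assuming 1 core\n");
                return 0;
        }

        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);

        /* EAX[4:0] is the cache type; zero means the leaf is not usable.
         * EAX[31:26] + 1 is the maximum number of addressable core IDs
         * per physical package. */
        if (eax & 0x1f)
                printf("max addressable cores per package: %u\n",
                       (eax >> 26) + 1);
        else
                printf("CPUID leaf 4 reports no data; assuming 1 core\n");

        return 0;
}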