Merge branch 'x86/generalize-visws' into x86/core
[pandora-kernel.git] / arch / x86 / kernel / cpu / intel_64.c
1 #include <linux/init.h>
2 #include <linux/smp.h>
3 #include <asm/processor.h>
4 #include <asm/ptrace.h>
5 #include <asm/topology.h>
6 #include <asm/numa_64.h>
7
8 #include "cpu.h"
9
10 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
11 {
12         if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
13             (c->x86 == 0x6 && c->x86_model >= 0x0e))
14                 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
15
16         set_cpu_cap(c, X86_FEATURE_SYSENTER32);
17 }
18
19 /*
20  * find out the number of processor cores on the die
21  */
22 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
23 {
24         unsigned int eax, t;
25
26         if (c->cpuid_level < 4)
27                 return 1;
28
29         cpuid_count(4, 0, &eax, &t, &t, &t);
30
31         if (eax & 0x1f)
32                 return ((eax >> 26) + 1);
33         else
34                 return 1;
35 }
36
/*
 * Bind the current CPU to a NUMA node using the SRAT-derived
 * apicid_to_node[] table; falls back to the first online node when the
 * table has no (valid) entry. No-op on !CONFIG_NUMA kernels.
 */
static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
54
/*
 * Full per-CPU feature setup for Intel processors: cache info,
 * architectural perfmon, BTS/PEBS availability, address widths,
 * cache alignment, core count and NUMA node binding.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	/*
	 * CPUID leaf 10 (0xa) describes architectural performance
	 * monitoring: EAX[7:0] = version, EAX[15:8] = number of
	 * general-purpose counters.
	 */
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/*
		 * MISC_ENABLE bit 11 clear -> BTS (branch trace store)
		 * available; bit 12 clear -> PEBS (precise event based
		 * sampling) available.
		 */
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}


	if (cpu_has_bts)
		ds_init_intel(c);

	/* CPUID leaf 0x80000008: EAX[7:0] = physical address bits,
	   EAX[15:8] = virtual address bits. */
	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}

	/* Family 15: effective cache alignment is twice the CLFLUSH size. */
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	/* Family 6 CPUs have fast rep string operations. */
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
97
/*
 * Vendor descriptor for GenuineIntel CPUs; hooks this file's early and
 * full init routines into the generic x86 CPU setup path.
 */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
};
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
105