1 #include <linux/init.h>
2 #include <linux/string.h>
3 #include <linux/delay.h>
5 #include <linux/module.h>
6 #include <linux/percpu.h>
7 #include <linux/bootmem.h>
8 #include <asm/processor.h>
12 #include <asm/mmu_context.h>
17 #ifdef CONFIG_X86_LOCAL_APIC
18 #include <asm/mpspec.h>
20 #include <mach_apic.h>
/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */
29 /* The TLS descriptors are currently at a different place compared to i386.
30 Hopefully nobody expects them at a fixed place (Wine?) */
31 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
32 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
33 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
34 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
35 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
36 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
37 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
39 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
/* Capability bits force-cleared via boot options ("clearcpuid=",
 * "noclflush"); masked out of x86_capability in identify_cpu(). */
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
43 /* Current gdt points %fs at the "master" per-cpu area: after this,
44 * it's on the real one. */
45 void switch_to_new_gdt(void)
47 struct desc_ptr gdt_descr;
49 gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
50 gdt_descr.size = GDT_SIZE - 1;
/* Per-vendor driver table, indexed by X86_VENDOR_*; populated from
 * the linker-collected vendor section in early_cpu_init(). */
struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
56 static void __cpuinit default_init(struct cpuinfo_x86 *c)
61 static struct cpu_dev __cpuinitdata default_cpu = {
62 .c_init = default_init,
63 .c_vendor = "Unknown",
65 static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
67 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
71 if (c->extended_cpuid_level < 0x80000004)
74 v = (unsigned int *) c->x86_model_id;
75 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
76 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
77 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
78 c->x86_model_id[48] = 0;
83 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
85 unsigned int n, dummy, eax, ebx, ecx, edx;
87 n = c->extended_cpuid_level;
89 if (n >= 0x80000005) {
90 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
91 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
92 "D cache %dK (%d bytes/line)\n",
93 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
94 c->x86_cache_size = (ecx>>24) + (edx>>24);
95 /* On K8 L1 TLB is inclusive, so don't count it */
99 if (n >= 0x80000006) {
100 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
101 ecx = cpuid_ecx(0x80000006);
102 c->x86_cache_size = ecx >> 16;
103 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
105 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
106 c->x86_cache_size, ecx & 0xFF);
108 if (n >= 0x80000008) {
109 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
110 c->x86_virt_bits = (eax >> 8) & 0xff;
111 c->x86_phys_bits = eax & 0xff;
/*
 * Detect hyper-threading / multi-core topology from CPUID leaf 1:
 * derives c->phys_proc_id and c->cpu_core_id from the APIC id bits
 * and sets the global smp_num_siblings.
 *
 * NOTE(review): several structural lines (opening brace, early
 * returns/goto, closing braces and the printk value arguments) are
 * missing from this chunk; the control flow below is incomplete as
 * shown and needs the full file to verify.
 */
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	/* No HT capability, or AMD CMP-legacy cores: nothing to split. */
	if (!cpu_has(c, X86_FEATURE_HT))
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))

	/* EBX[23:16] = logical processors per physical package. */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;

		/* Package id = APIC id shifted past the sibling bits. */
		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		/* Threads per core = total siblings / cores per package. */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		/* Core id = the core-number bits of the APIC id. */
		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);

	/* Summarize the detected topology when it is non-trivial. */
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
/*
 * Match c->x86_vendor_id against the ident strings of every
 * registered cpu_dev and select this_cpu accordingly; when nothing
 * matches, fall back to the generic driver and warn.
 *
 * NOTE(review): the loop index declaration, the c->x86_vendor
 * assignment on a match, and several braces are missing from this
 * chunk.
 */
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
	char *v = c->x86_vendor_id;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		/* Each vendor may declare up to two ident strings. */
		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];

	/* Fell through every vendor: keep the generic default_cpu. */
	printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
	printk(KERN_ERR "CPU: Your system may be unstable.\n");
	c->x86_vendor = X86_VENDOR_UNKNOWN;
/*
 * Log every CPU vendor this kernel build supports (everything that
 * was registered into cpu_devs[]).
 *
 * NOTE(review): the loop index declarations, the NULL-entry skip and
 * the closing braces are missing from this chunk.
 */
static void __init early_cpu_support_print(void)
	struct cpu_dev *cpu_devx;

	printk("KERNEL supported cpus:\n");
	for (i = 0; i < X86_VENDOR_NUM; i++) {
		cpu_devx = cpu_devs[i];
		/* Up to two ident strings per vendor driver. */
		for (j = 0; j < 2; j++) {
			if (!cpu_devx->c_ident[j])
			printk(" %s %s\n", cpu_devx->c_vendor,
			       cpu_devx->c_ident[j]);
/* Forward declaration: defined below, needed by early_cpu_init(). */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
211 void __init early_cpu_init(void)
213 struct cpu_vendor_dev *cvdev;
215 for (cvdev = __x86cpuvendor_start ;
216 cvdev < __x86cpuvendor_end ;
218 cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
219 early_cpu_support_print();
220 early_identify_cpu(&boot_cpu_data);
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
/*
 * Early CPUID probing: resets *c to a known state, then fills in the
 * vendor string, family/model/stepping, capability words, extended
 * CPUID level and power-management flags.  Finishes by running the
 * vendor's c_early_init hook and validating PAT support.
 *
 * NOTE(review): the opening brace, the tfms/misc/xlvl/ebx local
 * declarations, and several if/else guards and closing braces are
 * missing from this chunk; the flow shown here is incomplete.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
	/* Start from a clean slate: everything unknown/unset. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name: leaf 0 returns the 12-byte vendor string in
	 * EBX, EDX, ECX order, hence the [0]/[8]/[4] destinations. */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		/* Decode family/model/stepping from EAX. */
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		/* Merge in the extended family/model fields — normally
		 * guarded by family checks not visible in this chunk. */
		c->x86 += (tfms >> 20) & 0xff;
		c->x86_model += ((tfms >> 16) & 0xF) << 4;
		/* MISC[15:8] = CLFLUSH line size in 8-byte units. */
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
		/* Have CPUID level 0 only - unheard of */
	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
	c->phys_proc_id = c->initial_apicid;

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	/* Hand off to the vendor's early hook, if one registered. */
	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
	    cpu_devs[c->x86_vendor]->c_early_init)
		cpu_devs[c->x86_vendor]->c_early_init(c);

	validate_pat_support(c);

	/* early_param could clear that, but recall get it set again */
	clear_cpu_cap(c, X86_FEATURE_APIC);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 *
 * NOTE(review): the opening brace, the loop-index declaration, the
 * this_cpu->c_init(c) call body, and the #endif lines for the MCE
 * and NUMA sections are missing from this chunk.
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	/* First pass: the basic CPUID-derived fields. */
	early_identify_cpu(c);

	/* Pick up features scattered across miscellaneous leaves. */
	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	select_idle_routine(c);
	numa_add_cpu(smp_processor_id());
367 void __cpuinit identify_boot_cpu(void)
369 identify_cpu(&boot_cpu_data);
372 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
374 BUG_ON(c == &boot_cpu_data);
379 static __init int setup_noclflush(char *arg)
381 setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
384 __setup("noclflush", setup_noclflush);
386 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
388 if (c->x86_model_id[0])
389 printk(KERN_CONT "%s", c->x86_model_id);
391 if (c->x86_mask || c->cpuid_level >= 0)
392 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
394 printk(KERN_CONT "\n");
397 static __init int setup_disablecpuid(char *arg)
400 if (get_option(&arg, &bit) && bit < NCAPINTS*32)
401 setup_clear_cpu_cap(bit);
406 __setup("clearcpuid=", setup_disablecpuid);