#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "cpu.h"
-DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-extern void mcheck_init(struct cpuinfo_x86 *c);
-
extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c)
static struct cpu_dev default_cpu = {
.c_init = default_init,
+ .c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
{
char *v = c->x86_vendor_id;
int i;
+ static int printed;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (cpu_devs[i]) {
c->x86_vendor = i;
if (!early)
this_cpu = cpu_devs[i];
- break;
+ return;
}
}
}
+ if (!printed) {
+ printed++;
+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
+ }
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ this_cpu = &default_cpu;
}
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
- The others are not touched to avoid unwanted side effects. */
+ The others are not touched to avoid unwanted side effects.
+
+ WARNING: this function is only called on the BP. Don't add code here
+ that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
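+	/*
+	 * CPUID.1 EAX is the processor signature: stepping in bits 3:0,
+	 * model in bits 7:4, family in bits 11:8, extended model in
+	 * bits 19:16 and extended family in bits 27:20.
+	 */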
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
c->x86_mask = tfms & 15;
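+	/* EDX bit 19 is CLFLUSH; EBX bits 15:8 give its line size in 8-byte units */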
if (cap0 & (1<<19))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
}
-
- early_intel_workaround(c);
-
-#ifdef CONFIG_X86_HT
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
}
void __devinit generic_identify(struct cpuinfo_x86 * c)
c->x86_capability[4] = excap;
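+		/* same extended family/model decode as early_cpu_detect() */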
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
c->x86_mask = tfms & 15;
} else {
/* Have CPUID level 0 only - unheard of */
get_model_name(c); /* Default name */
}
}
+
+ early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
}
static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
c->x86_model = c->x86_mask = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
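+	/* x86_max_cores is the maximum cores per package CPUID reports, not the enabled count */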
- c->x86_num_cores = 1;
+ c->x86_max_cores = 1;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!have_cpuid_p()) {
}
/* Init Machine Check Exception if available. */
-#ifdef CONFIG_X86_MCE
mcheck_init(c);
-#endif
+
if (c == &boot_cpu_data)
sysenter_setup();
enable_sep_cpu();
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- int index_msb, tmp;
+ int index_msb, core_bits;
int cpu = smp_processor_id();
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
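+	/* CPUID.1 EBX bits 31:24 hold the initial APIC ID */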
+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
- cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
- index_msb = 31;
if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
smp_num_siblings = 1;
return;
}
- tmp = smp_num_siblings;
- while ((tmp & 0x80000000 ) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
- if (smp_num_siblings & (smp_num_siblings - 1))
- index_msb++;
+
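+	/*
+	 * get_count_order(n) is ceil(log2(n)): the number of low APIC-ID
+	 * bits needed to enumerate n siblings in a package.
+	 */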
+ index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
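+	/* from here on, smp_num_siblings counts threads per core, not per package */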
- smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
- tmp = smp_num_siblings;
- index_msb = 31;
- while ((tmp & 0x80000000) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
+	index_msb = get_count_order(smp_num_siblings);
- if (smp_num_siblings & (smp_num_siblings - 1))
- index_msb++;
+ core_bits = get_count_order(c->x86_max_cores);
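+	/*
+	 * The core ID is the initial APIC ID with the thread bits shifted
+	 * off, masked down to the bits that can number cores in a package.
+	 */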
- cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+ cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+ ((1 << core_bits) - 1);
- if (c->x86_num_cores > 1)
+ if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ struct desc_struct *gdt;
__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
if (cpu_test_and_set(cpu, cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
set_in_cr4(X86_CR4_TSD);
}
+ /*
+ * This is a horrible hack to allocate the GDT. The problem
+ * is that cpu_init() is called really early for the boot CPU
+ * (and hence needs bootmem) but much later for the secondary
+ * CPUs, when bootmem will have gone away
+ */
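+	/*
+	 * node_bootmem_map is cleared once bootmem is retired, so its
+	 * presence distinguishes the early-booting BP (bootmem still
+	 * live) from the later secondaries (page allocator up).
+	 */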
+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+ /* alloc_bootmem_pages panics on failure, so no check */
+ memset(gdt, 0, PAGE_SIZE);
+ } else {
+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+ if (unlikely(!gdt)) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
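+			/* cannot run without a GDT; park this CPU */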
+ for (;;)
+ local_irq_enable();
+ }
+ }
+
/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
(CPU_16BIT_STACK_SIZE - 1);
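+	/*
+	 * The expression above plants the 16-bit stack base into the
+	 * descriptor's split base fields (bits 31:24 go in the top byte)
+	 * and sets the segment limit to the stack size.
+	 */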
- cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
- cpu_gdt_descr[cpu].address = (unsigned long)gdt;
+ cpu_gdt_descr->size = GDT_SIZE - 1;
+ cpu_gdt_descr->address = (unsigned long)gdt;
- load_gdt(&cpu_gdt_descr[cpu]);
+ load_gdt(cpu_gdt_descr);
load_idt(&idt_descr);
- /*
- * Delete NT
- */
- __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-
/*
* Set up and load the per-CPU TSS and LDT
*/
load_TR_desc();
load_LDT(&init_mm.context);
+#ifdef CONFIG_DOUBLEFAULT
/* Set up doublefault TSS pointer in the GDT */
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+#endif
/* Clear %fs and %gs. */
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");