/*
 * linux/arch/x86-64/kernel/setup.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Nov 2001 Dave Jones <davej@suse.de>
 * Forked from i386 setup code.
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;

unsigned long mmu_cr4_features;

EXPORT_SYMBOL(acpi_disabled);
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */

unsigned long saved_video_mode;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;

extern int root_mountflags;

char command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define STANDARD_IO_RESOURCES \
	(sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};

struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

static struct resource system_rom_resource = {
	.name = "System ROM",
	.flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
	.name = "Extension ROM",
	.flags = IORESOURCE_ROM,
};
static struct resource adapter_rom_resources[] = {
	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM },
	{ .name = "Adapter ROM", .start = 0, .end = 0,
		.flags = IORESOURCE_ROM }
};

#define ADAPTER_ROM_RESOURCES \
	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

static struct resource video_rom_resource = {
	.flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.flags = IORESOURCE_RAM,
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
static int __init romchecksum(unsigned char *rom, unsigned long length)
{
	unsigned char *p, sum = 0;

	/* a valid ROM image sums to zero, modulo 256 */
	for (p = rom; p < rom + length; p++)
		sum += *p;
	return sum == 0;
}
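/*
 * Illustrative note (layout assumed from the standard PC option-ROM
 * format, it is not spelled out in this file): the image that the ROM
 * probing below looks for begins with
 *
 *	byte 0: 0x55   \  read as the little-endian word 0xaa55,
 *	byte 1: 0xaa   /  which is what romsignature() checks
 *	byte 2: image length in 512-byte units
 *
 * and all bytes of the image sum to 0 mod 256, which is what
 * romchecksum() verifies.
 */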
static void __init probe_roms(void)
{
	unsigned long start, length, upper;
	unsigned char *rom;
	int i;

	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		video_rom_resource.start = start;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* if checksum okay, trust length byte */
		if (length && romchecksum(rom, length))
			video_rom_resource.end = start + length - 1;

		request_resource(&iomem_resource, &video_rom_resource);
		break;
	}

	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;

	/* system rom */
	request_resource(&iomem_resource, &system_rom_resource);
	upper = system_rom_resource.start;

	/* check for extension rom (ignore length byte!) */
	rom = isa_bus_to_virt(extension_rom_resource.start);
	if (romsignature(rom)) {
		length = extension_rom_resource.end - extension_rom_resource.start + 1;
		if (romchecksum(rom, length)) {
			request_resource(&iomem_resource, &extension_rom_resource);
			upper = extension_rom_resource.start;
		}
	}

	/* check for adapter roms on 2k boundaries */
	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
		rom = isa_bus_to_virt(start);
		if (!romsignature(rom))
			continue;

		/* 0 < length <= 0x7f * 512, historically */
		length = rom[2] * 512;

		/* but accept any length that fits if checksum okay */
		if (!length || start + length > upper || !romchecksum(rom, length))
			continue;

		adapter_rom_resources[i].start = start;
		adapter_rom_resources[i].end = start + length - 1;
		request_resource(&iomem_resource, &adapter_rom_resources[i]);

		start = adapter_rom_resources[i++].end & ~2047UL;
	}
}
/* Check for full argument with no trailing characters */
static int fullarg(char *p, char *arg)
{
	int l = strlen(arg);
	return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
}
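/*
 * For example, with from pointing at "acpi=off console=ttyS0",
 * fullarg(from, "acpi=off") matches while fullarg(from, "acpi") does
 * not: the argument has to be followed by whitespace or the end of the
 * command line, so prefixes of longer options are rejected.
 */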
static __init void parse_cmdline_early (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;

	/*
	 * If the BIOS enumerates physical processors before logical,
	 * maxcpus=N at enumeration-time can be used to disable HT.
	 */
	else if (!memcmp(from, "maxcpus=", 8)) {
		extern unsigned int maxcpus;

		maxcpus = simple_strtoul(from + 8, NULL, 0);
	}

	/* "acpi=off" disables both ACPI table parsing and interpreter init */
	if (fullarg(from, "acpi=off"))

	if (fullarg(from, "acpi=force")) {
		/* add later when we do DMI horrors: */

	/* acpi=ht just means: do ACPI MADT parsing
	   at bootup, but don't enable the full ACPI interpreter */
	if (fullarg(from, "acpi=ht")) {

	else if (fullarg(from, "pci=noacpi"))

	else if (fullarg(from, "acpi=noirq"))

	else if (fullarg(from, "acpi_sci=edge"))
		acpi_sci_flags.trigger = 1;
	else if (fullarg(from, "acpi_sci=level"))
		acpi_sci_flags.trigger = 3;
	else if (fullarg(from, "acpi_sci=high"))
		acpi_sci_flags.polarity = 1;
	else if (fullarg(from, "acpi_sci=low"))
		acpi_sci_flags.polarity = 3;

	/* acpi=strict disables out-of-spec workarounds */
	else if (fullarg(from, "acpi=strict")) {

#ifdef CONFIG_X86_IO_APIC
	else if (fullarg(from, "acpi_skip_timer_override"))
		acpi_skip_timer_override = 1;

	if (fullarg(from, "disable_timer_pin_1"))
		disable_timer_pin_1 = 1;
	if (fullarg(from, "enable_timer_pin_1"))
		disable_timer_pin_1 = -1;

	if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);

	if (fullarg(from, "noapic"))
		skip_ioapic_setup = 1;

	if (fullarg(from, "apic")) {
		skip_ioapic_setup = 0;
	if (!memcmp(from, "mem=", 4))
		parse_memopt(from+4, &from);

	if (!memcmp(from, "memmap=", 7)) {
		/* exactmap option is for user-defined memory */
		if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
			/* If we are doing a crash dump, we
			 * still need to know the real memory
			 * size before the original memory map is
			 * reset. */
			saved_max_pfn = e820_end_of_ram();
#endif
		parse_memmapopt(from+7, &from);

	if (!memcmp(from, "numa=", 5))

	if (!memcmp(from, "iommu=", 6)) {

	if (fullarg(from, "oops=panic"))

	if (!memcmp(from, "noexec=", 7))
		nonx_setup(from + 7);

	/* crashkernel=size@addr specifies the location to reserve for
	 * a crash kernel.  By reserving this memory we guarantee
	 * that Linux never sets it up as a DMA target.
	 * Useful for holding code to do something appropriate
	 * after a kernel panic.
	 */
	else if (!memcmp(from, "crashkernel=", 12)) {
		unsigned long size, base;
		size = memparse(from+12, &from);
		base = memparse(from+1, &from);
		/* FIXME: Do I want a sanity check
		 * to validate the memory range? */
		crashk_res.start = base;
		crashk_res.end = base + size - 1;
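		/*
		 * Worked example (values invented for illustration):
		 * "crashkernel=64M@16M" parses to size = 64 << 20 and
		 * base = 16 << 20, so crashk_res covers 0x1000000-0x4ffffff
		 * and that range is later kept out of the bootmem allocator
		 * for the capture kernel.
		 */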
#ifdef CONFIG_PROC_VMCORE
	/* elfcorehdr= specifies the location of elf core header
	 * stored by the crashed kernel. This option will be passed
	 * by kexec loader to the capture kernel.
	 */
	else if (!memcmp(from, "elfcorehdr=", 11))
		elfcorehdr_addr = memparse(from+11, &from);
#endif

#ifdef CONFIG_HOTPLUG_CPU
	else if (!memcmp(from, "additional_cpus=", 16))
		setup_additional_cpus(from+16);
#endif

	if (COMMAND_LINE_SIZE <= ++len)

	printk(KERN_INFO "user-defined physical RAM map:\n");
	e820_print_map("user");

	*cmdline_p = command_line;
}
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
#ifdef CONFIG_EDD_MODULE
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;
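/*
 * Background note (standard PC BIOS convention, assumed rather than
 * documented here): the 16-bit word at 0x40E in the BIOS data area is
 * the real-mode segment of the EBDA, so shifting it left by four gives
 * the physical base (e.g. a value of 0x9fc0 maps to 0x9fc00, just
 * below 640K), and the first byte of the EBDA holds its size in KiB.
 */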
static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

	/* Round EBDA up to pages */
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */

	numa_initmem_init(0, end_pfn);

	contig_initmem_init(0, end_pfn);

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	reserve_bootmem_generic(ebda_addr, ebda_size);

	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	e820_reserve_resources();

	request_resource(&iomem_resource, &video_ram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

#ifdef CONFIG_GART_IOMMU
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int node;
	int cpu = smp_processor_id();
	unsigned apicid = hard_smp_processor_id();
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	while ((1 << bits) < c->x86_max_cores)
		bits++;

	/* Low order bits define the core id (index of core in socket) */
	cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	phys_proc_id[cpu] = phys_pkg_id(bits);

	node = phys_proc_id[cpu];
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (phys_proc_id[0] << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
			cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
}
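/*
 * Numeric example (values made up for illustration, and assuming
 * phys_pkg_id() shifts the initial APIC id right by its argument, as
 * the code above implies): with two cores per package bits = 1, so an
 * APIC id of 5 gives cpu_core_id = 5 & 1 = 1 and phys_proc_id =
 * 5 >> 1 = 2.
 */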
static int __init init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;
	int r;
	u64 value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	rdmsrl(MSR_K8_HWCR, value);
	value |= 1 << 6;
	wrmsrl(MSR_K8_HWCR, value);

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	r = get_model_name(c);
	if (!r) {
		/* Should distinguish Models here, but this is only
		   a fallback anyways. */
		strcpy(c->x86_model_id, "Hammer");
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* Fix cpuid4 emulation for more */
	num_cache_leaves = 3;

	return r;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	int cpu = smp_processor_id();

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		phys_proc_id[cpu] = phys_pkg_id(index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       phys_proc_id[cpu]);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		cpu_core_id[cpu] = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
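		/*
		 * Numeric example (values assumed for illustration): a
		 * two-core, HT-enabled package reports 4 logical CPUs in
		 * ebx[23:16], so the first get_count_order() gives 2 and
		 * phys_proc_id = apicid >> 2; after the division
		 * smp_num_siblings = 2, index_msb = 1, core_bits = 1, and
		 * an APIC id of 3 yields cpu_core_id = (3 >> 1) & 1 = 1.
		 */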
		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       cpu_core_id[cpu]);
	}
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	return ((eax >> 26) + 1);
}
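/*
 * Background (assumed from the CPUID specification, not stated in this
 * file): leaf 4, queried with ecx = 0, reports the maximum number of
 * processor cores per physical package minus one in eax[31:26], so a
 * value of 1 in that field means (1 + 1) = 2 cores.
 */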
static void srat_detect_node(void)
{
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[hard_smp_processor_id()];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned n;

	init_intel_cacheinfo(c);
	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		c->x86 += (tfms >> 20) & 0xff;
		c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
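		/*
		 * For reference (CPUID.1 layout, assumed rather than spelled
		 * out here): edx bit 19 advertises CLFLUSH, and ebx[15:8]
		 * gives the flush line size in 8-byte units, so the usual
		 * value of 8 translates to a 64-byte cache line.
		 */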
	}

	/* Have CPUID level 0 only - unheard of */

	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE

	if (c == &boot_cpu_data)

	numa_add_cpu(smp_processor_id());
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		"up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		/* nothing */	/* constant_tsc - moved to flags */
	};
	if (!cpu_online(c-cpu_data))
		return 0;

	seq_printf(m, "processor\t: %u\n"
		      "vendor_id\t: %s\n"
		      "cpu family\t: %d\n"
		      "model name\t: %s\n",
		      (unsigned)(c-cpu_data),
		      c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		      c->x86,
		      c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));

		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}

		"fpu_exception\t: yes\n"
		"cpuid level\t: %d\n"

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
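	/*
	 * Arithmetic example (HZ assumed to be 250 for illustration): with
	 * loops_per_jiffy = 8,000,000 the integer part is
	 * 8000000 / (500000/250) = 4000 and the fractional part is
	 * (8000000 / (5000/250)) % 100 = 0, i.e. "bogomips : 4000.00",
	 * which is just loops_per_jiffy * HZ / 500000.
	 */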
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++)
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0] ? " " : "",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}

	seq_printf(m, "\n\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

#ifdef CONFIG_INPUT_PCSPKR
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif