1 /*
2  *  linux/arch/i386/kernel/setup.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  *
6  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7  *
8  *  Memory region support
9  *      David Parsons <orc@pell.chi.il.us>, July-August 1999
10  *
11  *  Added E820 sanitization routine (removes overlapping memory regions);
12  *  Brian Moyle <bmoyle@mvista.com>, February 2001
13  *
14  * Moved CPU detection code to cpu/${cpu}.c
15  *    Patrick Mochel <mochel@osdl.org>, March 2002
16  *
17  *  Provisions for empty E820 memory regions (reported by certain BIOSes).
18  *  Alex Achenbach <xela@slit.de>, December 2002.
19  *
20  */
21
22 /*
23  * This file handles the architecture-dependent parts of initialization
24  */
25
26 #include <linux/config.h>
27 #include <linux/sched.h>
28 #include <linux/mm.h>
29 #include <linux/mmzone.h>
30 #include <linux/tty.h>
31 #include <linux/ioport.h>
32 #include <linux/acpi.h>
33 #include <linux/apm_bios.h>
34 #include <linux/initrd.h>
35 #include <linux/bootmem.h>
36 #include <linux/seq_file.h>
37 #include <linux/console.h>
38 #include <linux/mca.h>
39 #include <linux/root_dev.h>
40 #include <linux/highmem.h>
41 #include <linux/module.h>
42 #include <linux/efi.h>
43 #include <linux/init.h>
44 #include <linux/edd.h>
45 #include <linux/nodemask.h>
46 #include <linux/kexec.h>
47 #include <linux/crash_dump.h>
48
49 #include <video/edid.h>
50
51 #include <asm/apic.h>
52 #include <asm/e820.h>
53 #include <asm/mpspec.h>
54 #include <asm/setup.h>
55 #include <asm/arch_hooks.h>
56 #include <asm/sections.h>
57 #include <asm/io_apic.h>
58 #include <asm/ist.h>
59 #include <asm/io.h>
60 #include "setup_arch_pre.h"
61 #include <bios_ebda.h>
62
63 /* Forward Declaration. */
64 void __init find_max_pfn(void);
65
66 /* This value is set up by the early boot code to point to the value
67    immediately after the boot time page tables.  It contains a *physical*
68    address, and must not be in the .bss segment! */
69 unsigned long init_pg_tables_end __initdata = ~0UL;
70
71 int disable_pse __devinitdata = 0;
72
73 /*
74  * Machine setup..
75  */
76
77 #ifdef CONFIG_EFI
78 int efi_enabled = 0;
79 EXPORT_SYMBOL(efi_enabled);
80 #endif
81
82 /* cpu data as detected by the assembly code in head.S */
83 struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
84 /* common cpu data for all cpus */
85 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
86 EXPORT_SYMBOL(boot_cpu_data);
87
88 unsigned long mmu_cr4_features;
89
90 #ifdef  CONFIG_ACPI
91         int acpi_disabled = 0;
92 #else
93         int acpi_disabled = 1;
94 #endif
95 EXPORT_SYMBOL(acpi_disabled);
96
97 #ifdef  CONFIG_ACPI
98 int __initdata acpi_force = 0;
99 extern acpi_interrupt_flags     acpi_sci_flags;
100 #endif
101
102 /* for MCA, but anyone else can use it if they want */
103 unsigned int machine_id;
104 #ifdef CONFIG_MCA
105 EXPORT_SYMBOL(machine_id);
106 #endif
107 unsigned int machine_submodel_id;
108 unsigned int BIOS_revision;
109 unsigned int mca_pentium_flag;
110
111 /* For PCI or other memory-mapped resources */
112 unsigned long pci_mem_start = 0x10000000;
113 #ifdef CONFIG_PCI
114 EXPORT_SYMBOL(pci_mem_start);
115 #endif
116
117 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
118 int bootloader_type;
119
120 /* user-defined highmem size */
121 static unsigned int highmem_pages = -1;
122
123 /*
124  * Setup options
125  */
126 struct drive_info_struct { char dummy[32]; } drive_info;
127 #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
128     defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
129 EXPORT_SYMBOL(drive_info);
130 #endif
131 struct screen_info screen_info;
132 #ifdef CONFIG_VT
133 EXPORT_SYMBOL(screen_info);
134 #endif
135 struct apm_info apm_info;
136 EXPORT_SYMBOL(apm_info);
137 struct sys_desc_table_struct {
138         unsigned short length;
139         unsigned char table[0];
140 };
141 struct edid_info edid_info;
142 EXPORT_SYMBOL_GPL(edid_info);
143 struct ist_info ist_info;
144 #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
145         defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
146 EXPORT_SYMBOL(ist_info);
147 #endif
148 struct e820map e820;
149
150 extern void early_cpu_init(void);
151 extern void dmi_scan_machine(void);
152 extern void generic_apic_probe(char *);
153 extern int root_mountflags;
154
155 unsigned long saved_videomode;
156
157 #define RAMDISK_IMAGE_START_MASK        0x07FF
158 #define RAMDISK_PROMPT_FLAG             0x8000
159 #define RAMDISK_LOAD_FLAG               0x4000  
160
161 static char command_line[COMMAND_LINE_SIZE];
162
163 unsigned char __initdata boot_params[PARAM_SIZE];
164
165 static struct resource data_resource = {
166         .name   = "Kernel data",
167         .start  = 0,
168         .end    = 0,
169         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
170 };
171
172 static struct resource code_resource = {
173         .name   = "Kernel code",
174         .start  = 0,
175         .end    = 0,
176         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
177 };
178
179 static struct resource system_rom_resource = {
180         .name   = "System ROM",
181         .start  = 0xf0000,
182         .end    = 0xfffff,
183         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
184 };
185
186 static struct resource extension_rom_resource = {
187         .name   = "Extension ROM",
188         .start  = 0xe0000,
189         .end    = 0xeffff,
190         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
191 };
192
193 static struct resource adapter_rom_resources[] = { {
194         .name   = "Adapter ROM",
195         .start  = 0xc8000,
196         .end    = 0,
197         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
198 }, {
199         .name   = "Adapter ROM",
200         .start  = 0,
201         .end    = 0,
202         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
203 }, {
204         .name   = "Adapter ROM",
205         .start  = 0,
206         .end    = 0,
207         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
208 }, {
209         .name   = "Adapter ROM",
210         .start  = 0,
211         .end    = 0,
212         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
213 }, {
214         .name   = "Adapter ROM",
215         .start  = 0,
216         .end    = 0,
217         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
218 }, {
219         .name   = "Adapter ROM",
220         .start  = 0,
221         .end    = 0,
222         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
223 } };
224
225 #define ADAPTER_ROM_RESOURCES \
226         (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
227
228 static struct resource video_rom_resource = {
229         .name   = "Video ROM",
230         .start  = 0xc0000,
231         .end    = 0xc7fff,
232         .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
233 };
234
235 static struct resource video_ram_resource = {
236         .name   = "Video RAM area",
237         .start  = 0xa0000,
238         .end    = 0xbffff,
239         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
240 };
241
242 static struct resource standard_io_resources[] = { {
243         .name   = "dma1",
244         .start  = 0x0000,
245         .end    = 0x001f,
246         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
247 }, {
248         .name   = "pic1",
249         .start  = 0x0020,
250         .end    = 0x0021,
251         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
252 }, {
253         .name   = "timer0",
254         .start  = 0x0040,
255         .end    = 0x0043,
256         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
257 }, {
258         .name   = "timer1",
259         .start  = 0x0050,
260         .end    = 0x0053,
261         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
262 }, {
263         .name   = "keyboard",
264         .start  = 0x0060,
265         .end    = 0x006f,
266         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
267 }, {
268         .name   = "dma page reg",
269         .start  = 0x0080,
270         .end    = 0x008f,
271         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
272 }, {
273         .name   = "pic2",
274         .start  = 0x00a0,
275         .end    = 0x00a1,
276         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
277 }, {
278         .name   = "dma2",
279         .start  = 0x00c0,
280         .end    = 0x00df,
281         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
282 }, {
283         .name   = "fpu",
284         .start  = 0x00f0,
285         .end    = 0x00ff,
286         .flags  = IORESOURCE_BUSY | IORESOURCE_IO
287 } };
288
289 #define STANDARD_IO_RESOURCES \
290         (sizeof standard_io_resources / sizeof standard_io_resources[0])
291
292 #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
293
294 static int __init romchecksum(unsigned char *rom, unsigned long length)
295 {
296         unsigned char *p, sum = 0;
297
298         for (p = rom; p < rom + length; p++)
299                 sum += *p;
300         return sum == 0;
301 }
302
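/*
 * probe_roms() scans the legacy BIOS ROM areas: the video BIOS at
 * 0xc0000, the system BIOS at 0xf0000, an optional extension ROM at
 * 0xe0000, and adapter option ROMs on 2k boundaries in between.  An
 * option ROM announces itself with the 0xaa55 signature tested by
 * romsignature(); byte 2 of the image gives its length in 512-byte
 * units, and the image is only trusted if all of its bytes sum to
 * zero (romchecksum()).  Valid ROMs are claimed in the iomem resource
 * tree so nothing else gets mapped over them.
 */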
303 static void __init probe_roms(void)
304 {
305         unsigned long start, length, upper;
306         unsigned char *rom;
307         int           i;
308
309         /* video rom */
310         upper = adapter_rom_resources[0].start;
311         for (start = video_rom_resource.start; start < upper; start += 2048) {
312                 rom = isa_bus_to_virt(start);
313                 if (!romsignature(rom))
314                         continue;
315
316                 video_rom_resource.start = start;
317
318                 /* 0 < length <= 0x7f * 512, historically */
319                 length = rom[2] * 512;
320
321                 /* if checksum okay, trust length byte */
322                 if (length && romchecksum(rom, length))
323                         video_rom_resource.end = start + length - 1;
324
325                 request_resource(&iomem_resource, &video_rom_resource);
326                 break;
327         }
328
329         start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
330         if (start < upper)
331                 start = upper;
332
333         /* system rom */
334         request_resource(&iomem_resource, &system_rom_resource);
335         upper = system_rom_resource.start;
336
337         /* check for extension rom (ignore length byte!) */
338         rom = isa_bus_to_virt(extension_rom_resource.start);
339         if (romsignature(rom)) {
340                 length = extension_rom_resource.end - extension_rom_resource.start + 1;
341                 if (romchecksum(rom, length)) {
342                         request_resource(&iomem_resource, &extension_rom_resource);
343                         upper = extension_rom_resource.start;
344                 }
345         }
346
347         /* check for adapter roms on 2k boundaries */
348         for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
349                 rom = isa_bus_to_virt(start);
350                 if (!romsignature(rom))
351                         continue;
352
353                 /* 0 < length <= 0x7f * 512, historically */
354                 length = rom[2] * 512;
355
356                 /* but accept any length that fits if checksum okay */
357                 if (!length || start + length > upper || !romchecksum(rom, length))
358                         continue;
359
360                 adapter_rom_resources[i].start = start;
361                 adapter_rom_resources[i].end = start + length - 1;
362                 request_resource(&iomem_resource, &adapter_rom_resources[i]);
363
364                 start = adapter_rom_resources[i++].end & ~2047UL;
365         }
366 }
367
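/*
 * limit_regions() implements the "mem=" limit: walk the firmware memory
 * map (EFI if available, otherwise e820), find the first conventional
 * RAM region that extends past the requested size, shrink it so it ends
 * exactly at that limit, and drop every entry after it.
 */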
368 static void __init limit_regions(unsigned long long size)
369 {
370         unsigned long long current_addr = 0;
371         int i;
372
373         if (efi_enabled) {
374                 efi_memory_desc_t *md;
375                 void *p;
376
377                 for (p = memmap.map, i = 0; p < memmap.map_end;
378                         p += memmap.desc_size, i++) {
379                         md = p;
380                         current_addr = md->phys_addr + (md->num_pages << 12);
381                         if (md->type == EFI_CONVENTIONAL_MEMORY) {
382                                 if (current_addr >= size) {
383                                         md->num_pages -=
384                                                 (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
385                                         memmap.nr_map = i + 1;
386                                         return;
387                                 }
388                         }
389                 }
390         }
391         for (i = 0; i < e820.nr_map; i++) {
392                 if (e820.map[i].type == E820_RAM) {
393                         current_addr = e820.map[i].addr + e820.map[i].size;
394                         if (current_addr >= size) {
395                                 e820.map[i].size -= current_addr-size;
396                                 e820.nr_map = i + 1;
397                                 return;
398                         }
399                 }
400         }
401 }
402
403 static void __init add_memory_region(unsigned long long start,
404                                   unsigned long long size, int type)
405 {
406         int x;
407
408         if (!efi_enabled) {
409                 x = e820.nr_map;
410
411                 if (x == E820MAX) {
412                     printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
413                     return;
414                 }
415
416                 e820.map[x].addr = start;
417                 e820.map[x].size = size;
418                 e820.map[x].type = type;
419                 e820.nr_map++;
420         }
421 } /* add_memory_region */
422
423 #define E820_DEBUG      1
424
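/*
 * The loop below produces the familiar boot-time map dump.  Roughly
 * (addresses are machine specific, "BIOS-e820" is the usual "who"):
 *
 *  BIOS-e820: 0000000000000000 - 000000000009fc00 (usable)
 *  BIOS-e820: 00000000000f0000 - 0000000000100000 (reserved)
 *  BIOS-e820: 0000000000100000 - 000000001fff0000 (usable)
 */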
425 static void __init print_memory_map(char *who)
426 {
427         int i;
428
429         for (i = 0; i < e820.nr_map; i++) {
430                 printk(" %s: %016Lx - %016Lx ", who,
431                         e820.map[i].addr,
432                         e820.map[i].addr + e820.map[i].size);
433                 switch (e820.map[i].type) {
434                 case E820_RAM:  printk("(usable)\n");
435                                 break;
436                 case E820_RESERVED:
437                                 printk("(reserved)\n");
438                                 break;
439                 case E820_ACPI:
440                                 printk("(ACPI data)\n");
441                                 break;
442                 case E820_NVS:
443                                 printk("(ACPI NVS)\n");
444                                 break;
445                 default:        printk("type %lu\n", e820.map[i].type);
446                                 break;
447                 }
448         }
449 }
450
451 /*
452  * Sanitize the BIOS e820 map.
453  *
454  * Some e820 responses include overlapping entries.  The following 
455  * replaces the original e820 map with a new one, removing overlaps.
456  *
457  */
458 struct change_member {
459         struct e820entry *pbios; /* pointer to original bios entry */
460         unsigned long long addr; /* address for this change point */
461 };
462 static struct change_member change_point_list[2*E820MAX] __initdata;
463 static struct change_member *change_point[2*E820MAX] __initdata;
464 static struct e820entry *overlap_list[E820MAX] __initdata;
465 static struct e820entry new_bios[E820MAX] __initdata;
466
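/*
 * How the sanitizer below works: every BIOS entry contributes two
 * "change points", one at its start and one at its end address.  After
 * sorting them, a single sweep maintains overlap_list[] -- the entries
 * currently covering the sweep position.  Whenever the highest type in
 * that list changes, the output entry being built in new_bios[] is
 * closed and a new one of the winning type is opened.
 */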
467 static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
468 {
469         struct change_member *change_tmp;
470         unsigned long current_type, last_type;
471         unsigned long long last_addr;
472         int chgidx, still_changing;
473         int overlap_entries;
474         int new_bios_entry;
475         int old_nr, new_nr, chg_nr;
476         int i;
477
478         /*
479                 Visually we're performing the following (1,2,3,4 = memory types)...
480
481                 Sample memory map (w/overlaps):
482                    ____22__________________
483                    ______________________4_
484                    ____1111________________
485                    _44_____________________
486                    11111111________________
487                    ____________________33__
488                    ___________44___________
489                    __________33333_________
490                    ______________22________
491                    ___________________2222_
492                    _________111111111______
493                    _____________________11_
494                    _________________4______
495
496                 Sanitized equivalent (no overlap):
497                    1_______________________
498                    _44_____________________
499                    ___1____________________
500                    ____22__________________
501                    ______11________________
502                    _________1______________
503                    __________3_____________
504                    ___________44___________
505                    _____________33_________
506                    _______________2________
507                    ________________1_______
508                    _________________4______
509                    ___________________2____
510                    ____________________33__
511                    ______________________4_
512         */
513
514         /* if there's only one memory region, don't bother */
515         if (*pnr_map < 2)
516                 return -1;
517
518         old_nr = *pnr_map;
519
520         /* bail out if we find any unreasonable addresses in bios map */
521         for (i=0; i<old_nr; i++)
522                 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
523                         return -1;
524
525         /* create pointers for initial change-point information (for sorting) */
526         for (i=0; i < 2*old_nr; i++)
527                 change_point[i] = &change_point_list[i];
528
529         /* record all known change-points (starting and ending addresses),
530            omitting those that are for empty memory regions */
531         chgidx = 0;
532         for (i=0; i < old_nr; i++)      {
533                 if (biosmap[i].size != 0) {
534                         change_point[chgidx]->addr = biosmap[i].addr;
535                         change_point[chgidx++]->pbios = &biosmap[i];
536                         change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
537                         change_point[chgidx++]->pbios = &biosmap[i];
538                 }
539         }
540         chg_nr = chgidx;        /* true number of change-points */
541
542         /* sort change-point list by memory addresses (low -> high) */
543         still_changing = 1;
544         while (still_changing)  {
545                 still_changing = 0;
546                 for (i=1; i < chg_nr; i++)  {
547                         /* if <current_addr> > <last_addr>, swap */
548                         /* or, if current=<start_addr> & last=<end_addr>, swap */
549                         if ((change_point[i]->addr < change_point[i-1]->addr) ||
550                                 ((change_point[i]->addr == change_point[i-1]->addr) &&
551                                  (change_point[i]->addr == change_point[i]->pbios->addr) &&
552                                  (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
553                            )
554                         {
555                                 change_tmp = change_point[i];
556                                 change_point[i] = change_point[i-1];
557                                 change_point[i-1] = change_tmp;
558                                 still_changing=1;
559                         }
560                 }
561         }
562
563         /* create a new bios memory map, removing overlaps */
564         overlap_entries=0;       /* number of entries in the overlap table */
565         new_bios_entry=0;        /* index for creating new bios map entries */
566         last_type = 0;           /* start with undefined memory type */
567         last_addr = 0;           /* start with 0 as last starting address */
568         /* loop through change-points, determining the effect on the new bios map */
569         for (chgidx=0; chgidx < chg_nr; chgidx++)
570         {
571                 /* keep track of all overlapping bios entries */
572                 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
573                 {
574                         /* add map entry to overlap list (> 1 entry implies an overlap) */
575                         overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
576                 }
577                 else
578                 {
579                         /* remove entry from list (order independent, so swap with last) */
580                         for (i=0; i<overlap_entries; i++)
581                         {
582                                 if (overlap_list[i] == change_point[chgidx]->pbios)
583                                         overlap_list[i] = overlap_list[overlap_entries-1];
584                         }
585                         overlap_entries--;
586                 }
587                 /* if there are overlapping entries, decide which "type" to use */
588                 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
589                 current_type = 0;
590                 for (i=0; i<overlap_entries; i++)
591                         if (overlap_list[i]->type > current_type)
592                                 current_type = overlap_list[i]->type;
593                 /* continue building up new bios map based on this information */
594                 if (current_type != last_type)  {
595                         if (last_type != 0)      {
596                                 new_bios[new_bios_entry].size =
597                                         change_point[chgidx]->addr - last_addr;
598                                 /* move forward only if the new size was non-zero */
599                                 if (new_bios[new_bios_entry].size != 0)
600                                         if (++new_bios_entry >= E820MAX)
601                                                 break;  /* no more space left for new bios entries */
602                         }
603                         if (current_type != 0)  {
604                                 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
605                                 new_bios[new_bios_entry].type = current_type;
606                                 last_addr=change_point[chgidx]->addr;
607                         }
608                         last_type = current_type;
609                 }
610         }
611         new_nr = new_bios_entry;   /* retain count for new bios entries */
612
613         /* copy new bios mapping into original location */
614         memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
615         *pnr_map = new_nr;
616
617         return 0;
618 }
619
620 /*
621  * Copy the BIOS e820 map into a safe place.
622  *
623  * Sanity-check it while we're at it..
624  *
625  * If we're lucky and live on a modern system, the setup code
626  * will have given us a memory map that we can use to properly
627  * set up memory.  If we aren't, we'll fake a memory map.
628  *
629  * We check to see that the memory map contains at least 2 elements
630  * before we'll use it, because the detection code in setup.S may
631  * not be perfect and almost every PC known to man has two memory
632  * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
633  * thinkpad 560x, for example, does not cooperate with the memory
634  * detection code.)
635  */
636 static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
637 {
638         /* Only one memory region (or negative)? Ignore it */
639         if (nr_map < 2)
640                 return -1;
641
642         do {
643                 unsigned long long start = biosmap->addr;
644                 unsigned long long size = biosmap->size;
645                 unsigned long long end = start + size;
646                 unsigned long type = biosmap->type;
647
648                 /* Overflow in 64 bits? Ignore the memory map. */
649                 if (start > end)
650                         return -1;
651
652                 /*
653                  * Some BIOSes claim RAM in the 640k - 1M region.
654                  * Not right. Fix it up.
655                  */
656                 if (type == E820_RAM) {
657                         if (start < 0x100000ULL && end > 0xA0000ULL) {
658                                 if (start < 0xA0000ULL)
659                                         add_memory_region(start, 0xA0000ULL-start, type);
660                                 if (end <= 0x100000ULL)
661                                         continue;
662                                 start = 0x100000ULL;
663                                 size = end - start;
664                         }
665                 }
666                 add_memory_region(start, size, type);
667         } while (biosmap++,--nr_map);
668         return 0;
669 }
670
671 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
672 struct edd edd;
673 #ifdef CONFIG_EDD_MODULE
674 EXPORT_SYMBOL(edd);
675 #endif
676 /**
677  * copy_edd() - Copy the BIOS EDD information
678  *              from boot_params into a safe place.
679  *
680  */
681 static inline void copy_edd(void)
682 {
683      memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
684      memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
685      edd.mbr_signature_nr = EDD_MBR_SIG_NR;
686      edd.edd_info_nr = EDD_NR;
687 }
688 #else
689 static inline void copy_edd(void)
690 {
691 }
692 #endif
693
694 /*
695  * Do NOT EVER look at the BIOS memory size location.
696  * It does not work on many machines.
697  */
698 #define LOWMEMSIZE()    (0x9f000)
699
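/*
 * A few illustrative boot lines handled by parse_cmdline_early() (the
 * values are examples only):
 *
 *   mem=512M                      trim the memory map to the first 512MB
 *   memmap=exactmap memmap=640K@0 memmap=255M@1M
 *                                 discard the BIOS map and build one
 *                                 from scratch
 *   memmap=64M#0x10000000         mark a range as ACPI data
 *   memmap=64M$0x10000000         mark a range as reserved
 *   crashkernel=64M@16M           reserve RAM for a crash (kexec) kernel
 */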
700 static void __init parse_cmdline_early (char ** cmdline_p)
701 {
702         char c = ' ', *to = command_line, *from = saved_command_line;
703         int len = 0;
704         int userdef = 0;
705
706         /* Save unparsed command line copy for /proc/cmdline */
707         saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
708
709         for (;;) {
710                 if (c != ' ')
711                         goto next_char;
712                 /*
713                  * "mem=nopentium" disables the 4MB page tables.
714                  * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
715                  * to <mem>, overriding the bios size.
716                  * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
717                  * <start> to <start>+<mem>, overriding the bios size.
718                  *
719                  * HPA tells me bootloaders need to parse mem=, so no new
720                  * option should be mem=  [also see Documentation/i386/boot.txt]
721                  */
722                 if (!memcmp(from, "mem=", 4)) {
723                         if (to != command_line)
724                                 to--;
725                         if (!memcmp(from+4, "nopentium", 9)) {
726                                 from += 9+4;
727                                 clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
728                                 disable_pse = 1;
729                         } else {
730                                 /* If the user specifies memory size, we
731                                  * limit the BIOS-provided memory map to
732                                  * that size. exactmap can be used to specify
733                                  * the exact map. mem=number can be used to
734                                  * trim the existing memory map.
735                                  */
736                                 unsigned long long mem_size;
737  
738                                 mem_size = memparse(from+4, &from);
739                                 limit_regions(mem_size);
740                                 userdef=1;
741                         }
742                 }
743
744                 else if (!memcmp(from, "memmap=", 7)) {
745                         if (to != command_line)
746                                 to--;
747                         if (!memcmp(from+7, "exactmap", 8)) {
748 #ifdef CONFIG_CRASH_DUMP
749                                 /* If we are doing a crash dump, we
750                                  * still need to know the real mem
751                                  * size before original memory map is
752                                  * reset.
753                                  */
754                                 find_max_pfn();
755                                 saved_max_pfn = max_pfn;
756 #endif
757                                 from += 8+7;
758                                 e820.nr_map = 0;
759                                 userdef = 1;
760                         } else {
761                                 /* If the user specifies memory size, we
762                                  * limit the BIOS-provided memory map to
763                                  * that size. exactmap can be used to specify
764                                  * the exact map. mem=number can be used to
765                                  * trim the existing memory map.
766                                  */
767                                 unsigned long long start_at, mem_size;
768  
769                                 mem_size = memparse(from+7, &from);
770                                 if (*from == '@') {
771                                         start_at = memparse(from+1, &from);
772                                         add_memory_region(start_at, mem_size, E820_RAM);
773                                 } else if (*from == '#') {
774                                         start_at = memparse(from+1, &from);
775                                         add_memory_region(start_at, mem_size, E820_ACPI);
776                                 } else if (*from == '$') {
777                                         start_at = memparse(from+1, &from);
778                                         add_memory_region(start_at, mem_size, E820_RESERVED);
779                                 } else {
780                                         limit_regions(mem_size);
781                                         userdef=1;
782                                 }
783                         }
784                 }
785
786                 else if (!memcmp(from, "noexec=", 7))
787                         noexec_setup(from + 7);
788
789
790 #ifdef  CONFIG_X86_SMP
791                 /*
792                  * If the BIOS enumerates physical processors before logical,
793                  * maxcpus=N at enumeration-time can be used to disable HT.
794                  */
795                 else if (!memcmp(from, "maxcpus=", 8)) {
796                         extern unsigned int maxcpus;
797
798                         maxcpus = simple_strtoul(from + 8, NULL, 0);
799                 }
800 #endif
801
802 #ifdef CONFIG_ACPI
803                 /* "acpi=off" disables both ACPI table parsing and interpreter */
804                 else if (!memcmp(from, "acpi=off", 8)) {
805                         disable_acpi();
806                 }
807
808                 /* acpi=force to override the blacklist */
809                 else if (!memcmp(from, "acpi=force", 10)) {
810                         acpi_force = 1;
811                         acpi_ht = 1;
812                         acpi_disabled = 0;
813                 }
814
815                 /* acpi=strict disables out-of-spec workarounds */
816                 else if (!memcmp(from, "acpi=strict", 11)) {
817                         acpi_strict = 1;
818                 }
819
820                 /* Limit ACPI just to boot-time to enable HT */
821                 else if (!memcmp(from, "acpi=ht", 7)) {
822                         if (!acpi_force)
823                                 disable_acpi();
824                         acpi_ht = 1;
825                 }
826                 
827                 /* "pci=noacpi" disables ACPI IRQ routing and PCI scan */
828                 else if (!memcmp(from, "pci=noacpi", 10)) {
829                         acpi_disable_pci();
830                 }
831                 /* "acpi=noirq" disables ACPI interrupt routing */
832                 else if (!memcmp(from, "acpi=noirq", 10)) {
833                         acpi_noirq_set();
834                 }
835
836                 else if (!memcmp(from, "acpi_sci=edge", 13))
837                         acpi_sci_flags.trigger =  1;
838
839                 else if (!memcmp(from, "acpi_sci=level", 14))
840                         acpi_sci_flags.trigger = 3;
841
842                 else if (!memcmp(from, "acpi_sci=high", 13))
843                         acpi_sci_flags.polarity = 1;
844
845                 else if (!memcmp(from, "acpi_sci=low", 12))
846                         acpi_sci_flags.polarity = 3;
847
848 #ifdef CONFIG_X86_IO_APIC
849                 else if (!memcmp(from, "acpi_skip_timer_override", 24))
850                         acpi_skip_timer_override = 1;
851 #endif
852
853 #ifdef CONFIG_X86_LOCAL_APIC
854                 /* disable IO-APIC */
855                 else if (!memcmp(from, "noapic", 6))
856                         disable_ioapic_setup();
857 #endif /* CONFIG_X86_LOCAL_APIC */
858 #endif /* CONFIG_ACPI */
859
860 #ifdef CONFIG_X86_LOCAL_APIC
861                 /* enable local APIC */
862                 else if (!memcmp(from, "lapic", 5))
863                         lapic_enable();
864
865                 /* disable local APIC */
866                 else if (!memcmp(from, "nolapic", 7))
867                         lapic_disable();
868 #endif /* CONFIG_X86_LOCAL_APIC */
869
870 #ifdef CONFIG_KEXEC
871                 /* crashkernel=size@addr specifies the location to reserve for
872                  * a crash kernel.  By reserving this memory we guarantee
873                  * that Linux never sets it up as a DMA target.
874                  * Useful for holding code to do something appropriate
875                  * after a kernel panic.
876                  */
877                 else if (!memcmp(from, "crashkernel=", 12)) {
878                         unsigned long size, base;
879                         size = memparse(from+12, &from);
880                         if (*from == '@') {
881                                 base = memparse(from+1, &from);
882                                 /* FIXME: Do I want a sanity check
883                                  * to validate the memory range?
884                                  */
885                                 crashk_res.start = base;
886                                 crashk_res.end   = base + size - 1;
887                         }
888                 }
889 #endif
890 #ifdef CONFIG_CRASH_DUMP
891                 /* elfcorehdr= specifies the location of elf core header
892                  * stored by the crashed kernel.
893                  */
894                 else if (!memcmp(from, "elfcorehdr=", 11))
895                         elfcorehdr_addr = memparse(from+11, &from);
896 #endif
897
898                 /*
899                  * highmem=size forces highmem to be exactly 'size' bytes.
900                  * This works even on boxes that have no highmem otherwise.
901                  * This also works to reduce highmem size on bigger boxes.
902                  */
903                 else if (!memcmp(from, "highmem=", 8))
904                         highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
905         
906                 /*
907                  * vmalloc=size forces the vmalloc area to be exactly 'size'
908                  * bytes. This can be used to increase (or decrease) the
909                  * vmalloc area - the default is 128m.
910                  */
911                 else if (!memcmp(from, "vmalloc=", 8))
912                         __VMALLOC_RESERVE = memparse(from+8, &from);
913
914         next_char:
915                 c = *(from++);
916                 if (!c)
917                         break;
918                 if (COMMAND_LINE_SIZE <= ++len)
919                         break;
920                 *(to++) = c;
921         }
922         *to = '\0';
923         *cmdline_p = command_line;
924         if (userdef) {
925                 printk(KERN_INFO "user-defined physical RAM map:\n");
926                 print_memory_map("user");
927         }
928 }
929
930 /*
931  * Callback for efi_memory_walk.
932  */
933 static int __init
934 efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
935 {
936         unsigned long *max_pfn = arg, pfn;
937
938         if (start < end) {
939                 pfn = PFN_UP(end -1);
940                 if (pfn > *max_pfn)
941                         *max_pfn = pfn;
942         }
943         return 0;
944 }
945
946
947 /*
948  * Find the highest page frame number we have available
949  */
950 void __init find_max_pfn(void)
951 {
952         int i;
953
954         max_pfn = 0;
955         if (efi_enabled) {
956                 efi_memmap_walk(efi_find_max_pfn, &max_pfn);
957                 return;
958         }
959
960         for (i = 0; i < e820.nr_map; i++) {
961                 unsigned long start, end;
962                 /* RAM? */
963                 if (e820.map[i].type != E820_RAM)
964                         continue;
965                 start = PFN_UP(e820.map[i].addr);
966                 end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
967                 if (start >= end)
968                         continue;
969                 if (end > max_pfn)
970                         max_pfn = end;
971         }
972 }
973
974 /*
975  * Determine low and high memory ranges:
976  */
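/*
 * Everything above MAXMEM_PFN (roughly the 896MB mark with the default
 * 3G/1G user/kernel split, once the vmalloc reserve is subtracted) is
 * only reachable as highmem.  A "highmem=" override is honoured, but a
 * request that would leave less than 64MB of lowmem is ignored with a
 * warning.
 */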
977 unsigned long __init find_max_low_pfn(void)
978 {
979         unsigned long max_low_pfn;
980
981         max_low_pfn = max_pfn;
982         if (max_low_pfn > MAXMEM_PFN) {
983                 if (highmem_pages == -1)
984                         highmem_pages = max_pfn - MAXMEM_PFN;
985                 if (highmem_pages + MAXMEM_PFN < max_pfn)
986                         max_pfn = MAXMEM_PFN + highmem_pages;
987                 if (highmem_pages + MAXMEM_PFN > max_pfn) {
988                         printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
989                         highmem_pages = 0;
990                 }
991                 max_low_pfn = MAXMEM_PFN;
992 #ifndef CONFIG_HIGHMEM
993                 /* Maximum memory usable is what is directly addressable */
994                 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
995                                         MAXMEM>>20);
996                 if (max_pfn > MAX_NONPAE_PFN)
997                         printk(KERN_WARNING "Use a PAE enabled kernel.\n");
998                 else
999                         printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
1000                 max_pfn = MAXMEM_PFN;
1001 #else /* !CONFIG_HIGHMEM */
1002 #ifndef CONFIG_X86_PAE
1003                 if (max_pfn > MAX_NONPAE_PFN) {
1004                         max_pfn = MAX_NONPAE_PFN;
1005                         printk(KERN_WARNING "Warning only 4GB will be used.\n");
1006                         printk(KERN_WARNING "Use a PAE enabled kernel.\n");
1007                 }
1008 #endif /* !CONFIG_X86_PAE */
1009 #endif /* !CONFIG_HIGHMEM */
1010         } else {
1011                 if (highmem_pages == -1)
1012                         highmem_pages = 0;
1013 #ifdef CONFIG_HIGHMEM
1014                 if (highmem_pages >= max_pfn) {
1015                         printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
1016                         highmem_pages = 0;
1017                 }
1018                 if (highmem_pages) {
1019                         if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
1020                                 printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
1021                                 highmem_pages = 0;
1022                         }
1023                         max_low_pfn -= highmem_pages;
1024                 }
1025 #else
1026                 if (highmem_pages)
1027                         printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
1028 #endif
1029         }
1030         return max_low_pfn;
1031 }
1032
1033 /*
1034  * Free all available memory for boot time allocation.  Used
1035  * as a callback function by efi_memory_walk()
1036  */
1037
1038 static int __init
1039 free_available_memory(unsigned long start, unsigned long end, void *arg)
1040 {
1041         /* check max_low_pfn */
1042         if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
1043                 return 0;
1044         if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
1045                 end = (max_low_pfn + 1) << PAGE_SHIFT;
1046         if (start < end)
1047                 free_bootmem(start, end - start);
1048
1049         return 0;
1050 }
1051 /*
1052  * Register fully available low RAM pages with the bootmem allocator.
1053  */
1054 static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
1055 {
1056         int i;
1057
1058         if (efi_enabled) {
1059                 efi_memmap_walk(free_available_memory, NULL);
1060                 return;
1061         }
1062         for (i = 0; i < e820.nr_map; i++) {
1063                 unsigned long curr_pfn, last_pfn, size;
1064                 /*
1065                  * Reserve usable low memory
1066                  */
1067                 if (e820.map[i].type != E820_RAM)
1068                         continue;
1069                 /*
1070                  * We are rounding up the start address of usable memory:
1071                  */
1072                 curr_pfn = PFN_UP(e820.map[i].addr);
1073                 if (curr_pfn >= max_low_pfn)
1074                         continue;
1075                 /*
1076                  * ... and at the end of the usable range downwards:
1077                  */
1078                 last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
1079
1080                 if (last_pfn > max_low_pfn)
1081                         last_pfn = max_low_pfn;
1082
1083                 /*
1084                  * .. finally, did all the rounding and playing
1085                  * around just make the area go away?
1086                  */
1087                 if (last_pfn <= curr_pfn)
1088                         continue;
1089
1090                 size = last_pfn - curr_pfn;
1091                 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
1092         }
1093 }
1094
1095 /*
1096  * workaround for Dell systems that neglect to reserve EBDA
1097  */
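/*
 * The EBDA's real-mode segment is conventionally published at 0x40E in
 * the BIOS data area (that is what get_bios_ebda() reads); the area
 * itself normally sits just below 640K.
 */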
1098 static void __init reserve_ebda_region(void)
1099 {
1100         unsigned int addr;
1101         addr = get_bios_ebda();
1102         if (addr)
1103                 reserve_bootmem(addr, PAGE_SIZE);       
1104 }
1105
1106 #ifndef CONFIG_NEED_MULTIPLE_NODES
1107 void __init setup_bootmem_allocator(void);
1108 static unsigned long __init setup_memory(void)
1109 {
1110         /*
1111          * partially used pages are not usable - thus
1112          * we are rounding upwards:
1113          */
1114         min_low_pfn = PFN_UP(init_pg_tables_end);
1115
1116         find_max_pfn();
1117
1118         max_low_pfn = find_max_low_pfn();
1119
1120 #ifdef CONFIG_HIGHMEM
1121         highstart_pfn = highend_pfn = max_pfn;
1122         if (max_pfn > max_low_pfn) {
1123                 highstart_pfn = max_low_pfn;
1124         }
1125         printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
1126                 pages_to_mb(highend_pfn - highstart_pfn));
1127 #endif
1128         printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
1129                         pages_to_mb(max_low_pfn));
1130
1131         setup_bootmem_allocator();
1132
1133         return max_low_pfn;
1134 }
1135
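/*
 * Carve the directly mapped pages into zones: ZONE_DMA covers everything
 * below MAX_DMA_ADDRESS (the 16MB ISA DMA limit), ZONE_NORMAL the rest
 * of lowmem, and ZONE_HIGHMEM whatever lies above max_low_pfn.
 */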
1136 void __init zone_sizes_init(void)
1137 {
1138         unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
1139         unsigned int max_dma, low;
1140
1141         max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
1142         low = max_low_pfn;
1143
1144         if (low < max_dma)
1145                 zones_size[ZONE_DMA] = low;
1146         else {
1147                 zones_size[ZONE_DMA] = max_dma;
1148                 zones_size[ZONE_NORMAL] = low - max_dma;
1149 #ifdef CONFIG_HIGHMEM
1150                 zones_size[ZONE_HIGHMEM] = highend_pfn - low;
1151 #endif
1152         }
1153         free_area_init(zones_size);
1154 }
1155 #else
1156 extern unsigned long __init setup_memory(void);
1157 extern void zone_sizes_init(void);
1158 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
1159
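/*
 * Bring up the boot-time allocator and pin down everything that must
 * not be handed out by it: the kernel image plus the bootmem bitmap,
 * physical page 0, the EBDA, the SMP trampoline page, the ACPI wakeup
 * area, the MP table, the initrd and any crashkernel window.  Each
 * reservation below corresponds to one of these.
 */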
1160 void __init setup_bootmem_allocator(void)
1161 {
1162         unsigned long bootmap_size;
1163         /*
1164          * Initialize the boot-time allocator (with low memory only):
1165          */
1166         bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
1167
1168         register_bootmem_low_pages(max_low_pfn);
1169
1170         /*
1171          * Reserve the bootmem bitmap itself as well. We do this in two
1172          * steps (first step was init_bootmem()) because this catches
1173          * the (very unlikely) case of us accidentally initializing the
1174          * bootmem allocator with an invalid RAM area.
1175          */
1176         reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
1177                          bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
1178
1179         /*
1180          * reserve physical page 0 - it's a special BIOS page on many boxes,
1181          * enabling clean reboots, SMP operation, laptop functions.
1182          */
1183         reserve_bootmem(0, PAGE_SIZE);
1184
1185         /* reserve EBDA region, it's a 4K region */
1186         reserve_ebda_region();
1187
1188         /* Could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
1189            PCI prefetch into it (errata #56). Usually the page is reserved anyway,
1190            unless you have no PS/2 mouse plugged in. */
1191         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
1192             boot_cpu_data.x86 == 6)
1193              reserve_bootmem(0xa0000 - 4096, 4096);
1194
1195 #ifdef CONFIG_SMP
1196         /*
1197          * But first pinch a few for the stack/trampoline stuff
1198          * FIXME: Don't need the extra page at 4K, but need to fix
1199          * trampoline before removing it. (see the GDT stuff)
1200          */
1201         reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
1202 #endif
1203 #ifdef CONFIG_ACPI_SLEEP
1204         /*
1205          * Reserve low memory region for sleep support.
1206          */
1207         acpi_reserve_bootmem();
1208 #endif
1209 #ifdef CONFIG_X86_FIND_SMP_CONFIG
1210         /*
1211          * Find and reserve possible boot-time SMP configuration:
1212          */
1213         find_smp_config();
1214 #endif
1215
1216 #ifdef CONFIG_BLK_DEV_INITRD
1217         if (LOADER_TYPE && INITRD_START) {
1218                 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
1219                         reserve_bootmem(INITRD_START, INITRD_SIZE);
1220                         initrd_start =
1221                                 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
1222                         initrd_end = initrd_start+INITRD_SIZE;
1223                 }
1224                 else {
1225                         printk(KERN_ERR "initrd extends beyond end of memory "
1226                             "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
1227                             INITRD_START + INITRD_SIZE,
1228                             max_low_pfn << PAGE_SHIFT);
1229                         initrd_start = 0;
1230                 }
1231         }
1232 #endif
1233 #ifdef CONFIG_KEXEC
1234         if (crashk_res.start != crashk_res.end)
1235                 reserve_bootmem(crashk_res.start,
1236                         crashk_res.end - crashk_res.start + 1);
1237 #endif
1238 }
1239
1240 /*
1241  * The node 0 pgdat is initialized before all of these because
1242  * it's needed for bootmem.  node>0 pgdats have their virtual
1243  * space allocated before the pagetables are in place to access
1244  * them, so they can't be cleared then.
1245  *
1246  * This should all compile down to nothing when NUMA is off.
1247  */
1248 void __init remapped_pgdat_init(void)
1249 {
1250         int nid;
1251
1252         for_each_online_node(nid) {
1253                 if (nid != 0)
1254                         memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
1255         }
1256 }
1257
1258 /*
1259  * Request address space for all standard RAM and ROM resources
1260  * and also for regions reported as reserved by the e820.
1261  */
1262 static void __init
1263 legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
1264 {
1265         int i;
1266
1267         probe_roms();
1268         for (i = 0; i < e820.nr_map; i++) {
1269                 struct resource *res;
1270                 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
1271                         continue;
1272                 res = alloc_bootmem_low(sizeof(struct resource));
1273                 switch (e820.map[i].type) {
1274                 case E820_RAM:  res->name = "System RAM"; break;
1275                 case E820_ACPI: res->name = "ACPI Tables"; break;
1276                 case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
1277                 default:        res->name = "reserved";
1278                 }
1279                 res->start = e820.map[i].addr;
1280                 res->end = res->start + e820.map[i].size - 1;
1281                 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
1282                 request_resource(&iomem_resource, res);
1283                 if (e820.map[i].type == E820_RAM) {
1284                         /*
1285                          *  We don't know which RAM region contains kernel data,
1286                          *  so we try it repeatedly and let the resource manager
1287                          *  test it.
1288                          */
1289                         request_resource(res, code_resource);
1290                         request_resource(res, data_resource);
1291 #ifdef CONFIG_KEXEC
1292                         request_resource(res, &crashk_res);
1293 #endif
1294                 }
1295         }
1296 }
1297
1298 /*
1299  * Request address space for all standard resources
1300  */
1301 static void __init register_memory(void)
1302 {
1303         unsigned long gapstart, gapsize;
1304         unsigned long long last;
1305         int           i;
1306
1307         if (efi_enabled)
1308                 efi_initialize_iomem_resources(&code_resource, &data_resource);
1309         else
1310                 legacy_init_iomem_resources(&code_resource, &data_resource);
1311
1312         /* EFI systems may still have VGA */
1313         request_resource(&iomem_resource, &video_ram_resource);
1314
1315         /* request I/O space for devices used on all i[345]86 PCs */
1316         for (i = 0; i < STANDARD_IO_RESOURCES; i++)
1317                 request_resource(&ioport_resource, &standard_io_resources[i]);
1318
1319         /*
1320          * Search for the biggest gap in the low 32 bits of the e820
1321          * memory space.
1322          */
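        /*
         * Worked example (illustrative numbers): if the highest RAM
         * region ends at 0x7ff80000 and nothing else sits below 4GB,
         * the gap starts there and pci_mem_start below is rounded up
         * to the next megabyte boundary, 0x80000000.
         */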
1323         last = 0x100000000ull;
1324         gapstart = 0x10000000;
1325         gapsize = 0x400000;
1326         i = e820.nr_map;
1327         while (--i >= 0) {
1328                 unsigned long long start = e820.map[i].addr;
1329                 unsigned long long end = start + e820.map[i].size;
1330
1331                 /*
1332                  * Since "last" is at most 4GB, we know we'll
1333                  * fit in 32 bits if this condition is true
1334                  */
1335                 if (last > end) {
1336                         unsigned long gap = last - end;
1337
1338                         if (gap > gapsize) {
1339                                 gapsize = gap;
1340                                 gapstart = end;
1341                         }
1342                 }
1343                 if (start < last)
1344                         last = start;
1345         }
1346
1347         /*
1348          * Start allocating dynamic PCI memory a bit into the gap,
1349          * aligned up to the nearest megabyte.
1350          *
1351          * Question: should we try to pad it up a bit (do something
1352          * like " + (gapsize >> 3)" in there too?). We now have the
1353          * technology.
1354          */
1355         pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
1356
1357         printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
1358                 pci_mem_start, gapstart, gapsize);
1359 }
1360
1361 /* Use inline assembly to define this because the nops are defined 
1362    as inline assembly strings in the include files and we cannot 
1363    get them easily into strings. */
1364 asm("\t.data\nintelnops: " 
1365     GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
1366     GENERIC_NOP7 GENERIC_NOP8); 
1367 asm("\t.data\nk8nops: " 
1368     K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
1369     K8_NOP7 K8_NOP8); 
1370 asm("\t.data\nk7nops: " 
1371     K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
1372     K7_NOP7 K7_NOP8); 
1373     
1374 extern unsigned char intelnops[], k8nops[], k7nops[];
1375 static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
1376      NULL,
1377      intelnops,
1378      intelnops + 1,
1379      intelnops + 1 + 2,
1380      intelnops + 1 + 2 + 3,
1381      intelnops + 1 + 2 + 3 + 4,
1382      intelnops + 1 + 2 + 3 + 4 + 5,
1383      intelnops + 1 + 2 + 3 + 4 + 5 + 6,
1384      intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1385 }; 
1386 static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
1387      NULL,
1388      k8nops,
1389      k8nops + 1,
1390      k8nops + 1 + 2,
1391      k8nops + 1 + 2 + 3,
1392      k8nops + 1 + 2 + 3 + 4,
1393      k8nops + 1 + 2 + 3 + 4 + 5,
1394      k8nops + 1 + 2 + 3 + 4 + 5 + 6,
1395      k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1396 }; 
1397 static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
1398      NULL,
1399      k7nops,
1400      k7nops + 1,
1401      k7nops + 1 + 2,
1402      k7nops + 1 + 2 + 3,
1403      k7nops + 1 + 2 + 3 + 4,
1404      k7nops + 1 + 2 + 3 + 4 + 5,
1405      k7nops + 1 + 2 + 3 + 4 + 5 + 6,
1406      k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1407 }; 
1408 static struct nop { 
1409      int cpuid; 
1410      unsigned char **noptable; 
1411 } noptypes[] = { 
1412      { X86_FEATURE_K8, k8_nops }, 
1413      { X86_FEATURE_K7, k7_nops }, 
1414      { -1, NULL }
1415 }; 
1416
1417 /* Replace instructions with better alternatives for this CPU type.
1418
1419    This runs before SMP is initialized to avoid SMP problems with
1420    self-modifying code. This implies that asymmetric systems where
1421    APs have fewer capabilities than the boot processor are not handled.
1422    Tough. Make sure you disable such features by hand. */ 
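/*
 * Worked example of the padding loop: with instrlen 7 and
 * replacementlen 2 the hole is 5 bytes, so a single 5-byte nop from the
 * selected table is copied in; holes larger than ASM_NOP_MAX are filled
 * with repeated ASM_NOP_MAX-byte nops plus one shorter nop for the
 * remainder.
 */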
1423 void apply_alternatives(void *start, void *end) 
1424 {
1425         struct alt_instr *a; 
1426         int diff, i, k;
1427         unsigned char **noptable = intel_nops; 
1428         for (i = 0; noptypes[i].cpuid >= 0; i++) { 
1429                 if (boot_cpu_has(noptypes[i].cpuid)) { 
1430                         noptable = noptypes[i].noptable;
1431                         break;
1432                 }
1433         } 
1434         for (a = start; (void *)a < end; a++) { 
1435                 if (!boot_cpu_has(a->cpuid))
1436                         continue;
1437                 BUG_ON(a->replacementlen > a->instrlen); 
1438                 memcpy(a->instr, a->replacement, a->replacementlen); 
1439                 diff = a->instrlen - a->replacementlen; 
1440                 /* Pad the rest with nops */
1441                 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
1442                         k = diff;
1443                         if (k > ASM_NOP_MAX)
1444                                 k = ASM_NOP_MAX;
1445                         memcpy(a->instr + i, noptable[k], k); 
1446                 } 
1447         }
1448 }
1449
1450 void __init alternative_instructions(void)
1451 {
1452         extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
1453         apply_alternatives(__alt_instructions, __alt_instructions_end);
1454 }
1455
1456 static char * __init machine_specific_memory_setup(void);
1457
1458 #ifdef CONFIG_MCA
1459 static void set_mca_bus(int x)
1460 {
1461         MCA_bus = x;
1462 }
1463 #else
1464 static void set_mca_bus(int x) { }
1465 #endif
1466
1467 /*
1468  * Determine if we were loaded by an EFI loader.  If so, then we have also been
1469  * passed the efi memmap, systab, etc., so we should use these data structures
1470  * for initialization.  Note, the efi init code path is determined by the
1471  * global efi_enabled. This allows the same kernel image to be used on existing
1472  * systems (with a traditional BIOS) as well as on EFI systems.
1473  */
1474 void __init setup_arch(char **cmdline_p)
1475 {
1476         unsigned long max_low_pfn;
1477
1478         memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
1479         pre_setup_arch_hook();
1480         early_cpu_init();
1481
1482         /*
1483          * FIXME: This isn't an official loader_type right
1484          * now but does currently work with elilo.
1485          * If we were configured as an EFI kernel, check to make
1486          * sure that we were loaded correctly from elilo and that
1487          * the system table is valid.  If not, then initialize normally.
1488          */
1489 #ifdef CONFIG_EFI
1490         if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
1491                 efi_enabled = 1;
1492 #endif
1493
1494         ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
1495         drive_info = DRIVE_INFO;
1496         screen_info = SCREEN_INFO;
1497         edid_info = EDID_INFO;
1498         apm_info.bios = APM_BIOS_INFO;
1499         ist_info = IST_INFO;
1500         saved_videomode = VIDEO_MODE;
1501         if( SYS_DESC_TABLE.length != 0 ) {
1502                 set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
1503                 machine_id = SYS_DESC_TABLE.table[0];
1504                 machine_submodel_id = SYS_DESC_TABLE.table[1];
1505                 BIOS_revision = SYS_DESC_TABLE.table[2];
1506         }
1507         bootloader_type = LOADER_TYPE;
1508
1509 #ifdef CONFIG_BLK_DEV_RAM
1510         rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
1511         rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
1512         rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
1513 #endif
1514         ARCH_SETUP
1515         if (efi_enabled)
1516                 efi_init();
1517         else {
1518                 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
1519                 print_memory_map(machine_specific_memory_setup());
1520         }
1521
1522         copy_edd();
1523
1524         if (!MOUNT_ROOT_RDONLY)
1525                 root_mountflags &= ~MS_RDONLY;
1526         init_mm.start_code = (unsigned long) _text;
1527         init_mm.end_code = (unsigned long) _etext;
1528         init_mm.end_data = (unsigned long) _edata;
1529         init_mm.brk = init_pg_tables_end + PAGE_OFFSET;
1530
1531         code_resource.start = virt_to_phys(_text);
1532         code_resource.end = virt_to_phys(_etext)-1;
1533         data_resource.start = virt_to_phys(_etext);
1534         data_resource.end = virt_to_phys(_edata)-1;
1535
1536         parse_cmdline_early(cmdline_p);
1537
1538         max_low_pfn = setup_memory();
1539
1540         /*
1541          * NOTE: before this point _nobody_ is allowed to allocate
1542          * any memory using the bootmem allocator.  Although the
1543          * allocator is now initialised, only the first 8MB of the kernel
1544          * virtual address space has been mapped.  All allocations before
1545          * paging_init() has completed must use the alloc_bootmem_low_pages()
1546          * variant (which allocates DMA'able memory) and care must be taken
1547          * not to exceed the 8MB limit.
1548          */
1549
1550 #ifdef CONFIG_SMP
1551         smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
1552 #endif
1553         paging_init();
1554         remapped_pgdat_init();
1555         sparse_init();
1556         zone_sizes_init();
1557
1558         /*
1559          * NOTE: at this point the bootmem allocator is fully available.
1560          */
1561
1562 #ifdef CONFIG_EARLY_PRINTK
1563         {
1564                 char *s = strstr(*cmdline_p, "earlyprintk=");
1565                 if (s) {
1566                         extern void setup_early_printk(char *);
1567
1568                         setup_early_printk(s);
1569                         printk("early console enabled\n");
1570                 }
1571         }
1572 #endif
1573
1574
1575         dmi_scan_machine();
1576
1577 #ifdef CONFIG_X86_GENERICARCH
1578         generic_apic_probe(*cmdline_p);
1579 #endif  
1580         if (efi_enabled)
1581                 efi_map_memmap();
1582
1583 #ifdef CONFIG_ACPI
1584         /*
1585          * Parse the ACPI tables for possible boot-time SMP configuration.
1586          */
1587         acpi_boot_table_init();
1588         acpi_boot_init();
1589
1590 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
1591         if (def_to_bigsmp)
1592                 printk(KERN_WARNING "More than 8 CPUs detected and "
1593                         "CONFIG_X86_PC cannot handle it.\nUse "
1594                         "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
1595 #endif
1596 #endif
1597 #ifdef CONFIG_X86_LOCAL_APIC
1598         if (smp_found_config)
1599                 get_smp_config();
1600 #endif
1601
1602         register_memory();
1603
1604 #ifdef CONFIG_VT
1605 #if defined(CONFIG_VGA_CONSOLE)
1606         if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1607                 conswitchp = &vga_con;
1608 #elif defined(CONFIG_DUMMY_CONSOLE)
1609         conswitchp = &dummy_con;
1610 #endif
1611 #endif
1612 }
1613
1614 #include "setup_arch_post.h"
1615 /*
1616  * Local Variables:
1617  * mode:c
1618  * c-file-style:"k&r"
1619  * c-basic-offset:8
1620  * End:
1621  */